target/linux/cns3xxx/files/drivers/usb/dwc/otg_pcd.c
1 /* ==========================================================================
2 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd.c $
3 * $Revision: #70 $
4 * $Date: 2008/10/14 $
5 * $Change: 1115682 $
6 *
7 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
8 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
9 * otherwise expressly agreed to in writing between Synopsys and you.
10 *
11 * The Software IS NOT an item of Licensed Software or Licensed Product under
12 * any End User Software License Agreement or Agreement for Licensed Product
13 * with Synopsys or any supplement thereto. You are permitted to use and
14 * redistribute this Software in source and binary forms, with or without
15 * modification, provided that redistributions of source code must retain this
16 * notice. You may not view, use, disclose, copy or distribute this file or
17 * any information contained herein except pursuant to this license grant from
18 * Synopsys. If you do not agree with this notice, including the disclaimer
19 * below, then you are not authorized to use the Software.
20 *
21 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
25 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31 * DAMAGE.
32 * ========================================================================== */
33 #ifndef DWC_HOST_ONLY
34
35 /** @file
36 * This file implements the Peripheral Controller Driver.
37 *
38 * The Peripheral Controller Driver (PCD) is responsible for
39 * translating requests from the Function Driver into the appropriate
40 * actions on the DWC_otg controller. It isolates the Function Driver
41 * from the specifics of the controller by providing an API to the
42 * Function Driver.
43 *
44 * The Peripheral Controller Driver for Linux will implement the
45 * Gadget API, so that the existing Gadget drivers can be used.
46 * (Gadget Driver is the Linux terminology for a Function Driver.)
47 *
48 * The Linux Gadget API is defined in the header file
49 * <code><linux/usb_gadget.h></code>. The USB EP operations API is
50 * defined in the structure <code>usb_ep_ops</code> and the USB
51 * Controller API is defined in the structure
52 * <code>usb_gadget_ops</code>.
53 *
54 * An important function of the PCD is managing interrupts generated
55 * by the DWC_otg controller. The implementation of the DWC_otg device
56 * mode interrupt service routines is in dwc_otg_pcd_intr.c.
57 *
58 * @todo Add Device Mode test modes (Test J mode, Test K mode, etc).
59 * @todo Does it work when the request size is greater than DEPTSIZ
60 * transfer size?
61 *
62 */
63
64
65 #include <linux/kernel.h>
66 #include <linux/module.h>
67 #include <linux/moduleparam.h>
68 #include <linux/init.h>
69 #include <linux/device.h>
70 #include <linux/platform_device.h>
71 #include <linux/errno.h>
72 #include <linux/list.h>
73 #include <linux/interrupt.h>
74 #include <linux/string.h>
75 #include <linux/dma-mapping.h>
76 #include <linux/version.h>
77
78 #include <mach/irqs.h>
79 #include <linux/usb/ch9.h>
80
81 //#include <linux/usb_gadget.h>
82
83 #include "otg_driver.h"
84 #include "otg_pcd.h"
85
86
87
88 /**
89 * Static PCD pointer for use in usb_gadget_register_driver and
90 * usb_gadget_unregister_driver. Initialized in dwc_otg_pcd_init.
91 */
92 static dwc_otg_pcd_t *s_pcd = 0;
93
94
95 /* Display the contents of the buffer */
96 extern void dump_msg(const u8 *buf, unsigned int length);
97
98
99 /**
100 * This function completes a request. It calls the request's completion callback.
101 */
102 void dwc_otg_request_done(dwc_otg_pcd_ep_t *ep, dwc_otg_pcd_request_t *req,
103 int status)
104 {
105 unsigned stopped = ep->stopped;
106
107 DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, ep);
108 list_del_init(&req->queue);
109
110 if (req->req.status == -EINPROGRESS) {
111 req->req.status = status;
112 } else {
113 status = req->req.status;
114 }
115
116 /* don't modify queue heads during completion callback */
117 ep->stopped = 1;
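/* Drop the PCD lock across the completion callback: the gadget
 * driver's complete() handler may re-queue requests, which would
 * otherwise deadlock on ep->pcd->lock. */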
118 SPIN_UNLOCK(&ep->pcd->lock);
119 req->req.complete(&ep->ep, &req->req);
120 SPIN_LOCK(&ep->pcd->lock);
121
122 if (ep->pcd->request_pending > 0) {
123 --ep->pcd->request_pending;
124 }
125
126 ep->stopped = stopped;
127 }
128
129 /**
130 * This function terminates all the requests in the EP request queue.
131 */
132 void dwc_otg_request_nuke(dwc_otg_pcd_ep_t *ep)
133 {
134 dwc_otg_pcd_request_t *req;
135
136 ep->stopped = 1;
137
138 /* called with irqs blocked?? */
139 while (!list_empty(&ep->queue)) {
140 req = list_entry(ep->queue.next, dwc_otg_pcd_request_t,
141 queue);
142 dwc_otg_request_done(ep, req, -ESHUTDOWN);
143 }
144 }
145
146 /* USB Endpoint Operations */
147 /*
148 * The following sections briefly describe the behavior of the Gadget
149 * API endpoint operations implemented in the DWC_otg driver
150 * software. Detailed descriptions of the generic behavior of each of
151 * these functions can be found in the Linux header file
152 * include/linux/usb_gadget.h.
153 *
154 * The Gadget API provides wrapper functions for each of the function
155 * pointers defined in usb_ep_ops. The Gadget Driver calls the wrapper
156 * function, which then calls the underlying PCD function. The
157 * following sections are named according to the wrapper
158 * functions. Within each section, the corresponding DWC_otg PCD
159 * function name is specified.
160 *
161 */
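/*
 * Illustrative sketch (not part of the driver): roughly how a Gadget
 * Driver of this kernel generation reaches the PCD functions below
 * through the Gadget API wrappers. example_complete()/example_submit()
 * and their parameters are hypothetical; usb_ep_enable() is shown in
 * the two-argument form used by kernels of this era.
 */
#if 0
static void example_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* req->status and req->actual are valid here */
}

static int example_submit(struct usb_ep *ep,
			  const struct usb_endpoint_descriptor *desc,
			  void *buf, unsigned len)
{
	struct usb_request *req;
	int retval;

	retval = usb_ep_enable(ep, desc);		/* -> dwc_otg_pcd_ep_enable() */
	if (retval)
		return retval;

	req = usb_ep_alloc_request(ep, GFP_KERNEL);	/* -> dwc_otg_pcd_alloc_request() */
	if (!req)
		return -ENOMEM;

	req->buf = buf;
	req->length = len;
	req->complete = example_complete;

	return usb_ep_queue(ep, req, GFP_ATOMIC);	/* -> dwc_otg_pcd_ep_queue() */
}
#endif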
162
163 /**
164 * This function assigns a periodic Tx FIFO to a periodic EP
165 * in shared Tx FIFO mode
166 */
167 static uint32_t assign_perio_tx_fifo(dwc_otg_core_if_t *core_if)
168 {
169 uint32_t PerTxMsk = 1;
170 int i;
171 for(i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; ++i)
172 {
173 if((PerTxMsk & core_if->p_tx_msk) == 0) {
174 core_if->p_tx_msk |= PerTxMsk;
175 return i + 1;
176 }
177 PerTxMsk <<= 1;
178 }
179 return 0;
180 }
181 /**
182 * This function releases periodic Tx FIFO
183 * in shared Tx FIFO mode
184 */
185 static void release_perio_tx_fifo(dwc_otg_core_if_t *core_if, uint32_t fifo_num)
186 {
187 core_if->p_tx_msk = (core_if->p_tx_msk & (1 << (fifo_num - 1))) ^ core_if->p_tx_msk;
188 }
189 /**
190 * This function assigns a Tx FIFO to an EP
191 * in dedicated (multiple) Tx FIFO mode
192 */
193 static uint32_t assign_tx_fifo(dwc_otg_core_if_t *core_if)
194 {
195 uint32_t TxMsk = 1;
196 int i;
197
198 for(i = 0; i < core_if->hwcfg4.b.num_in_eps; ++i)
199 {
200 if((TxMsk & core_if->tx_msk) == 0) {
201 core_if->tx_msk |= TxMsk;
202 return i + 1;
203 }
204 TxMsk <<= 1;
205 }
206 return 0;
207 }
208 /**
209 * This function releases a Tx FIFO
210 * in dedicated (multiple) Tx FIFO mode
211 */
212 static void release_tx_fifo(dwc_otg_core_if_t *core_if, uint32_t fifo_num)
213 {
214 core_if->tx_msk = (core_if->tx_msk & (1 << (fifo_num - 1))) ^ core_if->tx_msk;
215 }
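/*
 * Note on the FIFO bookkeeping above: the assign_* helpers scan the
 * p_tx_msk/tx_msk bitmaps for the first clear bit and return a 1-based
 * FIFO number (0 means no FIFO was available), while the release_*
 * helpers clear bit (fifo_num - 1) again -- the XOR expression is
 * equivalent to msk &= ~(1 << (fifo_num - 1)).
 */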
216
217 /**
218 * This function is called by the Gadget Driver for each EP to be
219 * configured for the current configuration (SET_CONFIGURATION).
220 *
221 * This function initializes the dwc_otg_ep_t data structure, and then
222 * calls dwc_otg_ep_activate.
223 */
224 static int dwc_otg_pcd_ep_enable(struct usb_ep *usb_ep,
225 const struct usb_endpoint_descriptor *ep_desc)
226 {
227 dwc_otg_pcd_ep_t *ep = 0;
228 dwc_otg_pcd_t *pcd = 0;
229 unsigned long flags;
230
231 DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p)\n", __func__, usb_ep, ep_desc);
232
233 ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep);
234 if (!usb_ep || !ep_desc || ep->desc ||
235 ep_desc->bDescriptorType != USB_DT_ENDPOINT) {
236 DWC_WARN("%s, bad ep or descriptor\n", __func__);
237 return -EINVAL;
238 }
239 if (ep == &ep->pcd->ep0) {
240 DWC_WARN("%s, bad ep(0)\n", __func__);
241 return -EINVAL;
242 }
243
244 /* Check FIFO size? */
245 if (!ep_desc->wMaxPacketSize) {
246 DWC_WARN("%s, bad %s maxpacket\n", __func__, usb_ep->name);
247 return -ERANGE;
248 }
249
250 pcd = ep->pcd;
251 if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) {
252 DWC_WARN("%s, bogus device state\n", __func__);
253 return -ESHUTDOWN;
254 }
255
256 SPIN_LOCK_IRQSAVE(&pcd->lock, flags);
257
258 ep->desc = ep_desc;
259 ep->ep.maxpacket = le16_to_cpu (ep_desc->wMaxPacketSize);
260
261 /*
262 * Activate the EP
263 */
264 ep->stopped = 0;
265
266 ep->dwc_ep.is_in = (USB_DIR_IN & ep_desc->bEndpointAddress) != 0;
267 ep->dwc_ep.maxpacket = ep->ep.maxpacket;
268
269 ep->dwc_ep.type = ep_desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
270
271 if(ep->dwc_ep.is_in) {
272 if(!pcd->otg_dev->core_if->en_multiple_tx_fifo) {
273 ep->dwc_ep.tx_fifo_num = 0;
274
275 if (ep->dwc_ep.type == USB_ENDPOINT_XFER_ISOC) {
276 /*
277 * if ISOC EP then assign a Periodic Tx FIFO.
278 */
279 ep->dwc_ep.tx_fifo_num = assign_perio_tx_fifo(pcd->otg_dev->core_if);
280 }
281 } else {
282 /*
283 * if Dedicated FIFOs mode is on then assign a Tx FIFO.
284 */
285 ep->dwc_ep.tx_fifo_num = assign_tx_fifo(pcd->otg_dev->core_if);
286
287 }
288 }
289 /* Set initial data PID. */
290 if (ep->dwc_ep.type == USB_ENDPOINT_XFER_BULK) {
291 ep->dwc_ep.data_pid_start = 0;
292 }
293
294 DWC_DEBUGPL(DBG_PCD, "Activate %s-%s: type=%d, mps=%d desc=%p\n",
295 ep->ep.name, (ep->dwc_ep.is_in ?"IN":"OUT"),
296 ep->dwc_ep.type, ep->dwc_ep.maxpacket, ep->desc);
297
298 if(ep->dwc_ep.type != USB_ENDPOINT_XFER_ISOC) {
299 ep->dwc_ep.desc_addr = dwc_otg_ep_alloc_desc_chain(&ep->dwc_ep.dma_desc_addr, MAX_DMA_DESC_CNT);
300 }
301
302 dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep);
303 SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);
304
305 return 0;
306 }
307
308 /**
309 * This function is called when an EP is disabled due to disconnect or
310 * change in configuration. Any pending requests will terminate with a
311 * status of -ESHUTDOWN.
312 *
313 * This function modifies the dwc_otg_ep_t data structure for this EP,
314 * and then calls dwc_otg_ep_deactivate.
315 */
316 static int dwc_otg_pcd_ep_disable(struct usb_ep *usb_ep)
317 {
318 dwc_otg_pcd_ep_t *ep;
319 dwc_otg_pcd_t *pcd = 0;
320 unsigned long flags;
321
322 DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, usb_ep);
323 ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep);
324 if (!usb_ep || !ep->desc) {
325 DWC_DEBUGPL(DBG_PCD, "%s, %s not enabled\n", __func__,
326 usb_ep ? ep->ep.name : NULL);
327 return -EINVAL;
328 }
329
330 SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags);
331
332 dwc_otg_request_nuke(ep);
333
334 dwc_otg_ep_deactivate(GET_CORE_IF(ep->pcd), &ep->dwc_ep);
335 ep->desc = 0;
336 ep->stopped = 1;
337
338 if(ep->dwc_ep.is_in) {
339 dwc_otg_flush_tx_fifo(GET_CORE_IF(ep->pcd), ep->dwc_ep.tx_fifo_num);
340 release_perio_tx_fifo(GET_CORE_IF(ep->pcd), ep->dwc_ep.tx_fifo_num);
341 release_tx_fifo(GET_CORE_IF(ep->pcd), ep->dwc_ep.tx_fifo_num);
342 }
343
344 /* Free DMA Descriptors */
345 pcd = ep->pcd;
346
347 SPIN_UNLOCK_IRQRESTORE(&ep->pcd->lock, flags);
348
349 if(ep->dwc_ep.type != USB_ENDPOINT_XFER_ISOC && ep->dwc_ep.desc_addr) {
350 dwc_otg_ep_free_desc_chain(ep->dwc_ep.desc_addr, ep->dwc_ep.dma_desc_addr, MAX_DMA_DESC_CNT);
351 }
352
353 DWC_DEBUGPL(DBG_PCD, "%s disabled\n", usb_ep->name);
354 return 0;
355 }
356
357
358 /**
359 * This function allocates a request object to use with the specified
360 * endpoint.
361 *
362 * @param ep The endpoint to be used with the request
363 * @param gfp_flags the GFP_* flags to use.
364 */
365 static struct usb_request *dwc_otg_pcd_alloc_request(struct usb_ep *ep,
366 gfp_t gfp_flags)
367 {
368 dwc_otg_pcd_request_t *req;
369
370 DWC_DEBUGPL(DBG_PCDV,"%s(%p,%d)\n", __func__, ep, gfp_flags);
371 if (0 == ep) {
372 DWC_WARN("%s() %s\n", __func__, "Invalid EP!\n");
373 return 0;
374 }
375 req = kmalloc(sizeof(dwc_otg_pcd_request_t), gfp_flags);
376 if (0 == req) {
377 DWC_WARN("%s() %s\n", __func__,
378 "request allocation failed!\n");
379 return 0;
380 }
381 memset(req, 0, sizeof(dwc_otg_pcd_request_t));
382 req->req.dma = DMA_ADDR_INVALID;
383 INIT_LIST_HEAD(&req->queue);
384 return &req->req;
385 }
386
387 /**
388 * This function frees a request object.
389 *
390 * @param ep The endpoint associated with the request
391 * @param req The request being freed
392 */
393 static void dwc_otg_pcd_free_request(struct usb_ep *ep,
394 struct usb_request *req)
395 {
396 dwc_otg_pcd_request_t *request;
397 DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p)\n", __func__, ep, req);
398
399 if (0 == ep || 0 == req) {
400 DWC_WARN("%s() %s\n", __func__,
401 "Invalid ep or req argument!\n");
402 return;
403 }
404
405 request = container_of(req, dwc_otg_pcd_request_t, req);
406 kfree(request);
407 }
408
409 #if 0
410 /**
411 * This function allocates an I/O buffer to be used for a transfer
412 * to/from the specified endpoint.
413 *
414 * @param usb_ep The endpoint to be used with the request
415 * @param bytes The desired number of bytes for the buffer
416 * @param dma Pointer to the buffer's DMA address; must be valid
417 * @param gfp_flags the GFP_* flags to use.
418 * @return address of a new buffer or NULL if the buffer could not be allocated.
419 */
420 static void *dwc_otg_pcd_alloc_buffer(struct usb_ep *usb_ep, unsigned bytes,
421 dma_addr_t *dma,
422 gfp_t gfp_flags)
423 {
424 void *buf;
425 dwc_otg_pcd_ep_t *ep;
426 dwc_otg_pcd_t *pcd = 0;
427
428 ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep);
429 pcd = ep->pcd;
430
431 DWC_DEBUGPL(DBG_PCDV,"%s(%p,%d,%p,%0x)\n", __func__, usb_ep, bytes,
432 dma, gfp_flags);
433
434 /* Check dword alignment */
435 if ((bytes & 0x3UL) != 0) {
436 DWC_WARN("%s() Buffer size is not a multiple of "
437 "DWORD size (%d)", __func__, bytes);
438 }
439
440 if (GET_CORE_IF(pcd)->dma_enable) {
441 buf = dma_alloc_coherent (NULL, bytes, dma, gfp_flags);
442 }
443 else {
444 buf = kmalloc(bytes, gfp_flags);
445 }
446
447 /* Check dword alignment */
448 if (((int)buf & 0x3UL) != 0) {
449 DWC_WARN("%s() Buffer is not DWORD aligned (%p)",
450 __func__, buf);
451 }
452
453 return buf;
454 }
455
456 /**
457 * This function frees an I/O buffer that was allocated by alloc_buffer.
458 *
459 * @param usb_ep the endpoint associated with the buffer
460 * @param buf address of the buffer
461 * @param dma The buffer's DMA address
462 * @param bytes The number of bytes of the buffer
463 */
464 static void dwc_otg_pcd_free_buffer(struct usb_ep *usb_ep, void *buf,
465 dma_addr_t dma, unsigned bytes)
466 {
467 dwc_otg_pcd_ep_t *ep;
468 dwc_otg_pcd_t *pcd = 0;
469
470 ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep);
471 pcd = ep->pcd;
472
473 DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p,%0x,%d)\n", __func__, ep, buf, dma, bytes);
474
475 if (GET_CORE_IF(pcd)->dma_enable) {
476 dma_free_coherent (NULL, bytes, buf, dma);
477 }
478 else {
479 kfree(buf);
480 }
481 }
482 #endif
483
484 /**
485 * This function is used to submit an I/O Request to an EP.
486 *
487 * - When the request completes the request's completion callback
488 * is called to return the request to the driver.
489 * - An EP, except control EPs, may have multiple requests
490 * pending.
491 * - Once submitted the request cannot be examined or modified.
492 * - Each request is turned into one or more packets.
493 * - A BULK EP can queue any amount of data; the transfer is
494 * packetized.
495 * - Zero length Packets are specified with the request 'zero'
496 * flag.
497 */
498 static int dwc_otg_pcd_ep_queue(struct usb_ep *usb_ep,
499 struct usb_request *usb_req,
500 gfp_t gfp_flags)
501 {
502 int prevented = 0;
503 dwc_otg_pcd_request_t *req;
504 dwc_otg_pcd_ep_t *ep;
505 dwc_otg_pcd_t *pcd;
506 unsigned long flags = 0;
507
508 DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p,%d)\n",
509 __func__, usb_ep, usb_req, gfp_flags);
510
511 req = container_of(usb_req, dwc_otg_pcd_request_t, req);
512 if (!usb_req || !usb_req->complete || !usb_req->buf ||
513 !list_empty(&req->queue)) {
514 DWC_WARN("%s, bad params\n", __func__);
515 return -EINVAL;
516 }
517
518 ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep);
519 if (!usb_ep || (!ep->desc && ep->dwc_ep.num != 0)/* || ep->stopped != 0*/) {
520 DWC_WARN("%s, bad ep\n", __func__);
521 return -EINVAL;
522 }
523
524 pcd = ep->pcd;
525 if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) {
526 DWC_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n", pcd->gadget.speed);
527 DWC_WARN("%s, bogus device state\n", __func__);
528 return -ESHUTDOWN;
529 }
530
531
532 DWC_DEBUGPL(DBG_PCD, "%s queue req %p, len %d buf %p\n",
533 usb_ep->name, usb_req, usb_req->length, usb_req->buf);
534
535 if (!GET_CORE_IF(pcd)->core_params->opt) {
536 if (ep->dwc_ep.num != 0) {
537 DWC_ERROR("%s queue req %p, len %d buf %p\n",
538 usb_ep->name, usb_req, usb_req->length, usb_req->buf);
539 }
540 }
541
542 SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags);
543
544 #if defined(DEBUG) & defined(VERBOSE)
545 dump_msg(usb_req->buf, usb_req->length);
546 #endif
547
548 usb_req->status = -EINPROGRESS;
549 usb_req->actual = 0;
550
551 /*
552 * For EP0 IN without premature status, zlp is required?
553 */
554 if (ep->dwc_ep.num == 0 && ep->dwc_ep.is_in) {
555 DWC_DEBUGPL(DBG_PCDV, "%s-OUT ZLP\n", usb_ep->name);
556 //_req->zero = 1;
557 }
558
559 /* Start the transfer */
560 if (list_empty(&ep->queue) && !ep->stopped) {
561 /* EP0 Transfer? */
562 if (ep->dwc_ep.num == 0) {
563 switch (pcd->ep0state) {
564 case EP0_IN_DATA_PHASE:
565 DWC_DEBUGPL(DBG_PCD,
566 "%s ep0: EP0_IN_DATA_PHASE\n",
567 __func__);
568 break;
569
570 case EP0_OUT_DATA_PHASE:
571 DWC_DEBUGPL(DBG_PCD,
572 "%s ep0: EP0_OUT_DATA_PHASE\n",
573 __func__);
574 if (pcd->request_config) {
575 /* Complete STATUS PHASE */
576 ep->dwc_ep.is_in = 1;
577 pcd->ep0state = EP0_IN_STATUS_PHASE;
578 }
579 break;
580
581 case EP0_IN_STATUS_PHASE:
582 DWC_DEBUGPL(DBG_PCD,
583 "%s ep0: EP0_IN_STATUS_PHASE\n",
584 __func__);
585 break;
586
587 default:
588 DWC_DEBUGPL(DBG_ANY, "ep0: odd state %d\n",
589 pcd->ep0state);
590 SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);
591 return -EL2HLT;
592 }
593 ep->dwc_ep.dma_addr = usb_req->dma;
594 ep->dwc_ep.start_xfer_buff = usb_req->buf;
595 ep->dwc_ep.xfer_buff = usb_req->buf;
596 ep->dwc_ep.xfer_len = usb_req->length;
597 ep->dwc_ep.xfer_count = 0;
598 ep->dwc_ep.sent_zlp = 0;
599 ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
600
601 if(usb_req->zero) {
602 if((ep->dwc_ep.xfer_len % ep->dwc_ep.maxpacket == 0)
603 && (ep->dwc_ep.xfer_len != 0)) {
604 ep->dwc_ep.sent_zlp = 1;
605 }
606
607 }
608
609 ep_check_and_patch_dma_addr(ep);
610 dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep->dwc_ep);
611 }
612 else {
613
614 uint32_t max_transfer = GET_CORE_IF(ep->pcd)->core_params->max_transfer_size;
615
616 /* Setup and start the Transfer */
617 ep->dwc_ep.dma_addr = usb_req->dma;
618 ep->dwc_ep.start_xfer_buff = usb_req->buf;
619 ep->dwc_ep.xfer_buff = usb_req->buf;
620 ep->dwc_ep.sent_zlp = 0;
621 ep->dwc_ep.total_len = usb_req->length;
622 ep->dwc_ep.xfer_len = 0;
623 ep->dwc_ep.xfer_count = 0;
624
625 if(max_transfer > MAX_TRANSFER_SIZE) {
626 ep->dwc_ep.maxxfer = max_transfer - (max_transfer % ep->dwc_ep.maxpacket);
627 } else {
628 ep->dwc_ep.maxxfer = max_transfer;
629 }
630
631 if(usb_req->zero) {
632 if((ep->dwc_ep.total_len % ep->dwc_ep.maxpacket == 0)
633 && (ep->dwc_ep.total_len != 0)) {
634 ep->dwc_ep.sent_zlp = 1;
635 }
636
637 }
638
639 ep_check_and_patch_dma_addr(ep);
640 dwc_otg_ep_start_transfer(GET_CORE_IF(pcd), &ep->dwc_ep);
641 }
642 }
643
644 if ((req != 0) || prevented) {
645 ++pcd->request_pending;
646 list_add_tail(&req->queue, &ep->queue);
647 if (ep->dwc_ep.is_in && ep->stopped && !(GET_CORE_IF(pcd)->dma_enable)) {
648 /** @todo NGS Create a function for this. */
649 diepmsk_data_t diepmsk = { .d32 = 0};
650 diepmsk.b.intktxfemp = 1;
651 if(GET_CORE_IF(pcd)->multiproc_int_enable) {
652 dwc_modify_reg32(&GET_CORE_IF(pcd)->dev_if->dev_global_regs->diepeachintmsk[ep->dwc_ep.num],
653 0, diepmsk.d32);
654 } else {
655 dwc_modify_reg32(&GET_CORE_IF(pcd)->dev_if->dev_global_regs->diepmsk, 0, diepmsk.d32);
656 }
657 }
658 }
659
660 SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);
661 return 0;
662 }
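/*
 * Note: the queue operation above starts the transfer immediately only
 * when the EP's request queue is empty and the EP is not stopped; the
 * request is always appended to ep->queue, and pending requests are
 * picked up later by the interrupt handling (see start_next_request()
 * and the start_xfer tasklet further down in this file).
 */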
663
664 /**
665 * This function cancels an I/O request from an EP.
666 */
667 static int dwc_otg_pcd_ep_dequeue(struct usb_ep *usb_ep,
668 struct usb_request *usb_req)
669 {
670 dwc_otg_pcd_request_t *req;
671 dwc_otg_pcd_ep_t *ep;
672 dwc_otg_pcd_t *pcd;
673 unsigned long flags;
674
675 DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p)\n", __func__, usb_ep, usb_req);
676
677 ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep);
678 if (!usb_ep || !usb_req || (!ep->desc && ep->dwc_ep.num != 0)) {
679 DWC_WARN("%s, bad argument\n", __func__);
680 return -EINVAL;
681 }
682 pcd = ep->pcd;
683 if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) {
684 DWC_WARN("%s, bogus device state\n", __func__);
685 return -ESHUTDOWN;
686 }
687
688 SPIN_LOCK_IRQSAVE(&pcd->lock, flags);
689 DWC_DEBUGPL(DBG_PCDV, "%s %s %s %p\n", __func__, usb_ep->name,
690 ep->dwc_ep.is_in ? "IN" : "OUT",
691 usb_req);
692
693 /* make sure it's actually queued on this endpoint */
694 list_for_each_entry(req, &ep->queue, queue)
695 {
696 if (&req->req == usb_req) {
697 break;
698 }
699 }
700
701 if (&req->req != usb_req) {
702 SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);
703 return -EINVAL;
704 }
705
706 if (!list_empty(&req->queue)) {
707 dwc_otg_request_done(ep, req, -ECONNRESET);
708 }
709 else {
710 req = 0;
711 }
712
713 SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);
714
715 return req ? 0 : -EOPNOTSUPP;
716 }
717
718 /**
719 * usb_ep_set_halt stalls an endpoint.
720 *
721 * usb_ep_clear_halt clears an endpoint halt and resets its data
722 * toggle.
723 *
724 * Both of these functions are implemented with the same underlying
725 * function. The behavior depends on the value argument.
726 *
727 * @param[in] usb_ep the Endpoint to halt or clear halt.
728 * @param[in] value
729 * - 0 means clear_halt.
730 * - 1 means set_halt,
731 * - 2 means clear stall lock flag.
732 * - 3 means set stall lock flag.
733 */
734 static int dwc_otg_pcd_ep_set_halt(struct usb_ep *usb_ep, int value)
735 {
736 int retval = 0;
737 unsigned long flags;
738 dwc_otg_pcd_ep_t *ep = 0;
739
740
741 DWC_DEBUGPL(DBG_PCD,"HALT %s %d\n", usb_ep->name, value);
742
743 ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep);
744
745 if (!usb_ep || (!ep->desc && ep != &ep->pcd->ep0) ||
746 ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
747 DWC_WARN("%s, bad ep\n", __func__);
748 return -EINVAL;
749 }
750
751 SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags);
752 if (!list_empty(&ep->queue)) {
753 DWC_WARN("%s() %s XFer In process\n", __func__, usb_ep->name);
754 retval = -EAGAIN;
755 }
756 else if (value == 0) {
757 dwc_otg_ep_clear_stall(ep->pcd->otg_dev->core_if,
758 &ep->dwc_ep);
759 }
760 else if(value == 1) {
761 if (ep->dwc_ep.is_in == 1 && ep->pcd->otg_dev->core_if->dma_desc_enable) {
762 dtxfsts_data_t txstatus;
763 fifosize_data_t txfifosize;
764
765 txfifosize.d32 = dwc_read_reg32(&ep->pcd->otg_dev->core_if->core_global_regs->dptxfsiz_dieptxf[ep->dwc_ep.tx_fifo_num]);
766 txstatus.d32 = dwc_read_reg32(&ep->pcd->otg_dev->core_if->dev_if->in_ep_regs[ep->dwc_ep.num]->dtxfsts);
767
768 if(txstatus.b.txfspcavail < txfifosize.b.depth) {
769 DWC_WARN("%s() %s Data In Tx Fifo\n", __func__, usb_ep->name);
770 retval = -EAGAIN;
771 }
772 else {
773 if (ep->dwc_ep.num == 0) {
774 ep->pcd->ep0state = EP0_STALL;
775 }
776
777 ep->stopped = 1;
778 dwc_otg_ep_set_stall(ep->pcd->otg_dev->core_if,
779 &ep->dwc_ep);
780 }
781 }
782 else {
783 if (ep->dwc_ep.num == 0) {
784 ep->pcd->ep0state = EP0_STALL;
785 }
786
787 ep->stopped = 1;
788 dwc_otg_ep_set_stall(ep->pcd->otg_dev->core_if,
789 &ep->dwc_ep);
790 }
791 }
792 else if (value == 2) {
793 ep->dwc_ep.stall_clear_flag = 0;
794 }
795 else if (value == 3) {
796 ep->dwc_ep.stall_clear_flag = 1;
797 }
798
799 SPIN_UNLOCK_IRQRESTORE(&ep->pcd->lock, flags);
800 return retval;
801 }
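/*
 * Illustrative sketch (not part of the driver): values 0 and 1 of the
 * set_halt operation are what a Gadget Driver reaches through the
 * standard wrappers; values 2 and 3 appear to be used only internally
 * by the PCD for the stall-clear lock flag. The endpoint pointer here
 * is hypothetical.
 */
#if 0
static void example_halt(struct usb_ep *ep)
{
	usb_ep_set_halt(ep);	/* -> dwc_otg_pcd_ep_set_halt(ep, 1) */
	usb_ep_clear_halt(ep);	/* -> dwc_otg_pcd_ep_set_halt(ep, 0) */
}
#endif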
802
803 /**
804 * This function allocates a DMA Descriptor chain for the Endpoint
805 * buffer to be used for a transfer to/from the specified endpoint.
806 */
807 dwc_otg_dma_desc_t* dwc_otg_ep_alloc_desc_chain(uint32_t * dma_desc_addr, uint32_t count)
808 {
809
810 return dma_alloc_coherent(NULL, count * sizeof(dwc_otg_dma_desc_t), dma_desc_addr, GFP_KERNEL);
811 }
812
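/*
 * Deferred freeing of DMA descriptor chains: dma_free_coherent() cannot
 * safely be called with interrupts disabled, so dwc_otg_ep_free_desc_chain()
 * queues such requests on tofree_list and schedules free_list_agent to
 * release them later from process context.
 */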
813 LIST_HEAD(tofree_list);
814 DEFINE_SPINLOCK(tofree_list_lock);
815
816 struct free_param {
817 struct list_head list;
818
819 void* addr;
820 dma_addr_t dma_addr;
821 uint32_t size;
822 };
823 void free_list_agent_fn(void *data){
824 struct list_head free_list;
825 struct free_param *cur,*next;
826
827 spin_lock(&tofree_list_lock);
828 list_add(&free_list,&tofree_list);
829 list_del_init(&tofree_list);
830 spin_unlock(&tofree_list_lock);
831
832 list_for_each_entry_safe(cur,next,&free_list,list){
833 if (&cur->list == &free_list) break;
834 dma_free_coherent(NULL,cur->size,cur->addr,cur->dma_addr);
835 list_del(&cur->list);
836 kfree(cur);
837 }
838 }
839 DECLARE_WORK(free_list_agent,free_list_agent_fn);
840 /**
841 * This function frees a DMA Descriptor chain that was allocated by ep_alloc_desc.
842 */
843 void dwc_otg_ep_free_desc_chain(dwc_otg_dma_desc_t* desc_addr, uint32_t dma_desc_addr, uint32_t count)
844 {
845 if(irqs_disabled()){
846 struct free_param *fp = kmalloc(sizeof(struct free_param), GFP_ATOMIC); /* irqs are disabled here */
847 fp->addr=desc_addr;
848 fp->dma_addr=dma_desc_addr;
849 fp->size=count*sizeof(dwc_otg_dma_desc_t);
850
851 spin_lock(&tofree_list_lock);
852 list_add(&fp->list,&tofree_list);
853 spin_unlock(&tofree_list_lock);
854
855 schedule_work(&free_list_agent);
856 return ;
857 }
858 dma_free_coherent(NULL, count * sizeof(dwc_otg_dma_desc_t), desc_addr, dma_desc_addr);
859 }
860
861 #ifdef DWC_EN_ISOC
862
863 /**
864 * This function initializes a descriptor chain for Isochronous transfer
865 *
866 * @param core_if Programming view of DWC_otg controller.
867 * @param dwc_ep The EP to start the transfer on.
868 *
869 */
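/*
 * Descriptor chain layout used below: desc_cnt descriptors are built for
 * each of the two application buffers (2 * desc_cnt entries in total,
 * ping-pong style). The last descriptor of each buffer has IOC set so an
 * interrupt is raised when that buffer completes, and the final
 * descriptor of buffer 1 additionally has the L (last) bit set. The
 * chain base address is then programmed into DIEPDMA/DOEPDMA.
 */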
870 void dwc_otg_iso_ep_start_ddma_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *dwc_ep)
871 {
872
873 dsts_data_t dsts = { .d32 = 0};
874 depctl_data_t depctl = { .d32 = 0 };
875 volatile uint32_t *addr;
876 int i, j;
877
878 if(dwc_ep->is_in)
879 dwc_ep->desc_cnt = dwc_ep->buf_proc_intrvl / dwc_ep->bInterval;
880 else
881 dwc_ep->desc_cnt = dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm / dwc_ep->bInterval;
882
883
884 /** Allocate descriptors for double buffering */
885 dwc_ep->iso_desc_addr = dwc_otg_ep_alloc_desc_chain(&dwc_ep->iso_dma_desc_addr,dwc_ep->desc_cnt*2);
886 if(!dwc_ep->iso_desc_addr) {
887 DWC_WARN("%s, can't allocate DMA descriptor chain\n", __func__);
888 return;
889 }
890
891 dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts);
892
893 /** ISO OUT EP */
894 if(dwc_ep->is_in == 0) {
895 desc_sts_data_t sts = { .d32 =0 };
896 dwc_otg_dma_desc_t* dma_desc = dwc_ep->iso_desc_addr;
897 dma_addr_t dma_ad;
898 uint32_t data_per_desc;
899 dwc_otg_dev_out_ep_regs_t *out_regs =
900 core_if->dev_if->out_ep_regs[dwc_ep->num];
901 int offset;
902
903 addr = &core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl;
904 dma_ad = (dma_addr_t)dwc_read_reg32(&(out_regs->doepdma));
905
906 /** Buffer 0 descriptors setup */
907 dma_ad = dwc_ep->dma_addr0;
908
909 sts.b_iso_out.bs = BS_HOST_READY;
910 sts.b_iso_out.rxsts = 0;
911 sts.b_iso_out.l = 0;
912 sts.b_iso_out.sp = 0;
913 sts.b_iso_out.ioc = 0;
914 sts.b_iso_out.pid = 0;
915 sts.b_iso_out.framenum = 0;
916
917 offset = 0;
918 for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm)
919 {
920
921 for(j = 0; j < dwc_ep->pkt_per_frm; ++j)
922 {
923 data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ?
924 dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket;
925
926 data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0;
927 sts.b_iso_out.rxbytes = data_per_desc;
928 writel((uint32_t)dma_ad, &dma_desc->buf);
929 writel(sts.d32, &dma_desc->status);
930
931 offset += data_per_desc;
932 dma_desc ++;
933 //(uint32_t)dma_ad += data_per_desc;
934 dma_ad = (uint32_t)dma_ad + data_per_desc;
935 }
936 }
937
938 for(j = 0; j < dwc_ep->pkt_per_frm - 1; ++j)
939 {
940 data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ?
941 dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket;
942 data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0;
943 sts.b_iso_out.rxbytes = data_per_desc;
944 writel((uint32_t)dma_ad, &dma_desc->buf);
945 writel(sts.d32, &dma_desc->status);
946
947 offset += data_per_desc;
948 dma_desc ++;
949 //(uint32_t)dma_ad += data_per_desc;
950 dma_ad = (uint32_t)dma_ad + data_per_desc;
951 }
952
953 sts.b_iso_out.ioc = 1;
954 data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ?
955 dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket;
956 data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0;
957 sts.b_iso_out.rxbytes = data_per_desc;
958
959 writel((uint32_t)dma_ad, &dma_desc->buf);
960 writel(sts.d32, &dma_desc->status);
961 dma_desc ++;
962
963 /** Buffer 1 descriptors setup */
964 sts.b_iso_out.ioc = 0;
965 dma_ad = dwc_ep->dma_addr1;
966
967 offset = 0;
968 for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm)
969 {
970 for(j = 0; j < dwc_ep->pkt_per_frm; ++j)
971 {
972 data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ?
973 dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket;
974 data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0;
975 sts.b_iso_out.rxbytes = data_per_desc;
976 writel((uint32_t)dma_ad, &dma_desc->buf);
977 writel(sts.d32, &dma_desc->status);
978
979 offset += data_per_desc;
980 dma_desc ++;
981 //(uint32_t)dma_ad += data_per_desc;
982 dma_ad = (uint32_t)dma_ad + data_per_desc;
983 }
984 }
985 for(j = 0; j < dwc_ep->pkt_per_frm - 1; ++j)
986 {
987 data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ?
988 dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket;
989 data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0;
990 sts.b_iso_out.rxbytes = data_per_desc;
991 writel((uint32_t)dma_ad, &dma_desc->buf);
992 writel(sts.d32, &dma_desc->status);
993
994 offset += data_per_desc;
995 dma_desc ++;
996 //(uint32_t)dma_ad += data_per_desc;
997 dma_ad = (uint32_t)dma_ad + data_per_desc;
998 }
999
1000 sts.b_iso_out.ioc = 1;
1001 sts.b_iso_out.l = 1;
1002 data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ?
1003 dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket;
1004 data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0;
1005 sts.b_iso_out.rxbytes = data_per_desc;
1006
1007 writel((uint32_t)dma_ad, &dma_desc->buf);
1008 writel(sts.d32, &dma_desc->status);
1009
1010 dwc_ep->next_frame = 0;
1011
1012 /** Write dma_ad into DOEPDMA register */
1013 dwc_write_reg32(&(out_regs->doepdma),(uint32_t)dwc_ep->iso_dma_desc_addr);
1014
1015 }
1016 /** ISO IN EP */
1017 else {
1018 desc_sts_data_t sts = { .d32 =0 };
1019 dwc_otg_dma_desc_t* dma_desc = dwc_ep->iso_desc_addr;
1020 dma_addr_t dma_ad;
1021 dwc_otg_dev_in_ep_regs_t *in_regs =
1022 core_if->dev_if->in_ep_regs[dwc_ep->num];
1023 unsigned int frmnumber;
1024 fifosize_data_t txfifosize,rxfifosize;
1025
1026 txfifosize.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[dwc_ep->num]->dtxfsts);
1027 rxfifosize.d32 = dwc_read_reg32(&core_if->core_global_regs->grxfsiz);
1028
1029
1030 addr = &core_if->dev_if->in_ep_regs[dwc_ep->num]->diepctl;
1031
1032 dma_ad = dwc_ep->dma_addr0;
1033
1034 dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts);
1035
1036 sts.b_iso_in.bs = BS_HOST_READY;
1037 sts.b_iso_in.txsts = 0;
1038 sts.b_iso_in.sp = (dwc_ep->data_per_frame % dwc_ep->maxpacket)? 1 : 0;
1039 sts.b_iso_in.ioc = 0;
1040 sts.b_iso_in.pid = dwc_ep->pkt_per_frm;
1041
1042
1043 frmnumber = dwc_ep->next_frame;
1044
1045 sts.b_iso_in.framenum = frmnumber;
1046 sts.b_iso_in.txbytes = dwc_ep->data_per_frame;
1047 sts.b_iso_in.l = 0;
1048
1049 /** Buffer 0 descriptors setup */
1050 for(i = 0; i < dwc_ep->desc_cnt - 1; i++)
1051 {
1052 writel((uint32_t)dma_ad, &dma_desc->buf);
1053 writel(sts.d32, &dma_desc->status);
1054 dma_desc ++;
1055
1056 //(uint32_t)dma_ad += dwc_ep->data_per_frame;
1057 dma_ad = (uint32_t)dma_ad + dwc_ep->data_per_frame;
1058 sts.b_iso_in.framenum += dwc_ep->bInterval;
1059 }
1060
1061 sts.b_iso_in.ioc = 1;
1062 writel((uint32_t)dma_ad, &dma_desc->buf);
1063 writel(sts.d32, &dma_desc->status);
1064 ++dma_desc;
1065
1066 /** Buffer 1 descriptors setup */
1067 sts.b_iso_in.ioc = 0;
1068 dma_ad = dwc_ep->dma_addr1;
1069
1070 for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm)
1071 {
1072 writel((uint32_t)dma_ad, &dma_desc->buf);
1073 writel(sts.d32, &dma_desc->status);
1074 dma_desc ++;
1075
1076 //(uint32_t)dma_ad += dwc_ep->data_per_frame;
1077 dma_ad = (uint32_t)dma_ad + dwc_ep->data_per_frame;
1078 sts.b_iso_in.framenum += dwc_ep->bInterval;
1079
1080 sts.b_iso_in.ioc = 0;
1081 }
1082 sts.b_iso_in.ioc = 1;
1083 sts.b_iso_in.l = 1;
1084
1085 writel((uint32_t)dma_ad, &dma_desc->buf);
1086 writel(sts.d32, &dma_desc->status);
1087
1088 dwc_ep->next_frame = sts.b_iso_in.framenum + dwc_ep->bInterval;
1089
1090 /** Write dma_ad into diepdma register */
1091 dwc_write_reg32(&(in_regs->diepdma),(uint32_t)dwc_ep->iso_dma_desc_addr);
1092 }
1093 /** Enable endpoint, clear nak */
1094 depctl.d32 = 0;
1095 depctl.b.epena = 1;
1096 depctl.b.usbactep = 1;
1097 depctl.b.cnak = 1;
1098
1099 dwc_modify_reg32(addr, depctl.d32,depctl.d32);
1100 depctl.d32 = dwc_read_reg32(addr);
1101 }
1102
1103 /**
1104 * This function initializes a descriptor chain for Isochronous transfer
1105 *
1106 * @param core_if Programming view of DWC_otg controller.
1107 * @param ep The EP to start the transfer on.
1108 *
1109 */
1110
1111 void dwc_otg_iso_ep_start_buf_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
1112 {
1113 depctl_data_t depctl = { .d32 = 0 };
1114 volatile uint32_t *addr;
1115
1116
1117 if(ep->is_in) {
1118 addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
1119 } else {
1120 addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
1121 }
1122
1123
1124 if(core_if->dma_enable == 0 || core_if->dma_desc_enable!= 0) {
1125 return;
1126 } else {
1127 deptsiz_data_t deptsiz = { .d32 = 0 };
1128
1129 ep->xfer_len = ep->data_per_frame * ep->buf_proc_intrvl / ep->bInterval;
1130 ep->pkt_cnt = (ep->xfer_len - 1 + ep->maxpacket) /
1131 ep->maxpacket;
1132 ep->xfer_count = 0;
1133 ep->xfer_buff = (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0;
1134 ep->dma_addr = (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0;
1135
1136 if(ep->is_in) {
1137 /* Program the transfer size and packet count
1138 * as follows: xfersize = N * maxpacket +
1139 * short_packet pktcnt = N + (short_packet
1140 * exist ? 1 : 0)
1141 */
1142 deptsiz.b.mc = ep->pkt_per_frm;
1143 deptsiz.b.xfersize = ep->xfer_len;
1144 deptsiz.b.pktcnt =
1145 (ep->xfer_len - 1 + ep->maxpacket) /
1146 ep->maxpacket;
1147 dwc_write_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz, deptsiz.d32);
1148
1149 /* Write the DMA register */
1150 dwc_write_reg32 (&(core_if->dev_if->in_ep_regs[ep->num]->diepdma), (uint32_t)ep->dma_addr);
1151
1152 } else {
1153 deptsiz.b.pktcnt =
1154 (ep->xfer_len + (ep->maxpacket - 1)) /
1155 ep->maxpacket;
1156 deptsiz.b.xfersize = deptsiz.b.pktcnt * ep->maxpacket;
1157
1158 dwc_write_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz, deptsiz.d32);
1159
1160 /* Write the DMA register */
1161 dwc_write_reg32 (&(core_if->dev_if->out_ep_regs[ep->num]->doepdma), (uint32_t)ep->dma_addr);
1162
1163 }
1164 /** Enable endpoint, clear nak */
1165 depctl.d32 = 0;
1166 dwc_modify_reg32(addr, depctl.d32,depctl.d32);
1167
1168 depctl.b.epena = 1;
1169 depctl.b.cnak = 1;
1170
1171 dwc_modify_reg32(addr, depctl.d32,depctl.d32);
1172 }
1173 }
1174
1175
1176 /**
1177 * This function does the setup for a data transfer for an EP and
1178 * starts the transfer. For an IN transfer, the packets will be
1179 * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers,
1180 * the packets are unloaded from the Rx FIFO in the ISR.
1181 *
1182 * @param core_if Programming view of DWC_otg controller.
1183 * @param ep The EP to start the transfer on.
1184 */
1185
1186 void dwc_otg_iso_ep_start_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
1187 {
1188 if(core_if->dma_enable) {
1189 if(core_if->dma_desc_enable) {
1190 if(ep->is_in) {
1191 ep->desc_cnt = ep->pkt_cnt / ep->pkt_per_frm;
1192 } else {
1193 ep->desc_cnt = ep->pkt_cnt;
1194 }
1195 dwc_otg_iso_ep_start_ddma_transfer(core_if, ep);
1196 } else {
1197 if(core_if->pti_enh_enable) {
1198 dwc_otg_iso_ep_start_buf_transfer(core_if, ep);
1199 } else {
1200 ep->cur_pkt_addr = (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0;
1201 ep->cur_pkt_dma_addr = (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0;
1202 dwc_otg_iso_ep_start_frm_transfer(core_if, ep);
1203 }
1204 }
1205 } else {
1206 ep->cur_pkt_addr = (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0;
1207 ep->cur_pkt_dma_addr = (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0;
1208 dwc_otg_iso_ep_start_frm_transfer(core_if, ep);
1209 }
1210 }
1211
1212 /**
1213 * This function stops an ISO transfer on an EP. It disables the
1214 * endpoint, frees the ISO DMA descriptor chain (when descriptor DMA
1215 * mode is enabled) and resets the EP's ISO transfer state.
1217 *
1218 * @param core_if Programming view of DWC_otg controller.
1219 * @param ep The EP to stop the transfer on.
1220 */
1221
1222 void dwc_otg_iso_ep_stop_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
1223 {
1224 depctl_data_t depctl = { .d32 = 0 };
1225 volatile uint32_t *addr;
1226
1227 if(ep->is_in == 1) {
1228 addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
1229 }
1230 else {
1231 addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
1232 }
1233
1234 /* disable the ep */
1235 depctl.d32 = dwc_read_reg32(addr);
1236
1237 depctl.b.epdis = 1;
1238 depctl.b.snak = 1;
1239
1240 dwc_write_reg32(addr, depctl.d32);
1241
1242 if(core_if->dma_desc_enable &&
1243 ep->iso_desc_addr && ep->iso_dma_desc_addr) {
1244 dwc_otg_ep_free_desc_chain(ep->iso_desc_addr,ep->iso_dma_desc_addr,ep->desc_cnt * 2);
1245 }
1246
1247 /* reset variables */
1248 ep->dma_addr0 = 0;
1249 ep->dma_addr1 = 0;
1250 ep->xfer_buff0 = 0;
1251 ep->xfer_buff1 = 0;
1252 ep->data_per_frame = 0;
1253 ep->data_pattern_frame = 0;
1254 ep->sync_frame = 0;
1255 ep->buf_proc_intrvl = 0;
1256 ep->bInterval = 0;
1257 ep->proc_buf_num = 0;
1258 ep->pkt_per_frm = 0;
1260 ep->desc_cnt = 0;
1261 ep->iso_desc_addr = 0;
1262 ep->iso_dma_desc_addr = 0;
1263 }
1264
1265
1266 /**
1267 * This function is used to submit an ISOC Transfer Request to an EP.
1268 *
1269 * - Every time a sync period completes the request's completion callback
1270 * is called to provide data to the gadget driver.
1271 * - Once submitted the request cannot be modified.
1272 * - Each request is turned into periodic data packets until the ISO
1273 * transfer is stopped.
1274 */
1275 static int dwc_otg_pcd_iso_ep_start(struct usb_ep *usb_ep, struct usb_iso_request *req,
1276 gfp_t gfp_flags)
1277 {
1278 dwc_otg_pcd_ep_t *ep;
1279 dwc_otg_pcd_t *pcd;
1280 dwc_ep_t *dwc_ep;
1281 unsigned long flags = 0;
1282 int32_t frm_data;
1283 dwc_otg_core_if_t *core_if;
1284 dcfg_data_t dcfg;
1285 dsts_data_t dsts;
1286
1287
1288 if (!req || !req->process_buffer || !req->buf0 || !req->buf1) {
1289 DWC_WARN("%s, bad params\n", __func__);
1290 return -EINVAL;
1291 }
1292
1293 ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep);
1294
1295 if (!usb_ep || !ep->desc || ep->dwc_ep.num == 0) {
1296 DWC_WARN("%s, bad ep\n", __func__);
1297 return -EINVAL;
1298 }
1299
1300 pcd = ep->pcd;
1301 core_if = GET_CORE_IF(pcd);
1302
1303 dcfg.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dcfg);
1304
1305 if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) {
1306 DWC_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n", pcd->gadget.speed);
1307 DWC_WARN("%s, bogus device state\n", __func__);
1308 return -ESHUTDOWN;
1309 }
1310
1311 SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags);
1312
1313 dwc_ep = &ep->dwc_ep;
1314
1315 if(ep->iso_req) {
1316 DWC_WARN("%s, iso request in progress\n", __func__);
1317 }
1318 req->status = -EINPROGRESS;
1319
1320 dwc_ep->dma_addr0 = req->dma0;
1321 dwc_ep->dma_addr1 = req->dma1;
1322
1323 dwc_ep->xfer_buff0 = req->buf0;
1324 dwc_ep->xfer_buff1 = req->buf1;
1325
1326 ep->iso_req = req;
1327
1328 dwc_ep->data_per_frame = req->data_per_frame;
1329
1330 /** @todo - pattern data support is to be implemented in the future */
1331 dwc_ep->data_pattern_frame = req->data_pattern_frame;
1332 dwc_ep->sync_frame = req->sync_frame;
1333
1334 dwc_ep->buf_proc_intrvl = req->buf_proc_intrvl;
1335
1336 dwc_ep->bInterval = 1 << (ep->desc->bInterval - 1);
1337
1338 dwc_ep->proc_buf_num = 0;
1339
1340 dwc_ep->pkt_per_frm = 0;
1341 frm_data = ep->dwc_ep.data_per_frame;
1342 while(frm_data > 0) {
1343 dwc_ep->pkt_per_frm++;
1344 frm_data -= ep->dwc_ep.maxpacket;
1345 }
1346
1347 dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts);
1348
1349 if(req->flags & USB_REQ_ISO_ASAP) {
1350 dwc_ep->next_frame = dsts.b.soffn + 1;
1351 if(dwc_ep->bInterval != 1){
1352 dwc_ep->next_frame = dwc_ep->next_frame + (dwc_ep->bInterval - 1 - dwc_ep->next_frame % dwc_ep->bInterval);
1353 }
1354 } else {
1355 dwc_ep->next_frame = req->start_frame;
1356 }
1357
1358
1359 if(!core_if->pti_enh_enable) {
1360 dwc_ep->pkt_cnt = dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm / dwc_ep->bInterval;
1361 } else {
1362 dwc_ep->pkt_cnt =
1363 (dwc_ep->data_per_frame * (dwc_ep->buf_proc_intrvl / dwc_ep->bInterval)
1364 - 1 + dwc_ep->maxpacket) / dwc_ep->maxpacket;
1365 }
1366
1367 if(core_if->dma_desc_enable) {
1368 dwc_ep->desc_cnt =
1369 dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm / dwc_ep->bInterval;
1370 }
1371
1372 dwc_ep->pkt_info = kmalloc(sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt, GFP_KERNEL);
1373 if(!dwc_ep->pkt_info) {
1374 SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);
1375 return -ENOMEM;
}
1376 if(core_if->pti_enh_enable) {
1377 memset(dwc_ep->pkt_info, 0, sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt);
1378 }
1379
1380 dwc_ep->cur_pkt = 0;
1381
1382 SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);
1383
1384 dwc_otg_iso_ep_start_transfer(core_if, dwc_ep);
1385
1386 return 0;
1387 }
1388
1389 /**
1390 * This function stops ISO EP Periodic Data Transfer.
1391 */
1392 static int dwc_otg_pcd_iso_ep_stop(struct usb_ep *usb_ep, struct usb_iso_request *req)
1393 {
1394 dwc_otg_pcd_ep_t *ep;
1395 dwc_otg_pcd_t *pcd;
1396 dwc_ep_t *dwc_ep;
1397 unsigned long flags;
1398
1399 ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep);
1400
1401 if (!usb_ep || !ep->desc || ep->dwc_ep.num == 0) {
1402 DWC_WARN("%s, bad ep\n", __func__);
1403 return -EINVAL;
1404 }
1405
1406 pcd = ep->pcd;
1407
1408 if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) {
1409 DWC_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n", pcd->gadget.speed);
1410 DWC_WARN("%s, bogus device state\n", __func__);
1411 return -ESHUTDOWN;
1412 }
1413
1414 dwc_ep = &ep->dwc_ep;
1415
1416 dwc_otg_iso_ep_stop_transfer(GET_CORE_IF(pcd), dwc_ep);
1417
1418 kfree(dwc_ep->pkt_info);
1419
1420 SPIN_LOCK_IRQSAVE(&pcd->lock, flags);
1421
1422 if(ep->iso_req != req) {
1423 SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);
1424 return -EINVAL;
}
1425
1426 req->status = -ECONNRESET;
1427
1428 SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);
1429
1430
1431 ep->iso_req = 0;
1432
1433 return 0;
1434 }
1435
1436 /**
1437 * This function is used for periodic data exchange between the PCD and
1438 * gadget drivers for Isochronous EPs
1439 *
1440 * - Every time a sync period completes this function is called to
1441 * perform data exchange between PCD and gadget
1442 */
1443 void dwc_otg_iso_buffer_done(dwc_otg_pcd_ep_t *ep, dwc_otg_pcd_iso_request_t *req)
1444 {
1445 int i;
1446 struct usb_gadget_iso_packet_descriptor *iso_packet;
1447 dwc_ep_t *dwc_ep;
1448
1449 dwc_ep = &ep->dwc_ep;
1450
1451 if(ep->iso_req->status == -ECONNRESET) {
1452 DWC_PRINT("Device has already disconnected\n");
1453 /*Device has been disconnected*/
1454 return;
1455 }
1456
1457 if(dwc_ep->proc_buf_num != 0) {
1458 iso_packet = ep->iso_req->iso_packet_desc0;
1459 }
1460
1461 else {
1462 iso_packet = ep->iso_req->iso_packet_desc1;
1463 }
1464
1465 /* Fill in ISOC packets descriptors & pass to gadget driver*/
1466
1467 for(i = 0; i < dwc_ep->pkt_cnt; ++i) {
1468 iso_packet[i].status = dwc_ep->pkt_info[i].status;
1469 iso_packet[i].offset = dwc_ep->pkt_info[i].offset;
1470 iso_packet[i].actual_length = dwc_ep->pkt_info[i].length;
1471 dwc_ep->pkt_info[i].status = 0;
1472 dwc_ep->pkt_info[i].offset = 0;
1473 dwc_ep->pkt_info[i].length = 0;
1474 }
1475
1476 /* Call callback function to process data buffer */
1477 ep->iso_req->status = 0;/* success */
1478
1479 SPIN_UNLOCK(&ep->pcd->lock);
1480 ep->iso_req->process_buffer(&ep->ep, ep->iso_req);
1481 SPIN_LOCK(&ep->pcd->lock);
1482 }
1483
1484
1485 static struct usb_iso_request *dwc_otg_pcd_alloc_iso_request(struct usb_ep *ep,int packets,
1486 gfp_t gfp_flags)
1487 {
1488 struct usb_iso_request *pReq = NULL;
1489 uint32_t req_size;
1490
1491
1492 req_size = sizeof(struct usb_iso_request);
1493 req_size += (2 * packets * (sizeof(struct usb_gadget_iso_packet_descriptor)));
1494
1495
1496 pReq = kmalloc(req_size, gfp_flags);
1497 if (!pReq) {
1498 DWC_WARN("%s, can't allocate Iso Request\n", __func__);
1499 return 0;
1500 }
1501 pReq->iso_packet_desc0 = (void*) (pReq + 1);
1502
1503 pReq->iso_packet_desc1 = pReq->iso_packet_desc0 + packets;
1504
1505 return pReq;
1506 }
1507
1508 static void dwc_otg_pcd_free_iso_request(struct usb_ep *ep, struct usb_iso_request *req)
1509 {
1510 kfree(req);
1511 }
1512
1513 static struct usb_isoc_ep_ops dwc_otg_pcd_ep_ops =
1514 {
1515 .ep_ops =
1516 {
1517 .enable = dwc_otg_pcd_ep_enable,
1518 .disable = dwc_otg_pcd_ep_disable,
1519
1520 .alloc_request = dwc_otg_pcd_alloc_request,
1521 .free_request = dwc_otg_pcd_free_request,
1522
1523 //.alloc_buffer = dwc_otg_pcd_alloc_buffer,
1524 //.free_buffer = dwc_otg_pcd_free_buffer,
1525
1526 .queue = dwc_otg_pcd_ep_queue,
1527 .dequeue = dwc_otg_pcd_ep_dequeue,
1528
1529 .set_halt = dwc_otg_pcd_ep_set_halt,
1530 .fifo_status = 0,
1531 .fifo_flush = 0,
1532 },
1533 .iso_ep_start = dwc_otg_pcd_iso_ep_start,
1534 .iso_ep_stop = dwc_otg_pcd_iso_ep_stop,
1535 .alloc_iso_request = dwc_otg_pcd_alloc_iso_request,
1536 .free_iso_request = dwc_otg_pcd_free_iso_request,
1537 };
1538
1539 #else
1540
1541
1542 static struct usb_ep_ops dwc_otg_pcd_ep_ops =
1543 {
1544 .enable = dwc_otg_pcd_ep_enable,
1545 .disable = dwc_otg_pcd_ep_disable,
1546
1547 .alloc_request = dwc_otg_pcd_alloc_request,
1548 .free_request = dwc_otg_pcd_free_request,
1549
1550 // .alloc_buffer = dwc_otg_pcd_alloc_buffer,
1551 // .free_buffer = dwc_otg_pcd_free_buffer,
1552
1553 .queue = dwc_otg_pcd_ep_queue,
1554 .dequeue = dwc_otg_pcd_ep_dequeue,
1555
1556 .set_halt = dwc_otg_pcd_ep_set_halt,
1557 .fifo_status = 0,
1558 .fifo_flush = 0,
1559
1560
1561 };
1562
1563 #endif /* DWC_EN_ISOC */
1564 /* Gadget Operations */
1565 /**
1566 * The following gadget operations will be implemented in the DWC_otg
1567 * PCD. Functions in the API that are not described below are not
1568 * implemented.
1569 *
1570 * The Gadget API provides wrapper functions for each of the function
1571 * pointers defined in usb_gadget_ops. The Gadget Driver calls the
1572 * wrapper function, which then calls the underlying PCD function. The
1573 * following sections are named according to the wrapper functions
1574 * (except for ioctl, which doesn't have a wrapper function). Within
1575 * each section, the corresponding DWC_otg PCD function name is
1576 * specified.
1577 *
1578 */
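/*
 * Only get_frame and wakeup are provided here; a Gadget Driver reaches
 * them through the usb_gadget_frame_number() and usb_gadget_wakeup()
 * wrappers respectively.
 */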
1579
1580 /**
1581 * Gets the USB frame number of the last SOF.
1582 */
1583 static int dwc_otg_pcd_get_frame(struct usb_gadget *gadget)
1584 {
1585 dwc_otg_pcd_t *pcd;
1586
1587 DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, gadget);
1588
1589 if (gadget == 0) {
1590 return -ENODEV;
1591 }
1592 else {
1593 pcd = container_of(gadget, dwc_otg_pcd_t, gadget);
1594 return dwc_otg_get_frame_number(GET_CORE_IF(pcd));
1595 }
1596
1597 return 0;
1598 }
1599
1600 void dwc_otg_pcd_initiate_srp(dwc_otg_pcd_t *pcd)
1601 {
1602 uint32_t *addr = (uint32_t *)&(GET_CORE_IF(pcd)->core_global_regs->gotgctl);
1603 gotgctl_data_t mem;
1604 gotgctl_data_t val;
1605
1606 val.d32 = dwc_read_reg32(addr);
1607 if (val.b.sesreq) {
1608 DWC_ERROR("Session Request Already active!\n");
1609 return;
1610 }
1611
1612 DWC_NOTICE("Session Request Initiated\n");
1613 mem.d32 = dwc_read_reg32(addr);
1614 mem.b.sesreq = 1;
1615 dwc_write_reg32(addr, mem.d32);
1616
1617 /* Start the SRP timer */
1618 dwc_otg_pcd_start_srp_timer(pcd);
1619 return;
1620 }
1621
1622 void dwc_otg_pcd_remote_wakeup(dwc_otg_pcd_t *pcd, int set)
1623 {
1624 dctl_data_t dctl = {.d32=0};
1625 volatile uint32_t *addr = &(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dctl);
1626
1627 if (dwc_otg_is_device_mode(GET_CORE_IF(pcd))) {
1628 if (pcd->remote_wakeup_enable) {
1629 if (set) {
1630 dctl.b.rmtwkupsig = 1;
1631 dwc_modify_reg32(addr, 0, dctl.d32);
1632 DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
1633 mdelay(1);
1634 dwc_modify_reg32(addr, dctl.d32, 0);
1635 DWC_DEBUGPL(DBG_PCD, "Clear Remote Wakeup\n");
1636 }
1637 else {
1638 }
1639 }
1640 else {
1641 DWC_DEBUGPL(DBG_PCD, "Remote Wakeup is disabled\n");
1642 }
1643 }
1644 return;
1645 }
1646
1647 /**
1648 * Initiates Session Request Protocol (SRP) to wakeup the host if no
1649 * session is in progress. If a session is already in progress, but
1650 * the device is suspended, remote wakeup signaling is started.
1651 *
1652 */
1653 static int dwc_otg_pcd_wakeup(struct usb_gadget *gadget)
1654 {
1655 unsigned long flags;
1656 dwc_otg_pcd_t *pcd;
1657 dsts_data_t dsts;
1658 gotgctl_data_t gotgctl;
1659
1660 DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, gadget);
1661
1662 if (gadget == 0) {
1663 return -ENODEV;
1664 }
1665 else {
1666 pcd = container_of(gadget, dwc_otg_pcd_t, gadget);
1667 }
1668 SPIN_LOCK_IRQSAVE(&pcd->lock, flags);
1669
1670 /*
1671 * This function starts the Protocol if no session is in progress. If
1672 * a session is already in progress, but the device is suspended,
1673 * remote wakeup signaling is started.
1674 */
1675
1676 /* Check if valid session */
1677 gotgctl.d32 = dwc_read_reg32(&(GET_CORE_IF(pcd)->core_global_regs->gotgctl));
1678 if (gotgctl.b.bsesvld) {
1679 /* Check if suspend state */
1680 dsts.d32 = dwc_read_reg32(&(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dsts));
1681 if (dsts.b.suspsts) {
1682 dwc_otg_pcd_remote_wakeup(pcd, 1);
1683 }
1684 }
1685 else {
1686 dwc_otg_pcd_initiate_srp(pcd);
1687 }
1688
1689 SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);
1690 return 0;
1691 }
1692
1693 static const struct usb_gadget_ops dwc_otg_pcd_ops =
1694 {
1695 .get_frame = dwc_otg_pcd_get_frame,
1696 .wakeup = dwc_otg_pcd_wakeup,
1697 // current versions must always be self-powered
1698 };
1699
1700 /**
1701 * This function updates the otg values in the gadget structure.
1702 */
1703 void dwc_otg_pcd_update_otg(dwc_otg_pcd_t *pcd, const unsigned reset)
1704 {
1705
1706 if (!pcd->gadget.is_otg)
1707 return;
1708
1709 if (reset) {
1710 pcd->b_hnp_enable = 0;
1711 pcd->a_hnp_support = 0;
1712 pcd->a_alt_hnp_support = 0;
1713 }
1714
1715 pcd->gadget.b_hnp_enable = pcd->b_hnp_enable;
1716 pcd->gadget.a_hnp_support = pcd->a_hnp_support;
1717 pcd->gadget.a_alt_hnp_support = pcd->a_alt_hnp_support;
1718 }
1719
1720 /**
1721 * This function is the top level PCD interrupt handler.
1722 */
1723 static irqreturn_t dwc_otg_pcd_irq(int irq, void *dev)
1724 {
1725 dwc_otg_pcd_t *pcd = dev;
1726 int32_t retval = IRQ_NONE;
1727
1728 retval = dwc_otg_pcd_handle_intr(pcd);
1729 return IRQ_RETVAL(retval);
1730 }
1731
1732 /**
1733 * PCD Callback function for initializing the PCD when switching to
1734 * device mode.
1735 *
1736 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
1737 */
1738 static int32_t dwc_otg_pcd_start_cb(void *p)
1739 {
1740 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)p;
1741
1742 /*
1743 * Initialize the core for device mode.
1744 */
1745 if (dwc_otg_is_device_mode(GET_CORE_IF(pcd))) {
1746 dwc_otg_core_dev_init(GET_CORE_IF(pcd));
1747 }
1748 return 1;
1749 }
1750
1751 /**
1752 * PCD Callback function for stopping the PCD when switching to Host
1753 * mode.
1754 *
1755 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
1756 */
1757 static int32_t dwc_otg_pcd_stop_cb(void *p)
1758 {
1759 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)p;
1760 extern void dwc_otg_pcd_stop(dwc_otg_pcd_t *_pcd);
1761
1762 dwc_otg_pcd_stop(pcd);
1763 return 1;
1764 }
1765
1766
1767 /**
1768 * PCD Callback function for notifying the PCD when the device is
1769 * being suspended.
1770 *
1771 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
1772 */
1773 static int32_t dwc_otg_pcd_suspend_cb(void *p)
1774 {
1775 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)p;
1776
1777 if (pcd->driver && pcd->driver->suspend) {
1778 SPIN_UNLOCK(&pcd->lock);
1779 pcd->driver->suspend(&pcd->gadget);
1780 SPIN_LOCK(&pcd->lock);
1781 }
1782
1783 return 1;
1784 }
1785
1786
1787 /**
1788 * PCD Callback function for notifying the PCD when resuming from
1789 * suspend.
1790 *
1791 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
1792 */
1793 static int32_t dwc_otg_pcd_resume_cb(void *p)
1794 {
1795 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)p;
1796
1797 if (pcd->driver && pcd->driver->resume) {
1798 SPIN_UNLOCK(&pcd->lock);
1799 pcd->driver->resume(&pcd->gadget);
1800 SPIN_LOCK(&pcd->lock);
1801 }
1802
1803 /* Stop the SRP timeout timer. */
1804 if ((GET_CORE_IF(pcd)->core_params->phy_type != DWC_PHY_TYPE_PARAM_FS) ||
1805 (!GET_CORE_IF(pcd)->core_params->i2c_enable)) {
1806 if (GET_CORE_IF(pcd)->srp_timer_started) {
1807 GET_CORE_IF(pcd)->srp_timer_started = 0;
1808 del_timer(&pcd->srp_timer);
1809 }
1810 }
1811 return 1;
1812 }
1813
1814
1815 /**
1816 * PCD Callback structure for handling mode switching.
1817 */
1818 static dwc_otg_cil_callbacks_t pcd_callbacks =
1819 {
1820 .start = dwc_otg_pcd_start_cb,
1821 .stop = dwc_otg_pcd_stop_cb,
1822 .suspend = dwc_otg_pcd_suspend_cb,
1823 .resume_wakeup = dwc_otg_pcd_resume_cb,
1824 .p = 0, /* Set at registration */
1825 };
1826
1827 /**
1828 * This function is called when the SRP timer expires. The SRP should
1829 * complete within 6 seconds.
1830 */
1831 static void srp_timeout(unsigned long ptr)
1832 {
1833 gotgctl_data_t gotgctl;
1834 dwc_otg_core_if_t *core_if = (dwc_otg_core_if_t *)ptr;
1835 volatile uint32_t *addr = &core_if->core_global_regs->gotgctl;
1836
1837 gotgctl.d32 = dwc_read_reg32(addr);
1838
1839 core_if->srp_timer_started = 0;
1840
1841 if ((core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) &&
1842 (core_if->core_params->i2c_enable)) {
1843 DWC_PRINT("SRP Timeout\n");
1844
1845 if ((core_if->srp_success) &&
1846 (gotgctl.b.bsesvld)) {
1847 if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) {
1848 core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p);
1849 }
1850
1851 /* Clear Session Request */
1852 gotgctl.d32 = 0;
1853 gotgctl.b.sesreq = 1;
1854 dwc_modify_reg32(&core_if->core_global_regs->gotgctl,
1855 gotgctl.d32, 0);
1856
1857 core_if->srp_success = 0;
1858 }
1859 else {
1860 DWC_ERROR("Device not connected/responding\n");
1861 gotgctl.b.sesreq = 0;
1862 dwc_write_reg32(addr, gotgctl.d32);
1863 }
1864 }
1865 else if (gotgctl.b.sesreq) {
1866 DWC_PRINT("SRP Timeout\n");
1867
1868 DWC_ERROR("Device not connected/responding\n");
1869 gotgctl.b.sesreq = 0;
1870 dwc_write_reg32(addr, gotgctl.d32);
1871 }
1872 else {
1873 DWC_PRINT(" SRP GOTGCTL=%0x\n", gotgctl.d32);
1874 }
1875 }
1876
1877 /**
1878 * Start the SRP timer to detect when the SRP does not complete within
1879 * 6 seconds.
1880 *
1881 * @param pcd the pcd structure.
1882 */
1883 void dwc_otg_pcd_start_srp_timer(dwc_otg_pcd_t *pcd)
1884 {
1885 struct timer_list *srp_timer = &pcd->srp_timer;
1886 GET_CORE_IF(pcd)->srp_timer_started = 1;
1887 init_timer(srp_timer);
1888 srp_timer->function = srp_timeout;
1889 srp_timer->data = (unsigned long)GET_CORE_IF(pcd);
1890 srp_timer->expires = jiffies + (HZ*6);
1891 add_timer(srp_timer);
1892 }
1893
1894 /**
1895 * Tasklet that restarts queued transfers on endpoints whose
1896 * queue_sof flag has been set by the PCD interrupt handling.
1897 */
1898 extern void start_next_request(dwc_otg_pcd_ep_t *ep);
1899
1900 static void start_xfer_tasklet_func (unsigned long data)
1901 {
1902 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t*)data;
1903 dwc_otg_core_if_t *core_if = pcd->otg_dev->core_if;
1904
1905 int i;
1906 depctl_data_t diepctl;
1907
1908 DWC_DEBUGPL(DBG_PCDV, "Start xfer tasklet\n");
1909
1910 diepctl.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[0]->diepctl);
1911
1912 if (pcd->ep0.queue_sof) {
1913 pcd->ep0.queue_sof = 0;
1914 start_next_request (&pcd->ep0);
1915 // break;
1916 }
1917
1918 for (i=0; i<core_if->dev_if->num_in_eps; i++)
1919 {
1920 depctl_data_t diepctl;
1921 diepctl.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[i]->diepctl);
1922
1923 if (pcd->in_ep[i].queue_sof) {
1924 pcd->in_ep[i].queue_sof = 0;
1925 start_next_request (&pcd->in_ep[i]);
1926 // break;
1927 }
1928 }
1929
1930 return;
1931 }
1932
1939 static struct tasklet_struct start_xfer_tasklet = {
1940 .next = NULL,
1941 .state = 0,
1942 .count = ATOMIC_INIT(0),
1943 .func = start_xfer_tasklet_func,
1944 .data = 0,
1945 };
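
/*
 * The tasklet's .data field is filled in with the pcd pointer at the end of
 * dwc_otg_pcd_init(), which also stores the tasklet in
 * pcd->start_xfer_tasklet.  The interrupt code is expected to schedule it
 * (tasklet_schedule()) after setting an endpoint's queue_sof flag, so that
 * the deferred request is started from start_xfer_tasklet_func() above.
 */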
1946 /**
1947  * This function initializes the PCD EP structures to their default
1948  * state.
1949 *
1950 * @param pcd the pcd structure.
1951 */
1952 void dwc_otg_pcd_reinit(dwc_otg_pcd_t *pcd)
1953 {
1954 static const char * names[] =
1955 {
1956
1957 "ep0",
1958 "ep1in",
1959 "ep2in",
1960 "ep3in",
1961 "ep4in",
1962 "ep5in",
1963 "ep6in",
1964 "ep7in",
1965 "ep8in",
1966 "ep9in",
1967 "ep10in",
1968 "ep11in",
1969 "ep12in",
1970 "ep13in",
1971 "ep14in",
1972 "ep15in",
1973 "ep1out",
1974 "ep2out",
1975 "ep3out",
1976 "ep4out",
1977 "ep5out",
1978 "ep6out",
1979 "ep7out",
1980 "ep8out",
1981 "ep9out",
1982 "ep10out",
1983 "ep11out",
1984 "ep12out",
1985 "ep13out",
1986 "ep14out",
1987 "ep15out"
1988
1989 };
1990
1991 int i;
1992 int in_ep_cntr, out_ep_cntr;
1993 uint32_t hwcfg1;
1994 uint32_t num_in_eps = (GET_CORE_IF(pcd))->dev_if->num_in_eps;
1995 uint32_t num_out_eps = (GET_CORE_IF(pcd))->dev_if->num_out_eps;
1996 dwc_otg_pcd_ep_t *ep;
1997
1998 DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, pcd);
1999
2000 INIT_LIST_HEAD (&pcd->gadget.ep_list);
2001 pcd->gadget.ep0 = &pcd->ep0.ep;
2002 pcd->gadget.speed = USB_SPEED_UNKNOWN;
2003
2004 INIT_LIST_HEAD (&pcd->gadget.ep0->ep_list);
2005
2006 /**
2007 * Initialize the EP0 structure.
2008 */
2009 ep = &pcd->ep0;
2010
2011 /* Init EP structure */
2012 ep->desc = 0;
2013 ep->pcd = pcd;
2014 ep->stopped = 1;
2015
2016 /* Init DWC ep structure */
2017 ep->dwc_ep.num = 0;
2018 ep->dwc_ep.active = 0;
2019 ep->dwc_ep.tx_fifo_num = 0;
2020 	/* Control until ep is activated */
2021 ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
2022 ep->dwc_ep.maxpacket = MAX_PACKET_SIZE;
2023 ep->dwc_ep.dma_addr = 0;
2024 ep->dwc_ep.start_xfer_buff = 0;
2025 ep->dwc_ep.xfer_buff = 0;
2026 ep->dwc_ep.xfer_len = 0;
2027 ep->dwc_ep.xfer_count = 0;
2028 ep->dwc_ep.sent_zlp = 0;
2029 ep->dwc_ep.total_len = 0;
2030 ep->queue_sof = 0;
2031 ep->dwc_ep.desc_addr = 0;
2032 ep->dwc_ep.dma_desc_addr = 0;
2033
2034 ep->dwc_ep.aligned_buf=NULL;
2035 ep->dwc_ep.aligned_buf_size=0;
2036 ep->dwc_ep.aligned_dma_addr=0;
2037
2038
2039 /* Init the usb_ep structure. */
2040 ep->ep.name = names[0];
2041 ep->ep.ops = (struct usb_ep_ops*)&dwc_otg_pcd_ep_ops;
2042
2043 /**
2044 * @todo NGS: What should the max packet size be set to
2045 * here? Before EP type is set?
2046 */
2047 ep->ep.maxpacket = MAX_PACKET_SIZE;
2048
2049 list_add_tail (&ep->ep.ep_list, &pcd->gadget.ep_list);
2050
2051 INIT_LIST_HEAD (&ep->queue);
2052 /**
2053 * Initialize the EP structures.
2054 */
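	/*
	 * GHWCFG1 carries a 2-bit direction field per endpoint (assumed
	 * encoding: 0 = bidirectional, 1 = IN only, 2 = OUT only).  The
	 * loops below skip EP0's field (>> 3 for the IN pass, >> 2 for the
	 * OUT pass), test one bit of each field, and advance two bits per
	 * endpoint; a clear bit means the endpoint supports the direction
	 * being initialized.
	 */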
2055 in_ep_cntr = 0;
2056 hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 3;
2057
2058 for (i = 1; in_ep_cntr < num_in_eps; i++)
2059 {
2060 if((hwcfg1 & 0x1) == 0) {
2061 dwc_otg_pcd_ep_t *ep = &pcd->in_ep[in_ep_cntr];
2062 in_ep_cntr ++;
2063
2064 /* Init EP structure */
2065 ep->desc = 0;
2066 ep->pcd = pcd;
2067 ep->stopped = 1;
2068
2069 /* Init DWC ep structure */
2070 ep->dwc_ep.is_in = 1;
2071 ep->dwc_ep.num = i;
2072 ep->dwc_ep.active = 0;
2073 ep->dwc_ep.tx_fifo_num = 0;
2074
2075 			/* Control until ep is activated */
2076 ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
2077 ep->dwc_ep.maxpacket = MAX_PACKET_SIZE;
2078 ep->dwc_ep.dma_addr = 0;
2079 ep->dwc_ep.start_xfer_buff = 0;
2080 ep->dwc_ep.xfer_buff = 0;
2081 ep->dwc_ep.xfer_len = 0;
2082 ep->dwc_ep.xfer_count = 0;
2083 ep->dwc_ep.sent_zlp = 0;
2084 ep->dwc_ep.total_len = 0;
2085 ep->queue_sof = 0;
2086 ep->dwc_ep.desc_addr = 0;
2087 ep->dwc_ep.dma_desc_addr = 0;
2088
2089 /* Init the usb_ep structure. */
2090 ep->ep.name = names[i];
2091 ep->ep.ops = (struct usb_ep_ops*)&dwc_otg_pcd_ep_ops;
2092
2093 /**
2094 * @todo NGS: What should the max packet size be set to
2095 * here? Before EP type is set?
2096 */
2097 ep->ep.maxpacket = MAX_PACKET_SIZE;
2098
2099 			// add only odd-numbered endpoints as IN
2100 if((i%2)==1)
2101 list_add_tail (&ep->ep.ep_list, &pcd->gadget.ep_list);
2102
2103 INIT_LIST_HEAD (&ep->queue);
2104 }
2105 hwcfg1 >>= 2;
2106 }
2107
2108 out_ep_cntr = 0;
2109 hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 2;
2110
2111 for (i = 1; out_ep_cntr < num_out_eps; i++)
2112 {
2113 if((hwcfg1 & 0x1) == 0) {
2114 dwc_otg_pcd_ep_t *ep = &pcd->out_ep[out_ep_cntr];
2115 out_ep_cntr++;
2116
2117 /* Init EP structure */
2118 ep->desc = 0;
2119 ep->pcd = pcd;
2120 ep->stopped = 1;
2121
2122 /* Init DWC ep structure */
2123 ep->dwc_ep.is_in = 0;
2124 ep->dwc_ep.num = i;
2125 ep->dwc_ep.active = 0;
2126 ep->dwc_ep.tx_fifo_num = 0;
2127 			/* Control until ep is activated */
2128 ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
2129 ep->dwc_ep.maxpacket = MAX_PACKET_SIZE;
2130 ep->dwc_ep.dma_addr = 0;
2131 ep->dwc_ep.start_xfer_buff = 0;
2132 ep->dwc_ep.xfer_buff = 0;
2133 ep->dwc_ep.xfer_len = 0;
2134 ep->dwc_ep.xfer_count = 0;
2135 ep->dwc_ep.sent_zlp = 0;
2136 ep->dwc_ep.total_len = 0;
2137 ep->queue_sof = 0;
2138
2139 /* Init the usb_ep structure. */
2140 ep->ep.name = names[15 + i];
2141 ep->ep.ops = (struct usb_ep_ops*)&dwc_otg_pcd_ep_ops;
2142 /**
2143 * @todo NGS: What should the max packet size be set to
2144 * here? Before EP type is set?
2145 */
2146 ep->ep.maxpacket = MAX_PACKET_SIZE;
2147
2148 			// add only even-numbered endpoints as OUT
2149 if((i%2)==0)
2150 list_add_tail (&ep->ep.ep_list, &pcd->gadget.ep_list);
2151
2152 INIT_LIST_HEAD (&ep->queue);
2153 }
2154 hwcfg1 >>= 2;
2155 }
2156
2157 	/* Remove ep0 from the gadget ep_list; it remains reachable via the ep0 pointer. */
2158 list_del_init (&pcd->ep0.ep.ep_list);
2159
2160 pcd->ep0state = EP0_DISCONNECT;
2161 pcd->ep0.ep.maxpacket = MAX_EP0_SIZE;
2162 pcd->ep0.dwc_ep.maxpacket = MAX_EP0_SIZE;
2163 pcd->ep0.dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
2164 }
2165
2166 /**
2167  * This function releases the gadget device.  It is
2168  * required by device_unregister().
2169 *
2170 * @todo Should this do something? Should it free the PCD?
2171 */
2172 static void dwc_otg_pcd_gadget_release(struct device *dev)
2173 {
2174 DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, dev);
2175 }
2176
2177
2178
2179 /**
2180  * This function initializes the PCD portion of the driver.
2181 *
2182 */
2183 char dev_id[] = "gadget";
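
/*
 * Rough initialization sequence (largely mirrored in reverse by
 * dwc_otg_pcd_remove()): allocate and zero the PCD, register the gadget
 * device, initialize the core for device mode, set up the endpoint
 * structures, register the PCD callbacks with the CIL, request the shared
 * IRQ, allocate the SETUP packet and status buffers (coherent DMA buffers
 * when DMA is enabled, plus DMA descriptor chains when descriptor DMA is
 * enabled), and finally hook up the start-transfer tasklet.
 */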
2184 int dwc_otg_pcd_init(struct platform_device *pdev)
2185 {
2186 static char pcd_name[] = "dwc_otg_pcd";
2187 dwc_otg_pcd_t *pcd;
2188 dwc_otg_core_if_t* core_if;
2189 dwc_otg_dev_if_t* dev_if;
2190 dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev);
2191 int retval = 0;
2192
2193
2194 DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n",__func__, pdev);
2195 /*
2196 * Allocate PCD structure
2197 */
2198 pcd = kmalloc(sizeof(dwc_otg_pcd_t), GFP_KERNEL);
2199
2200 if (pcd == 0) {
2201 return -ENOMEM;
2202 }
2203
2204 memset(pcd, 0, sizeof(dwc_otg_pcd_t));
2205 spin_lock_init(&pcd->lock);
2206
2207 otg_dev->pcd = pcd;
2208 s_pcd = pcd;
2209 pcd->gadget.name = pcd_name;
2210
2211 pcd->gadget.dev.init_name = dev_id;
2212 pcd->otg_dev = platform_get_drvdata(pdev);
2213
2214 pcd->gadget.dev.parent = &pdev->dev;
2215 pcd->gadget.dev.release = dwc_otg_pcd_gadget_release;
2216 pcd->gadget.ops = &dwc_otg_pcd_ops;
2217
2218 core_if = GET_CORE_IF(pcd);
2219 dev_if = core_if->dev_if;
2220
2221 if(core_if->hwcfg4.b.ded_fifo_en) {
2222 DWC_PRINT("Dedicated Tx FIFOs mode\n");
2223 }
2224 else {
2225 DWC_PRINT("Shared Tx FIFO mode\n");
2226 }
2227
2228 	/* If the module is configured for full speed, or if the PHY type is
2229 	 * FS, then the gadget should not report itself as dual-speed
2230 	 * capable.  Limiting max_speed to USB_SPEED_FULL means the gadget
2231 	 * driver will not report a device qualifier descriptor when
2232 	 * queried. */
2233 if ((GET_CORE_IF(pcd)->core_params->speed == DWC_SPEED_PARAM_FULL) ||
2234 ((GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == 2) &&
2235 (GET_CORE_IF(pcd)->hwcfg2.b.fs_phy_type == 1) &&
2236 (GET_CORE_IF(pcd)->core_params->ulpi_fs_ls))) {
2237 pcd->gadget.max_speed = USB_SPEED_FULL;
2238 }
2239 else {
2240 pcd->gadget.max_speed = USB_SPEED_HIGH;
2241 }
2242
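	/* The device-only and host-only op modes below clear is_otg; the
	 * OTG-capable op modes set it. */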
2243 if ((otg_dev->core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE) ||
2244 (otg_dev->core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST) ||
2245 (otg_dev->core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) ||
2246 (otg_dev->core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) {
2247 pcd->gadget.is_otg = 0;
2248 }
2249 else {
2250 pcd->gadget.is_otg = 1;
2251 }
2252
2253
2254 pcd->driver = 0;
2255 /* Register the gadget device */
2256 	DWC_DEBUGPL(DBG_PCDV, "registering the gadget device\n");
2257 retval = device_register(&pcd->gadget.dev);
2258 if (retval != 0) {
2259 kfree (pcd);
2260 		DWC_ERROR("device_register failed\n");
2261 return retval;
2262 }
2263
2264
2265 /*
2266 	 * Initialize the core for device mode.
2267 */
2268 if (dwc_otg_is_device_mode(core_if)) {
2269 dwc_otg_core_dev_init(core_if);
2270 }
2271
2272 /*
2273 * Initialize EP structures
2274 */
2275 dwc_otg_pcd_reinit(pcd);
2276
2277 /*
2278 * Register the PCD Callbacks.
2279 */
2280 dwc_otg_cil_register_pcd_callbacks(otg_dev->core_if, &pcd_callbacks,
2281 pcd);
2282 /*
2283 	 * Set up the interrupt handler
2284 */
2285 DWC_DEBUGPL(DBG_ANY, "registering handler for irq%d\n", otg_dev->irq);
2286 retval = request_irq(otg_dev->irq, dwc_otg_pcd_irq,
2287 IRQF_SHARED, pcd->gadget.name, pcd);
2288 if (retval != 0) {
2289 DWC_ERROR("request of irq%d failed\n", otg_dev->irq);
2290 device_unregister(&pcd->gadget.dev);
2291 kfree (pcd);
2292 return -EBUSY;
2293 }
2294
2295 /*
2296 * Initialize the DMA buffer for SETUP packets
2297 */
2298 if (GET_CORE_IF(pcd)->dma_enable) {
2299 pcd->setup_pkt = dma_alloc_coherent (NULL, sizeof (*pcd->setup_pkt) * 5, &pcd->setup_pkt_dma_handle, 0);
2300 if (pcd->setup_pkt == 0) {
2301 free_irq(otg_dev->irq, pcd);
2302 device_unregister(&pcd->gadget.dev);
2303 kfree (pcd);
2304 return -ENOMEM;
2305 }
2306
2307 pcd->status_buf = dma_alloc_coherent (NULL, sizeof (uint16_t), &pcd->status_buf_dma_handle, 0);
2308 if (pcd->status_buf == 0) {
2309 			dma_free_coherent(NULL, sizeof(*pcd->setup_pkt) * 5, pcd->setup_pkt, pcd->setup_pkt_dma_handle);
2310 free_irq(otg_dev->irq, pcd);
2311 device_unregister(&pcd->gadget.dev);
2312 kfree (pcd);
2313 return -ENOMEM;
2314 }
2315
2316 if (GET_CORE_IF(pcd)->dma_desc_enable) {
2317 dev_if->setup_desc_addr[0] = dwc_otg_ep_alloc_desc_chain(&dev_if->dma_setup_desc_addr[0], 1);
2318 dev_if->setup_desc_addr[1] = dwc_otg_ep_alloc_desc_chain(&dev_if->dma_setup_desc_addr[1], 1);
2319 dev_if->in_desc_addr = dwc_otg_ep_alloc_desc_chain(&dev_if->dma_in_desc_addr, 1);
2320 dev_if->out_desc_addr = dwc_otg_ep_alloc_desc_chain(&dev_if->dma_out_desc_addr, 1);
2321
2322 if(dev_if->setup_desc_addr[0] == 0
2323 || dev_if->setup_desc_addr[1] == 0
2324 || dev_if->in_desc_addr == 0
2325 || dev_if->out_desc_addr == 0 ) {
2326
2327 if(dev_if->out_desc_addr)
2328 dwc_otg_ep_free_desc_chain(dev_if->out_desc_addr, dev_if->dma_out_desc_addr, 1);
2329 if(dev_if->in_desc_addr)
2330 dwc_otg_ep_free_desc_chain(dev_if->in_desc_addr, dev_if->dma_in_desc_addr, 1);
2331 if(dev_if->setup_desc_addr[1])
2332 dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[1], dev_if->dma_setup_desc_addr[1], 1);
2333 if(dev_if->setup_desc_addr[0])
2334 dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[0], dev_if->dma_setup_desc_addr[0], 1);
2335
2336
2337 				dma_free_coherent(NULL, sizeof(*pcd->status_buf), pcd->status_buf, pcd->status_buf_dma_handle);
2338 				dma_free_coherent(NULL, sizeof(*pcd->setup_pkt) * 5, pcd->setup_pkt, pcd->setup_pkt_dma_handle);
2339
2340 free_irq(otg_dev->irq, pcd);
2341 device_unregister(&pcd->gadget.dev);
2342 kfree (pcd);
2343
2344 return -ENOMEM;
2345 }
2346 }
2347 }
2348 else {
2349 pcd->setup_pkt = kmalloc (sizeof (*pcd->setup_pkt) * 5, GFP_KERNEL);
2350 if (pcd->setup_pkt == 0) {
2351 free_irq(otg_dev->irq, pcd);
2352 device_unregister(&pcd->gadget.dev);
2353 kfree (pcd);
2354 return -ENOMEM;
2355 }
2356
2357 pcd->status_buf = kmalloc (sizeof (uint16_t), GFP_KERNEL);
2358 if (pcd->status_buf == 0) {
2359 kfree(pcd->setup_pkt);
2360 free_irq(otg_dev->irq, pcd);
2361 device_unregister(&pcd->gadget.dev);
2362 kfree (pcd);
2363 return -ENOMEM;
2364 }
2365 }
2366
2367
2368 /* Initialize tasklet */
2369 start_xfer_tasklet.data = (unsigned long)pcd;
2370 pcd->start_xfer_tasklet = &start_xfer_tasklet;
2371
2372 return 0;
2373 }
2374
2375 /**
2376 * Cleanup the PCD.
2377 */
2378 void dwc_otg_pcd_remove(struct platform_device *pdev)
2379 {
2380 dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev);
2381 dwc_otg_pcd_t *pcd = otg_dev->pcd;
2382 dwc_otg_dev_if_t* dev_if = GET_CORE_IF(pcd)->dev_if;
2383
2384 DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, pdev);
2385
2386 /*
2387 * Free the IRQ
2388 */
2389 free_irq(otg_dev->irq, pcd);
2390
2391 /* start with the driver above us */
2392 if (pcd->driver) {
2393 /* should have been done already by driver model core */
2394 DWC_WARN("driver '%s' is still registered\n",
2395 pcd->driver->driver.name);
2396 usb_gadget_unregister_driver(pcd->driver);
2397 }
2398 device_unregister(&pcd->gadget.dev);
2399
2400 if (GET_CORE_IF(pcd)->dma_enable) {
2401 dma_free_coherent (NULL, sizeof (*pcd->setup_pkt) * 5, pcd->setup_pkt, pcd->setup_pkt_dma_handle);
2402 dma_free_coherent (NULL, sizeof (uint16_t), pcd->status_buf, pcd->status_buf_dma_handle);
2403 if (GET_CORE_IF(pcd)->dma_desc_enable) {
2404 dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[0], dev_if->dma_setup_desc_addr[0], 1);
2405 dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[1], dev_if->dma_setup_desc_addr[1], 1);
2406 dwc_otg_ep_free_desc_chain(dev_if->in_desc_addr, dev_if->dma_in_desc_addr, 1);
2407 dwc_otg_ep_free_desc_chain(dev_if->out_desc_addr, dev_if->dma_out_desc_addr, 1);
2408 }
2409 }
2410 else {
2411 kfree (pcd->setup_pkt);
2412 kfree (pcd->status_buf);
2413 }
2414
2415 kfree(pcd);
2416 otg_dev->pcd = 0;
2417 }
2418
2419 #endif /* DWC_HOST_ONLY */