cns3xxx: convert dwc_otg patches to files
openwrt/svn-archive/archive.git: target/linux/cns3xxx/files/drivers/usb/dwc/otg_pcd_intr.c
1 /* ==========================================================================
2 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd_intr.c $
3 * $Revision: #83 $
4 * $Date: 2008/10/14 $
5 * $Change: 1115682 $
6 *
7 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
8 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
9 * otherwise expressly agreed to in writing between Synopsys and you.
10 *
11 * The Software IS NOT an item of Licensed Software or Licensed Product under
12 * any End User Software License Agreement or Agreement for Licensed Product
13 * with Synopsys or any supplement thereto. You are permitted to use and
14 * redistribute this Software in source and binary forms, with or without
15 * modification, provided that redistributions of source code must retain this
16 * notice. You may not view, use, disclose, copy or distribute this file or
17 * any information contained herein except pursuant to this license grant from
18 * Synopsys. If you do not agree with this notice, including the disclaimer
19 * below, then you are not authorized to use the Software.
20 *
21 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
25 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31 * DAMAGE.
32 * ========================================================================== */
33 #ifndef DWC_HOST_ONLY
34 #include <linux/interrupt.h>
35 #include <linux/dma-mapping.h>
36 #include <linux/version.h>
37 #include <linux/pci.h>
38
39 #include "otg_driver.h"
40 #include "otg_pcd.h"
41
42
43 #define DEBUG_EP0
44
45
46 /* request functions defined in "dwc_otg_pcd.c" */
47
48 /** @file
49 * This file contains the implementation of the PCD Interrupt handlers.
50 *
51 * The PCD handles the device interrupts. Many conditions can cause a
52 * device interrupt. When an interrupt occurs, the device interrupt
53 * service routine determines the cause of the interrupt and
54 * dispatches handling to the appropriate function. These interrupt
55 * handling functions are described below.
56 * All interrupt registers are processed from LSB to MSB.
57 */
58
59
60 /**
61 * This function prints the ep0 state for debug purposes.
62 */
63 static inline void print_ep0_state(dwc_otg_pcd_t *pcd)
64 {
65 #ifdef DEBUG
66 char str[40];
67
68 switch (pcd->ep0state) {
69 case EP0_DISCONNECT:
70 strcpy(str, "EP0_DISCONNECT");
71 break;
72 case EP0_IDLE:
73 strcpy(str, "EP0_IDLE");
74 break;
75 case EP0_IN_DATA_PHASE:
76 strcpy(str, "EP0_IN_DATA_PHASE");
77 break;
78 case EP0_OUT_DATA_PHASE:
79 strcpy(str, "EP0_OUT_DATA_PHASE");
80 break;
81 case EP0_IN_STATUS_PHASE:
82 strcpy(str,"EP0_IN_STATUS_PHASE");
83 break;
84 case EP0_OUT_STATUS_PHASE:
85 strcpy(str,"EP0_OUT_STATUS_PHASE");
86 break;
87 case EP0_STALL:
88 strcpy(str,"EP0_STALL");
89 break;
90 default:
91 strcpy(str,"EP0_INVALID");
92 }
93
94 DWC_DEBUGPL(DBG_ANY, "%s(%d)\n", str, pcd->ep0state);
95 #endif
96 }
97
98 /**
99  * This function returns a pointer to the IN EP structure with number ep_num.
100 */
101 static inline dwc_otg_pcd_ep_t* get_in_ep(dwc_otg_pcd_t *pcd, uint32_t ep_num)
102 {
103 int i;
104 int num_in_eps = GET_CORE_IF(pcd)->dev_if->num_in_eps;
105 if(ep_num == 0) {
106 return &pcd->ep0;
107 }
108 else {
109 for(i = 0; i < num_in_eps; ++i)
110 {
111 if(pcd->in_ep[i].dwc_ep.num == ep_num)
112 return &pcd->in_ep[i];
113 }
114 return 0;
115 }
116 }
117 /**
118  * This function returns a pointer to the OUT EP structure with number ep_num.
119 */
120 static inline dwc_otg_pcd_ep_t* get_out_ep(dwc_otg_pcd_t *pcd, uint32_t ep_num)
121 {
122 int i;
123 int num_out_eps = GET_CORE_IF(pcd)->dev_if->num_out_eps;
124 if(ep_num == 0) {
125 return &pcd->ep0;
126 }
127 else {
128 for(i = 0; i < num_out_eps; ++i)
129 {
130 if(pcd->out_ep[i].dwc_ep.num == ep_num)
131 return &pcd->out_ep[i];
132 }
133 return 0;
134 }
135 }
136 /**
137  * This function gets a pointer to an EP from the wIndex address
138 * value of the control request.
139 */
140 static dwc_otg_pcd_ep_t *get_ep_by_addr (dwc_otg_pcd_t *pcd, u16 wIndex)
141 {
142 dwc_otg_pcd_ep_t *ep;
143
144 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
145 return &pcd->ep0;
146 list_for_each_entry(ep, &pcd->gadget.ep_list, ep.ep_list)
147 {
148 u8 bEndpointAddress;
149
150 if (!ep->desc)
151 continue;
152
153 bEndpointAddress = ep->desc->bEndpointAddress;
154 if((wIndex & (USB_DIR_IN | USB_ENDPOINT_NUMBER_MASK))
155 == (bEndpointAddress & (USB_DIR_IN | USB_ENDPOINT_NUMBER_MASK)))
156 return ep;
157 }
158 return NULL;
159 }
160
161 /**
162  * This function checks the EP request queue; if the queue is not
163  * empty, the next request is started.
164 */
165 void start_next_request(dwc_otg_pcd_ep_t *ep)
166 {
167 dwc_otg_pcd_request_t *req = 0;
168 uint32_t max_transfer = GET_CORE_IF(ep->pcd)->core_params->max_transfer_size;
169 if (!list_empty(&ep->queue)) {
170 req = list_entry(ep->queue.next,
171 dwc_otg_pcd_request_t, queue);
172
173 /* Setup and start the Transfer */
174 ep->dwc_ep.dma_addr = req->req.dma;
175 ep->dwc_ep.start_xfer_buff = req->req.buf;
176 ep->dwc_ep.xfer_buff = req->req.buf;
177 ep->dwc_ep.sent_zlp = 0;
178 ep->dwc_ep.total_len = req->req.length;
179 ep->dwc_ep.xfer_len = 0;
180 ep->dwc_ep.xfer_count = 0;
181
182 if(max_transfer > MAX_TRANSFER_SIZE) {
183 ep->dwc_ep.maxxfer = max_transfer - (max_transfer % ep->dwc_ep.maxpacket);
184 } else {
185 ep->dwc_ep.maxxfer = max_transfer;
186 }
187
188 if(req->req.zero) {
189 if((ep->dwc_ep.total_len % ep->dwc_ep.maxpacket == 0)
190 && (ep->dwc_ep.total_len != 0)) {
191 ep->dwc_ep.sent_zlp = 1;
192 }
193
194 }
195 ep_check_and_patch_dma_addr(ep);
196 dwc_otg_ep_start_transfer(GET_CORE_IF(ep->pcd), &ep->dwc_ep);
197 }
198 }
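/*
 * Note on maxxfer (added for clarity, not part of the original source):
 * this is the per-transfer ceiling handed to dwc_otg_ep_start_transfer().
 * When the configured max_transfer_size exceeds MAX_TRANSFER_SIZE it is
 * rounded down to a whole number of max-packet units, presumably so that a
 * single hardware transfer never ends on a partial packet.  For example,
 * assuming max_transfer = 65535 and maxpacket = 512:
 *
 *     maxxfer = 65535 - (65535 % 512) = 65024    (127 full packets)
 */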
199
200 /**
201 * This function handles the SOF Interrupts. At this time the SOF
202 * Interrupt is disabled.
203 */
204 int32_t dwc_otg_pcd_handle_sof_intr(dwc_otg_pcd_t *pcd)
205 {
206 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
207
208 gintsts_data_t gintsts;
209
210 DWC_DEBUGPL(DBG_PCD, "SOF\n");
211
212 /* Clear interrupt */
213 gintsts.d32 = 0;
214 gintsts.b.sofintr = 1;
215 dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32);
216
217 return 1;
218 }
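/*
 * Note (added for clarity, not part of the original source): GINTSTS is a
 * write-one-to-clear status register, so every handler in this file follows
 * the same pattern -- build a gintsts_data_t union with only the bit being
 * handled set and write it back, which clears that bit without disturbing
 * any other pending interrupt:
 *
 *     gintsts_data_t gintsts = { .d32 = 0 };
 *     gintsts.b.sofintr = 1;       // only the handled bit
 *     dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32);
 */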
219
220
221 /**
222 * This function handles the Rx Status Queue Level Interrupt, which
223  * indicates that there is at least one packet in the Rx FIFO. The
224 * packets are moved from the FIFO to memory, where they will be
225 * processed when the Endpoint Interrupt Register indicates Transfer
226 * Complete or SETUP Phase Done.
227 *
228 * Repeat the following until the Rx Status Queue is empty:
229 * -# Read the Receive Status Pop Register (GRXSTSP) to get Packet
230 * info
231 * -# If Receive FIFO is empty then skip to step Clear the interrupt
232 * and exit
233 * -# If SETUP Packet call dwc_otg_read_setup_packet to copy the
234 * SETUP data to the buffer
235 * -# If OUT Data Packet call dwc_otg_read_packet to copy the data
236 * to the destination buffer
237 */
238 int32_t dwc_otg_pcd_handle_rx_status_q_level_intr(dwc_otg_pcd_t *pcd)
239 {
240 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
241 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
242 gintmsk_data_t gintmask = {.d32=0};
243 device_grxsts_data_t status;
244 dwc_otg_pcd_ep_t *ep;
245 gintsts_data_t gintsts;
246 #ifdef DEBUG
247 static char *dpid_str[] ={ "D0", "D2", "D1", "MDATA" };
248 #endif
249
250 //DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _pcd);
251 /* Disable the Rx Status Queue Level interrupt */
252 gintmask.b.rxstsqlvl= 1;
253 dwc_modify_reg32(&global_regs->gintmsk, gintmask.d32, 0);
254
255 /* Get the Status from the top of the FIFO */
256 status.d32 = dwc_read_reg32(&global_regs->grxstsp);
257
258 DWC_DEBUGPL(DBG_PCD, "EP:%d BCnt:%d DPID:%s "
259 "pktsts:%x Frame:%d(0x%0x)\n",
260 status.b.epnum, status.b.bcnt,
261 dpid_str[status.b.dpid],
262 status.b.pktsts, status.b.fn, status.b.fn);
263 /* Get pointer to EP structure */
264 ep = get_out_ep(pcd, status.b.epnum);
265
266 switch (status.b.pktsts) {
267 case DWC_DSTS_GOUT_NAK:
268 DWC_DEBUGPL(DBG_PCDV, "Global OUT NAK\n");
269 break;
270 case DWC_STS_DATA_UPDT:
271 DWC_DEBUGPL(DBG_PCDV, "OUT Data Packet\n");
272 if (status.b.bcnt && ep->dwc_ep.xfer_buff) {
273 /** @todo NGS Check for buffer overflow? */
274 dwc_otg_read_packet(core_if,
275 ep->dwc_ep.xfer_buff,
276 status.b.bcnt);
277 ep->dwc_ep.xfer_count += status.b.bcnt;
278 ep->dwc_ep.xfer_buff += status.b.bcnt;
279 }
280 break;
281 case DWC_STS_XFER_COMP:
282 DWC_DEBUGPL(DBG_PCDV, "OUT Complete\n");
283 break;
284 case DWC_DSTS_SETUP_COMP:
285 #ifdef DEBUG_EP0
286 DWC_DEBUGPL(DBG_PCDV, "Setup Complete\n");
287 #endif
288 break;
289 case DWC_DSTS_SETUP_UPDT:
290 dwc_otg_read_setup_packet(core_if, pcd->setup_pkt->d32);
291 #ifdef DEBUG_EP0
292 DWC_DEBUGPL(DBG_PCD,
293 "SETUP PKT: %02x.%02x v%04x i%04x l%04x\n",
294 pcd->setup_pkt->req.bRequestType,
295 pcd->setup_pkt->req.bRequest,
296 pcd->setup_pkt->req.wValue,
297 pcd->setup_pkt->req.wIndex,
298 pcd->setup_pkt->req.wLength);
299 #endif
300 ep->dwc_ep.xfer_count += status.b.bcnt;
301 break;
302 default:
303 DWC_DEBUGPL(DBG_PCDV, "Invalid Packet Status (0x%0x)\n",
304 status.b.pktsts);
305 break;
306 }
307
308 /* Enable the Rx Status Queue Level interrupt */
309 dwc_modify_reg32(&global_regs->gintmsk, 0, gintmask.d32);
310 /* Clear interrupt */
311 gintsts.d32 = 0;
312 gintsts.b.rxstsqlvl = 1;
313 dwc_write_reg32 (&global_regs->gintsts, gintsts.d32);
314
315 //DWC_DEBUGPL(DBG_PCDV, "EXIT: %s\n", __func__);
316 return 1;
317 }
318 /**
319 * This function examines the Device IN Token Learning Queue to
320 * determine the EP number of the last IN token received. This
321 * implementation is for the Mass Storage device where there are only
322 * 2 IN EPs (Control-IN and BULK-IN).
323 *
324 * The EP numbers for the first six IN Tokens are in DTKNQR1 and there
325 * are 8 EP Numbers in each of the other possible DTKNQ Registers.
326 *
327 * @param core_if Programming view of DWC_otg controller.
328 *
329 */
330 static inline int get_ep_of_last_in_token(dwc_otg_core_if_t *core_if)
331 {
332 dwc_otg_device_global_regs_t *dev_global_regs =
333 core_if->dev_if->dev_global_regs;
334 const uint32_t TOKEN_Q_DEPTH = core_if->hwcfg2.b.dev_token_q_depth;
335 /* Number of Token Queue Registers */
336 const int DTKNQ_REG_CNT = (TOKEN_Q_DEPTH + 7) / 8;
337 dtknq1_data_t dtknqr1;
338 uint32_t in_tkn_epnums[4];
339 int ndx = 0;
340 int i = 0;
341 volatile uint32_t *addr = &dev_global_regs->dtknqr1;
342 int epnum = 0;
343
344 //DWC_DEBUGPL(DBG_PCD,"dev_token_q_depth=%d\n",TOKEN_Q_DEPTH);
345
346 /* Read the DTKNQ Registers */
347 for (i = 0; i < DTKNQ_REG_CNT; i++)
348 {
349 in_tkn_epnums[ i ] = dwc_read_reg32(addr);
350 DWC_DEBUGPL(DBG_PCDV, "DTKNQR%d=0x%08x\n", i+1,
351 in_tkn_epnums[i]);
352 if (addr == &dev_global_regs->dvbusdis) {
353 addr = &dev_global_regs->dtknqr3_dthrctl;
354 }
355 else {
356 ++addr;
357 }
358 }
359
360 /* Copy the DTKNQR1 data to the bit field. */
361 dtknqr1.d32 = in_tkn_epnums[0];
362 /* Get the EP numbers */
363 in_tkn_epnums[0] = dtknqr1.b.epnums0_5;
364 ndx = dtknqr1.b.intknwptr - 1;
365
366 //DWC_DEBUGPL(DBG_PCDV,"ndx=%d\n",ndx);
367 if (ndx == -1) {
368 /** @todo Find a simpler way to calculate the max
369 * queue position.*/
370 int cnt = TOKEN_Q_DEPTH;
371 if (TOKEN_Q_DEPTH <= 6) {
372 cnt = TOKEN_Q_DEPTH - 1;
373 }
374 else if (TOKEN_Q_DEPTH <= 14) {
375 cnt = TOKEN_Q_DEPTH - 7;
376 }
377 else if (TOKEN_Q_DEPTH <= 22) {
378 cnt = TOKEN_Q_DEPTH - 15;
379 }
380 else {
381 cnt = TOKEN_Q_DEPTH - 23;
382 }
383 epnum = (in_tkn_epnums[ DTKNQ_REG_CNT - 1 ] >> (cnt * 4)) & 0xF;
384 }
385 else {
386 if (ndx <= 5) {
387 epnum = (in_tkn_epnums[0] >> (ndx * 4)) & 0xF;
388 }
389 else if (ndx <= 13) {
390 ndx -= 6;
391 epnum = (in_tkn_epnums[1] >> (ndx * 4)) & 0xF;
392 }
393 else if (ndx <= 21) {
394 ndx -= 14;
395 epnum = (in_tkn_epnums[2] >> (ndx * 4)) & 0xF;
396 }
397 else if (ndx <= 29) {
398 ndx -= 22;
399 epnum = (in_tkn_epnums[3] >> (ndx * 4)) & 0xF;
400 }
401 }
402 //DWC_DEBUGPL(DBG_PCD,"epnum=%d\n",epnum);
403 return epnum;
404 }
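/*
 * Worked example (added for clarity, not part of the original source):
 * if dtknqr1.b.intknwptr reads 3, the most recently written token sits at
 * ndx = 3 - 1 = 2.  Since ndx <= 5 the EP number is taken from the third
 * 4-bit nibble of the epnums0_5 field copied into in_tkn_epnums[0]:
 *
 *     epnum = (in_tkn_epnums[0] >> (2 * 4)) & 0xF;
 */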
405
406 /**
407 * This interrupt occurs when the non-periodic Tx FIFO is half-empty.
408 * The active request is checked for the next packet to be loaded into
409 * the non-periodic Tx FIFO.
410 */
411 int32_t dwc_otg_pcd_handle_np_tx_fifo_empty_intr(dwc_otg_pcd_t *pcd)
412 {
413 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
414 dwc_otg_core_global_regs_t *global_regs =
415 core_if->core_global_regs;
416 dwc_otg_dev_in_ep_regs_t *ep_regs;
417 gnptxsts_data_t txstatus = {.d32 = 0};
418 gintsts_data_t gintsts;
419
420 int epnum = 0;
421 dwc_otg_pcd_ep_t *ep = 0;
422 uint32_t len = 0;
423 int dwords;
424
425 /* Get the epnum from the IN Token Learning Queue. */
426 epnum = get_ep_of_last_in_token(core_if);
427 ep = get_in_ep(pcd, epnum);
428
429 DWC_DEBUGPL(DBG_PCD, "NP TxFifo Empty: %s(%d) \n", ep->ep.name, epnum);
430 ep_regs = core_if->dev_if->in_ep_regs[epnum];
431
432 len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
433 if (len > ep->dwc_ep.maxpacket) {
434 len = ep->dwc_ep.maxpacket;
435 }
436 dwords = (len + 3)/4;
437
438 	/* While there is space in the queue, space in the FIFO, and
439 	 * more data to transfer, write packets to the Tx FIFO */
440 txstatus.d32 = dwc_read_reg32(&global_regs->gnptxsts);
441 DWC_DEBUGPL(DBG_PCDV, "b4 GNPTXSTS=0x%08x\n",txstatus.d32);
442
443 while (txstatus.b.nptxqspcavail > 0 &&
444 txstatus.b.nptxfspcavail > dwords &&
445 ep->dwc_ep.xfer_count < ep->dwc_ep.xfer_len) {
446 /* Write the FIFO */
447 dwc_otg_ep_write_packet(core_if, &ep->dwc_ep, 0);
448 len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
449
450 if (len > ep->dwc_ep.maxpacket) {
451 len = ep->dwc_ep.maxpacket;
452 }
453
454 dwords = (len + 3)/4;
455 txstatus.d32 = dwc_read_reg32(&global_regs->gnptxsts);
456 DWC_DEBUGPL(DBG_PCDV,"GNPTXSTS=0x%08x\n",txstatus.d32);
457 }
458
459 DWC_DEBUGPL(DBG_PCDV, "GNPTXSTS=0x%08x\n",
460 dwc_read_reg32(&global_regs->gnptxsts));
461
462 /* Clear interrupt */
463 gintsts.d32 = 0;
464 gintsts.b.nptxfempty = 1;
465 dwc_write_reg32 (&global_regs->gintsts, gintsts.d32);
466
467 return 1;
468 }
469
470 /**
471  * This function is called when the Dedicated Tx FIFO Empty interrupt occurs.
472  * The active request is checked for the next packet to be loaded into
473  * the appropriate Tx FIFO.
474 */
475 static int32_t write_empty_tx_fifo(dwc_otg_pcd_t *pcd, uint32_t epnum)
476 {
477 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
478 dwc_otg_dev_if_t* dev_if = core_if->dev_if;
479 dwc_otg_dev_in_ep_regs_t *ep_regs;
480 dtxfsts_data_t txstatus = {.d32 = 0};
481 dwc_otg_pcd_ep_t *ep = 0;
482 uint32_t len = 0;
483 int dwords;
484
485 ep = get_in_ep(pcd, epnum);
486
487 DWC_DEBUGPL(DBG_PCD, "Dedicated TxFifo Empty: %s(%d) \n", ep->ep.name, epnum);
488
489 ep_regs = core_if->dev_if->in_ep_regs[epnum];
490
491 len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
492
493 if (len > ep->dwc_ep.maxpacket) {
494 len = ep->dwc_ep.maxpacket;
495 }
496
497 dwords = (len + 3)/4;
498
499 	/* While there is space in the queue, space in the FIFO, and
500 	 * more data to transfer, write packets to the Tx FIFO */
501 txstatus.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts);
502 DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n",epnum,txstatus.d32);
503
504 while (txstatus.b.txfspcavail > dwords &&
505 ep->dwc_ep.xfer_count < ep->dwc_ep.xfer_len &&
506 ep->dwc_ep.xfer_len != 0) {
507 /* Write the FIFO */
508 dwc_otg_ep_write_packet(core_if, &ep->dwc_ep, 0);
509
510 len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
511 if (len > ep->dwc_ep.maxpacket) {
512 len = ep->dwc_ep.maxpacket;
513 }
514
515 dwords = (len + 3)/4;
516 txstatus.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts);
517 DWC_DEBUGPL(DBG_PCDV,"dtxfsts[%d]=0x%08x\n", epnum, txstatus.d32);
518 }
519
520 DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n",epnum,dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts));
521
522 return 1;
523 }
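/*
 * Note (added for clarity, not part of the original source): the Tx FIFO
 * space fields count 32-bit words, so the byte length of the next packet is
 * rounded up with dwords = (len + 3) / 4 before comparing it with
 * txfspcavail.  For example a 512-byte packet needs (512 + 3) / 4 = 128
 * FIFO words, and a 13-byte packet needs 4.
 */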
524
525 /**
526 * This function is called when the Device is disconnected. It stops
527 * any active requests and informs the Gadget driver of the
528 * disconnect.
529 */
530 void dwc_otg_pcd_stop(dwc_otg_pcd_t *pcd)
531 {
532 int i, num_in_eps, num_out_eps;
533 dwc_otg_pcd_ep_t *ep;
534
535 gintmsk_data_t intr_mask = {.d32 = 0};
536
537 num_in_eps = GET_CORE_IF(pcd)->dev_if->num_in_eps;
538 num_out_eps = GET_CORE_IF(pcd)->dev_if->num_out_eps;
539
540 DWC_DEBUGPL(DBG_PCDV, "%s() \n", __func__);
541 /* don't disconnect drivers more than once */
542 if (pcd->ep0state == EP0_DISCONNECT) {
543 DWC_DEBUGPL(DBG_ANY, "%s() Already Disconnected\n", __func__);
544 return;
545 }
546 pcd->ep0state = EP0_DISCONNECT;
547
548 /* Reset the OTG state. */
549 dwc_otg_pcd_update_otg(pcd, 1);
550
551 /* Disable the NP Tx Fifo Empty Interrupt. */
552 intr_mask.b.nptxfempty = 1;
553 dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
554 intr_mask.d32, 0);
555
556 /* Flush the FIFOs */
557 /**@todo NGS Flush Periodic FIFOs */
558 dwc_otg_flush_tx_fifo(GET_CORE_IF(pcd), 0x10);
559 dwc_otg_flush_rx_fifo(GET_CORE_IF(pcd));
560
561 /* prevent new request submissions, kill any outstanding requests */
562 ep = &pcd->ep0;
563 dwc_otg_request_nuke(ep);
564 /* prevent new request submissions, kill any outstanding requests */
565 for (i = 0; i < num_in_eps; i++)
566 {
567 dwc_otg_pcd_ep_t *ep = &pcd->in_ep[i];
568 dwc_otg_request_nuke(ep);
569 }
570 /* prevent new request submissions, kill any outstanding requests */
571 for (i = 0; i < num_out_eps; i++)
572 {
573 dwc_otg_pcd_ep_t *ep = &pcd->out_ep[i];
574 dwc_otg_request_nuke(ep);
575 }
576
577 /* report disconnect; the driver is already quiesced */
578 if (pcd->driver && pcd->driver->disconnect) {
579 SPIN_UNLOCK(&pcd->lock);
580 pcd->driver->disconnect(&pcd->gadget);
581 SPIN_LOCK(&pcd->lock);
582 }
583 }
584
585 /**
586  * This interrupt indicates that an I2C interrupt has occurred. No handler is implemented; the interrupt is masked and cleared.
587 */
588 int32_t dwc_otg_pcd_handle_i2c_intr(dwc_otg_pcd_t *pcd)
589 {
590 gintmsk_data_t intr_mask = { .d32 = 0};
591 gintsts_data_t gintsts;
592
593 DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "i2cintr");
594 intr_mask.b.i2cintr = 1;
595 dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
596 intr_mask.d32, 0);
597
598 /* Clear interrupt */
599 gintsts.d32 = 0;
600 gintsts.b.i2cintr = 1;
601 dwc_write_reg32 (&GET_CORE_IF(pcd)->core_global_regs->gintsts,
602 gintsts.d32);
603 return 1;
604 }
605
606
607 /**
608  * This interrupt indicates that an Early Suspend condition has been detected on the USB bus. The handler only logs it (when VERBOSE is defined) and clears the interrupt.
609 */
610 int32_t dwc_otg_pcd_handle_early_suspend_intr(dwc_otg_pcd_t *pcd)
611 {
612 gintsts_data_t gintsts;
613 #if defined(VERBOSE)
614 DWC_PRINT("Early Suspend Detected\n");
615 #endif
616 /* Clear interrupt */
617 gintsts.d32 = 0;
618 gintsts.b.erlysuspend = 1;
619 dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
620 gintsts.d32);
621 return 1;
622 }
623
624 /**
625  * This function configures EP0 to receive SETUP packets.
626 *
627 * @todo NGS: Update the comments from the HW FS.
628 *
629 * -# Program the following fields in the endpoint specific registers
630 * for Control OUT EP 0, in order to receive a setup packet
631 * - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back
632 * setup packets)
633 * - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back
634 * to back setup packets)
635 * - In DMA mode, DOEPDMA0 Register with a memory address to
636 * store any setup packets received
637 *
638 * @param core_if Programming view of DWC_otg controller.
639 * @param pcd Programming view of the PCD.
640 */
641 static inline void ep0_out_start(dwc_otg_core_if_t *core_if, dwc_otg_pcd_t *pcd)
642 {
643 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
644 deptsiz0_data_t doeptsize0 = { .d32 = 0};
645 dwc_otg_dma_desc_t* dma_desc;
646 depctl_data_t doepctl = { .d32 = 0 };
647
648 #ifdef VERBOSE
649 DWC_DEBUGPL(DBG_PCDV,"%s() doepctl0=%0x\n", __func__,
650 dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl));
651 #endif
652
653 doeptsize0.b.supcnt = 3;
654 doeptsize0.b.pktcnt = 1;
655 doeptsize0.b.xfersize = 8*3;
656
657 if (core_if->dma_enable) {
658 if (!core_if->dma_desc_enable) {
659 			/** written here because in Hermes (descriptor DMA) mode the doeptsiz register should not be written */
660 dwc_write_reg32(&dev_if->out_ep_regs[0]->doeptsiz,
661 doeptsize0.d32);
662
663 /** @todo dma needs to handle multiple setup packets (up to 3) */
664 VERIFY_PCD_DMA_ADDR(pcd->setup_pkt_dma_handle);
665
666 dwc_write_reg32(&dev_if->out_ep_regs[0]->doepdma,
667 pcd->setup_pkt_dma_handle);
668 } else {
669 dev_if->setup_desc_index = (dev_if->setup_desc_index + 1) & 1;
670 dma_desc = dev_if->setup_desc_addr[dev_if->setup_desc_index];
671
672 /** DMA Descriptor Setup */
673 dma_desc->status.b.bs = BS_HOST_BUSY;
674 dma_desc->status.b.l = 1;
675 dma_desc->status.b.ioc = 1;
676 dma_desc->status.b.bytes = pcd->ep0.dwc_ep.maxpacket;
677 dma_desc->buf = pcd->setup_pkt_dma_handle;
678 dma_desc->status.b.bs = BS_HOST_READY;
679
680 /** DOEPDMA0 Register write */
681 VERIFY_PCD_DMA_ADDR(dev_if->dma_setup_desc_addr[dev_if->setup_desc_index]);
682 dwc_write_reg32(&dev_if->out_ep_regs[0]->doepdma, dev_if->dma_setup_desc_addr[dev_if->setup_desc_index]);
683 }
684
685 } else {
686 		/** written here because in Hermes (descriptor DMA) mode the doeptsiz register should not be written */
687 dwc_write_reg32(&dev_if->out_ep_regs[0]->doeptsiz,
688 doeptsize0.d32);
689 }
690
691 /** DOEPCTL0 Register write */
692 doepctl.b.epena = 1;
693 doepctl.b.cnak = 1;
694 dwc_write_reg32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32);
695
696 #ifdef VERBOSE
697 DWC_DEBUGPL(DBG_PCDV,"doepctl0=%0x\n",
698 dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl));
699 DWC_DEBUGPL(DBG_PCDV,"diepctl0=%0x\n",
700 dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl));
701 #endif
702 }
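/*
 * Note on the descriptor-DMA branch above (added for clarity, not part of
 * the original source): the status quadlet is written twice on purpose.
 * The descriptor is first marked BS_HOST_BUSY while its byte count and
 * buffer pointer are filled in, and only flipped to BS_HOST_READY as the
 * last step, presumably so the core never picks up a half-initialized
 * descriptor:
 *
 *     dma_desc->status.b.bs = BS_HOST_BUSY;     // claim the descriptor
 *     dma_desc->status.b.bytes = ...;           // fill in the fields
 *     dma_desc->buf = ...;
 *     dma_desc->status.b.bs = BS_HOST_READY;    // publish it last
 */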
703
704 /**
705 * This interrupt occurs when a USB Reset is detected. When the USB
706 * Reset Interrupt occurs the device state is set to DEFAULT and the
707 * EP0 state is set to IDLE.
708 * -# Set the NAK bit for all OUT endpoints (DOEPCTLn.SNAK = 1)
709 * -# Unmask the following interrupt bits
710 * - DAINTMSK.INEP0 = 1 (Control 0 IN endpoint)
711 * - DAINTMSK.OUTEP0 = 1 (Control 0 OUT endpoint)
712 * - DOEPMSK.SETUP = 1
713 * - DOEPMSK.XferCompl = 1
714 * - DIEPMSK.XferCompl = 1
715 * - DIEPMSK.TimeOut = 1
716 * -# Program the following fields in the endpoint specific registers
717 * for Control OUT EP 0, in order to receive a setup packet
718 * - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back
719 * setup packets)
720 * - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back
721 * to back setup packets)
722 * - In DMA mode, DOEPDMA0 Register with a memory address to
723 * store any setup packets received
724  * At this point all the required initialization for receiving SETUP
725  * packets is done, except for enabling the control 0 OUT endpoint.
726 */
727 int32_t dwc_otg_pcd_handle_usb_reset_intr(dwc_otg_pcd_t * pcd)
728 {
729 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
730 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
731 depctl_data_t doepctl = { .d32 = 0};
732
733 daint_data_t daintmsk = { .d32 = 0};
734 doepmsk_data_t doepmsk = { .d32 = 0};
735 diepmsk_data_t diepmsk = { .d32 = 0};
736
737 dcfg_data_t dcfg = { .d32=0 };
738 grstctl_t resetctl = { .d32=0 };
739 dctl_data_t dctl = {.d32=0};
740 int i = 0;
741 gintsts_data_t gintsts;
742
743 DWC_PRINT("USB RESET\n");
744 #ifdef DWC_EN_ISOC
745 for(i = 1;i < 16; ++i)
746 {
747 dwc_otg_pcd_ep_t *ep;
748 dwc_ep_t *dwc_ep;
749 ep = get_in_ep(pcd,i);
750 if(ep != 0){
751 dwc_ep = &ep->dwc_ep;
752 dwc_ep->next_frame = 0xffffffff;
753 }
754 }
755 #endif /* DWC_EN_ISOC */
756
757 /* reset the HNP settings */
758 dwc_otg_pcd_update_otg(pcd, 1);
759
760 /* Clear the Remote Wakeup Signalling */
761 dctl.b.rmtwkupsig = 1;
762 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl,
763 dctl.d32, 0);
764
765 /* Set NAK for all OUT EPs */
766 doepctl.b.snak = 1;
767 for (i=0; i <= dev_if->num_out_eps; i++)
768 {
769 dwc_write_reg32(&dev_if->out_ep_regs[i]->doepctl,
770 doepctl.d32);
771 }
772
773 /* Flush the NP Tx FIFO */
774 dwc_otg_flush_tx_fifo(core_if, 0x10);
775 /* Flush the Learning Queue */
776 resetctl.b.intknqflsh = 1;
777 dwc_write_reg32(&core_if->core_global_regs->grstctl, resetctl.d32);
778
779 if(core_if->multiproc_int_enable) {
780 daintmsk.b.inep0 = 1;
781 daintmsk.b.outep0 = 1;
782 dwc_write_reg32(&dev_if->dev_global_regs->deachintmsk, daintmsk.d32);
783
784 doepmsk.b.setup = 1;
785 doepmsk.b.xfercompl = 1;
786 doepmsk.b.ahberr = 1;
787 doepmsk.b.epdisabled = 1;
788
789 if(core_if->dma_desc_enable) {
790 doepmsk.b.stsphsercvd = 1;
791 doepmsk.b.bna = 1;
792 }
793 /*
794 doepmsk.b.babble = 1;
795 doepmsk.b.nyet = 1;
796
797 if(core_if->dma_enable) {
798 doepmsk.b.nak = 1;
799 }
800 */
801 dwc_write_reg32(&dev_if->dev_global_regs->doepeachintmsk[0], doepmsk.d32);
802
803 diepmsk.b.xfercompl = 1;
804 diepmsk.b.timeout = 1;
805 diepmsk.b.epdisabled = 1;
806 diepmsk.b.ahberr = 1;
807 diepmsk.b.intknepmis = 1;
808
809 if(core_if->dma_desc_enable) {
810 diepmsk.b.bna = 1;
811 }
812 /*
813 if(core_if->dma_enable) {
814 diepmsk.b.nak = 1;
815 }
816 */
817 dwc_write_reg32(&dev_if->dev_global_regs->diepeachintmsk[0], diepmsk.d32);
818 } else{
819 daintmsk.b.inep0 = 1;
820 daintmsk.b.outep0 = 1;
821 dwc_write_reg32(&dev_if->dev_global_regs->daintmsk, daintmsk.d32);
822
823 doepmsk.b.setup = 1;
824 doepmsk.b.xfercompl = 1;
825 doepmsk.b.ahberr = 1;
826 doepmsk.b.epdisabled = 1;
827
828 if(core_if->dma_desc_enable) {
829 doepmsk.b.stsphsercvd = 1;
830 doepmsk.b.bna = 1;
831 }
832 /*
833 doepmsk.b.babble = 1;
834 doepmsk.b.nyet = 1;
835 doepmsk.b.nak = 1;
836 */
837 dwc_write_reg32(&dev_if->dev_global_regs->doepmsk, doepmsk.d32);
838
839 diepmsk.b.xfercompl = 1;
840 diepmsk.b.timeout = 1;
841 diepmsk.b.epdisabled = 1;
842 diepmsk.b.ahberr = 1;
843 diepmsk.b.intknepmis = 1;
844
845 if(core_if->dma_desc_enable) {
846 diepmsk.b.bna = 1;
847 }
848
849 // diepmsk.b.nak = 1;
850
851 dwc_write_reg32(&dev_if->dev_global_regs->diepmsk, diepmsk.d32);
852 }
853
854 /* Reset Device Address */
855 dcfg.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dcfg);
856 dcfg.b.devaddr = 0;
857 dwc_write_reg32(&dev_if->dev_global_regs->dcfg, dcfg.d32);
858
859 /* setup EP0 to receive SETUP packets */
860 ep0_out_start(core_if, pcd);
861
862 /* Clear interrupt */
863 gintsts.d32 = 0;
864 gintsts.b.usbreset = 1;
865 dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32);
866
867 return 1;
868 }
869
870 /**
871 * Get the device speed from the device status register and convert it
872  * to a USB speed constant.
873 *
874 * @param core_if Programming view of DWC_otg controller.
875 */
876 static int get_device_speed(dwc_otg_core_if_t *core_if)
877 {
878 dsts_data_t dsts;
879 enum usb_device_speed speed = USB_SPEED_UNKNOWN;
880 dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts);
881
882 switch (dsts.b.enumspd) {
883 case DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ:
884 speed = USB_SPEED_HIGH;
885 break;
886 case DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ:
887 case DWC_DSTS_ENUMSPD_FS_PHY_48MHZ:
888 speed = USB_SPEED_FULL;
889 break;
890
891 case DWC_DSTS_ENUMSPD_LS_PHY_6MHZ:
892 speed = USB_SPEED_LOW;
893 break;
894 }
895
896 return speed;
897 }
898
899 /**
900 * Read the device status register and set the device speed in the
901 * data structure.
902 * Set up EP0 to receive SETUP packets by calling dwc_ep0_activate.
903 */
904 int32_t dwc_otg_pcd_handle_enum_done_intr(dwc_otg_pcd_t *pcd)
905 {
906 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
907 gintsts_data_t gintsts;
908 gusbcfg_data_t gusbcfg;
909 dwc_otg_core_global_regs_t *global_regs =
910 GET_CORE_IF(pcd)->core_global_regs;
911 uint8_t utmi16b, utmi8b;
912 // DWC_DEBUGPL(DBG_PCD, "SPEED ENUM\n");
913 DWC_PRINT("SPEED ENUM\n");
914
915 if (GET_CORE_IF(pcd)->snpsid >= 0x4F54260A) {
916 utmi16b = 6;
917 utmi8b = 9;
918 } else {
919 utmi16b = 4;
920 utmi8b = 8;
921 }
922 dwc_otg_ep0_activate(GET_CORE_IF(pcd), &ep0->dwc_ep);
923
924 #ifdef DEBUG_EP0
925 print_ep0_state(pcd);
926 #endif
927
928 if (pcd->ep0state == EP0_DISCONNECT) {
929 pcd->ep0state = EP0_IDLE;
930 }
931 else if (pcd->ep0state == EP0_STALL) {
932 pcd->ep0state = EP0_IDLE;
933 }
934
935 pcd->ep0state = EP0_IDLE;
936
937 ep0->stopped = 0;
938
939 pcd->gadget.speed = get_device_speed(GET_CORE_IF(pcd));
940
941 /* Set USB turnaround time based on device speed and PHY interface. */
942 gusbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
943 if (pcd->gadget.speed == USB_SPEED_HIGH) {
944 if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == DWC_HWCFG2_HS_PHY_TYPE_ULPI) {
945 /* ULPI interface */
946 gusbcfg.b.usbtrdtim = 9;
947 }
948 if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == DWC_HWCFG2_HS_PHY_TYPE_UTMI) {
949 /* UTMI+ interface */
950 if (GET_CORE_IF(pcd)->hwcfg4.b.utmi_phy_data_width == 0) {
951 gusbcfg.b.usbtrdtim = utmi8b;
952 }
953 else if (GET_CORE_IF(pcd)->hwcfg4.b.utmi_phy_data_width == 1) {
954 gusbcfg.b.usbtrdtim = utmi16b;
955 }
956 else if (GET_CORE_IF(pcd)->core_params->phy_utmi_width == 8) {
957 gusbcfg.b.usbtrdtim = utmi8b;
958 }
959 else {
960 gusbcfg.b.usbtrdtim = utmi16b;
961 }
962 }
963 if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == DWC_HWCFG2_HS_PHY_TYPE_UTMI_ULPI) {
964 /* UTMI+ OR ULPI interface */
965 if (gusbcfg.b.ulpi_utmi_sel == 1) {
966 /* ULPI interface */
967 gusbcfg.b.usbtrdtim = 9;
968 }
969 else {
970 /* UTMI+ interface */
971 if (GET_CORE_IF(pcd)->core_params->phy_utmi_width == 16) {
972 gusbcfg.b.usbtrdtim = utmi16b;
973 }
974 else {
975 gusbcfg.b.usbtrdtim = utmi8b;
976 }
977 }
978 }
979 }
980 else {
981 /* Full or low speed */
982 gusbcfg.b.usbtrdtim = 9;
983 }
984 dwc_write_reg32(&global_regs->gusbcfg, gusbcfg.d32);
985
986 /* Clear interrupt */
987 gintsts.d32 = 0;
988 gintsts.b.enumdone = 1;
989 dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
990 gintsts.d32);
991 return 1;
992 }
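/*
 * Summary of the turnaround-time selection above (added for clarity, not
 * part of the original source): GUSBCFG.USBTrdTim ends up as 9 for a ULPI
 * PHY and for full/low speed, and as utmi8b or utmi16b for a UTMI+ PHY
 * depending on its data-bus width, where (utmi8b, utmi16b) is (9, 6) on
 * cores with SNPSID >= 0x4F54260A and (8, 4) on older cores.
 */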
993
994 /**
995 * This interrupt indicates that the ISO OUT Packet was dropped due to
996 * Rx FIFO full or Rx Status Queue Full. If this interrupt occurs
997 * read all the data from the Rx FIFO.
998 */
999 int32_t dwc_otg_pcd_handle_isoc_out_packet_dropped_intr(dwc_otg_pcd_t *pcd)
1000 {
1001 gintmsk_data_t intr_mask = { .d32 = 0};
1002 gintsts_data_t gintsts;
1003
1004 DWC_PRINT("INTERRUPT Handler not implemented for %s\n",
1005 "ISOC Out Dropped");
1006
1007 intr_mask.b.isooutdrop = 1;
1008 dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
1009 intr_mask.d32, 0);
1010
1011 /* Clear interrupt */
1012
1013 gintsts.d32 = 0;
1014 gintsts.b.isooutdrop = 1;
1015 dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
1016 gintsts.d32);
1017
1018 return 1;
1019 }
1020
1021 /**
1022 * This interrupt indicates the end of the portion of the micro-frame
1023 * for periodic transactions. If there is a periodic transaction for
1024 * the next frame, load the packets into the EP periodic Tx FIFO.
1025 */
1026 int32_t dwc_otg_pcd_handle_end_periodic_frame_intr(dwc_otg_pcd_t *pcd)
1027 {
1028 gintmsk_data_t intr_mask = { .d32 = 0};
1029 gintsts_data_t gintsts;
1030 DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "EOP");
1031
1032 intr_mask.b.eopframe = 1;
1033 dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
1034 intr_mask.d32, 0);
1035
1036 /* Clear interrupt */
1037 gintsts.d32 = 0;
1038 gintsts.b.eopframe = 1;
1039 dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, gintsts.d32);
1040
1041 return 1;
1042 }
1043
1044 /**
1045  * This interrupt indicates that the EP of the packet on top of the
1046  * non-periodic Tx FIFO does not match the EP of the IN Token received.
1047 *
1048 * The "Device IN Token Queue" Registers are read to determine the
1049 * order the IN Tokens have been received. The non-periodic Tx FIFO
1050 * is flushed, so it can be reloaded in the order seen in the IN Token
1051 * Queue.
1052 */
1053 int32_t dwc_otg_pcd_handle_ep_mismatch_intr(dwc_otg_core_if_t *core_if)
1054 {
1055 gintsts_data_t gintsts;
1056 DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, core_if);
1057
1058 /* Clear interrupt */
1059 gintsts.d32 = 0;
1060 gintsts.b.epmismatch = 1;
1061 dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32);
1062
1063 return 1;
1064 }
1065
1066 /**
1067  * This function stalls EP0.
1068 */
1069 static inline void ep0_do_stall(dwc_otg_pcd_t *pcd, const int err_val)
1070 {
1071 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
1072 struct usb_ctrlrequest *ctrl = &pcd->setup_pkt->req;
1073 DWC_WARN("req %02x.%02x protocol STALL; err %d\n",
1074 ctrl->bRequestType, ctrl->bRequest, err_val);
1075
1076 ep0->dwc_ep.is_in = 1;
1077 dwc_otg_ep_set_stall(pcd->otg_dev->core_if, &ep0->dwc_ep);
1078 pcd->ep0.stopped = 1;
1079 pcd->ep0state = EP0_IDLE;
1080 ep0_out_start(GET_CORE_IF(pcd), pcd);
1081 }
1082
1083 /**
1084  * This function delegates the setup command to the gadget driver.
1085 */
1086 static inline void do_gadget_setup(dwc_otg_pcd_t *pcd,
1087 struct usb_ctrlrequest * ctrl)
1088 {
1089 int ret = 0;
1090 if (pcd->driver && pcd->driver->setup) {
1091 SPIN_UNLOCK(&pcd->lock);
1092 ret = pcd->driver->setup(&pcd->gadget, ctrl);
1093 SPIN_LOCK(&pcd->lock);
1094 if (ret < 0) {
1095 ep0_do_stall(pcd, ret);
1096 }
1097
1098 /** @todo This is a g_file_storage gadget driver specific
1099 * workaround: a DELAYED_STATUS result from the fsg_setup
1100 * routine will result in the gadget queueing a EP0 IN status
1101 * phase for a two-stage control transfer. Exactly the same as
1102 * a SET_CONFIGURATION/SET_INTERFACE except that this is a class
1103 * specific request. Need a generic way to know when the gadget
1104 * driver will queue the status phase. Can we assume when we
1105 * call the gadget driver setup() function that it will always
1106 * queue and require the following flag? Need to look into
1107 * this.
1108 */
1109
1110 if (ret == 256 + 999) {
1111 pcd->request_config = 1;
1112 }
1113 }
1114 }
1115
1116 /**
1117 * This function starts the Zero-Length Packet for the IN status phase
1118 * of a 2 stage control transfer.
1119 */
1120 static inline void do_setup_in_status_phase(dwc_otg_pcd_t *pcd)
1121 {
1122 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
1123 if (pcd->ep0state == EP0_STALL) {
1124 return;
1125 }
1126
1127 pcd->ep0state = EP0_IN_STATUS_PHASE;
1128
1129 /* Prepare for more SETUP Packets */
1130 DWC_DEBUGPL(DBG_PCD, "EP0 IN ZLP\n");
1131 ep0->dwc_ep.xfer_len = 0;
1132 ep0->dwc_ep.xfer_count = 0;
1133 ep0->dwc_ep.is_in = 1;
1134 ep0->dwc_ep.dma_addr = pcd->setup_pkt_dma_handle;
1135 dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep);
1136
1137 /* Prepare for more SETUP Packets */
1138 // if(GET_CORE_IF(pcd)->dma_enable == 0) ep0_out_start(GET_CORE_IF(pcd), pcd);
1139 }
1140
1141 /**
1142 * This function starts the Zero-Length Packet for the OUT status phase
1143 * of a 2 stage control transfer.
1144 */
1145 static inline void do_setup_out_status_phase(dwc_otg_pcd_t *pcd)
1146 {
1147 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
1148 if (pcd->ep0state == EP0_STALL) {
1149 DWC_DEBUGPL(DBG_PCD, "EP0 STALLED\n");
1150 return;
1151 }
1152 pcd->ep0state = EP0_OUT_STATUS_PHASE;
1153
1154 DWC_DEBUGPL(DBG_PCD, "EP0 OUT ZLP\n");
1155 ep0->dwc_ep.xfer_len = 0;
1156 ep0->dwc_ep.xfer_count = 0;
1157 ep0->dwc_ep.is_in = 0;
1158 ep0->dwc_ep.dma_addr = pcd->setup_pkt_dma_handle;
1159 dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep);
1160
1161 /* Prepare for more SETUP Packets */
1162 if(GET_CORE_IF(pcd)->dma_enable == 0) {
1163 ep0_out_start(GET_CORE_IF(pcd), pcd);
1164 }
1165 }
1166
1167 /**
1168  * Clear the EP halt (STALL) and, if there are pending requests, start
1169  * the transfer.
1170 */
1171 static inline void pcd_clear_halt(dwc_otg_pcd_t *pcd, dwc_otg_pcd_ep_t *ep)
1172 {
1173 if(ep->dwc_ep.stall_clear_flag == 0)
1174 dwc_otg_ep_clear_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
1175
1176 	/* Reactivate the EP */
1177 dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep);
1178 if (ep->stopped) {
1179 ep->stopped = 0;
1180 /* If there is a request in the EP queue start it */
1181
1182 /** @todo FIXME: this causes an EP mismatch in DMA mode.
1183 * epmismatch not yet implemented. */
1184
1185 /*
1186 		 * The FIXME above is addressed by implementing a tasklet that calls
1187 		 * start_next_request() outside of interrupt context, some time after
1188 		 * the clear-halt SETUP packet has been handled. EP mismatch handling
1189 		 * still needs to be implemented in the future if a gadget ever uses
1190 		 * more than one endpoint at once.
1191 */
1192 ep->queue_sof = 1;
1193 tasklet_schedule (pcd->start_xfer_tasklet);
1194 }
1195 /* Start Control Status Phase */
1196 do_setup_in_status_phase(pcd);
1197 }
1198
1199 /**
1200 * This function is called when the SET_FEATURE TEST_MODE Setup packet
1201 * is sent from the host. The Device Control register is written with
1202 * the Test Mode bits set to the specified Test Mode. This is done as
1203 * a tasklet so that the "Status" phase of the control transfer
1204 * completes before transmitting the TEST packets.
1205 *
1206 * @todo This has not been tested since the tasklet struct was put
1207 * into the PCD struct!
1208 *
1209 */
1210 static void do_test_mode(unsigned long data)
1211 {
1212 dctl_data_t dctl;
1213 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)data;
1214 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1215 int test_mode = pcd->test_mode;
1216
1217
1218 // DWC_WARN("%s() has not been tested since being rewritten!\n", __func__);
1219
1220 dctl.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dctl);
1221 switch (test_mode) {
1222 case 1: // TEST_J
1223 dctl.b.tstctl = 1;
1224 break;
1225
1226 case 2: // TEST_K
1227 dctl.b.tstctl = 2;
1228 break;
1229
1230 case 3: // TEST_SE0_NAK
1231 dctl.b.tstctl = 3;
1232 break;
1233
1234 case 4: // TEST_PACKET
1235 dctl.b.tstctl = 4;
1236 break;
1237
1238 case 5: // TEST_FORCE_ENABLE
1239 dctl.b.tstctl = 5;
1240 break;
1241 }
1242 dwc_write_reg32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32);
1243 }
1244
1245 /**
1246  * This function processes the GET_STATUS Setup Commands.
1247 */
1248 static inline void do_get_status(dwc_otg_pcd_t *pcd)
1249 {
1250 struct usb_ctrlrequest ctrl = pcd->setup_pkt->req;
1251 dwc_otg_pcd_ep_t *ep;
1252 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
1253 uint16_t *status = pcd->status_buf;
1254
1255 #ifdef DEBUG_EP0
1256 DWC_DEBUGPL(DBG_PCD,
1257 "GET_STATUS %02x.%02x v%04x i%04x l%04x\n",
1258 ctrl.bRequestType, ctrl.bRequest,
1259 ctrl.wValue, ctrl.wIndex, ctrl.wLength);
1260 #endif
1261
1262 switch (ctrl.bRequestType & USB_RECIP_MASK) {
1263 case USB_RECIP_DEVICE:
1264 *status = 0x1; /* Self powered */
1265 *status |= pcd->remote_wakeup_enable << 1;
1266 break;
1267
1268 case USB_RECIP_INTERFACE:
1269 *status = 0;
1270 break;
1271
1272 case USB_RECIP_ENDPOINT:
1273 ep = get_ep_by_addr(pcd, ctrl.wIndex);
1274 if (ep == 0 || ctrl.wLength > 2) {
1275 ep0_do_stall(pcd, -EOPNOTSUPP);
1276 return;
1277 }
1278 /** @todo check for EP stall */
1279 *status = ep->stopped;
1280 break;
1281 }
1282 pcd->ep0_pending = 1;
1283 ep0->dwc_ep.start_xfer_buff = (uint8_t *)status;
1284 ep0->dwc_ep.xfer_buff = (uint8_t *)status;
1285 ep0->dwc_ep.dma_addr = pcd->status_buf_dma_handle;
1286 ep0->dwc_ep.xfer_len = 2;
1287 ep0->dwc_ep.xfer_count = 0;
1288 ep0->dwc_ep.total_len = ep0->dwc_ep.xfer_len;
1289 dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep);
1290 }
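/*
 * Example of the status word built above (added for clarity, not part of
 * the original source).  For a GET_STATUS directed at the device while
 * remote wakeup has been enabled:
 *
 *     *status = 0x1;                    // bit 0: self powered
 *     *status |= 1 << 1;                // bit 1: remote wakeup enabled
 *     // status_buf now holds 0x0003 and is returned in a
 *     // two-byte IN data phase (xfer_len = 2)
 */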
1291 /**
1292  * This function processes the SET_FEATURE Setup Commands.
1293 */
1294 static inline void do_set_feature(dwc_otg_pcd_t *pcd)
1295 {
1296 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1297 dwc_otg_core_global_regs_t *global_regs =
1298 core_if->core_global_regs;
1299 struct usb_ctrlrequest ctrl = pcd->setup_pkt->req;
1300 dwc_otg_pcd_ep_t *ep = 0;
1301 int32_t otg_cap_param = core_if->core_params->otg_cap;
1302 gotgctl_data_t gotgctl = { .d32 = 0 };
1303
1304 DWC_DEBUGPL(DBG_PCD, "SET_FEATURE:%02x.%02x v%04x i%04x l%04x\n",
1305 ctrl.bRequestType, ctrl.bRequest,
1306 ctrl.wValue, ctrl.wIndex, ctrl.wLength);
1307 DWC_DEBUGPL(DBG_PCD,"otg_cap=%d\n", otg_cap_param);
1308
1309
1310 switch (ctrl.bRequestType & USB_RECIP_MASK) {
1311 case USB_RECIP_DEVICE:
1312 switch (ctrl.wValue) {
1313 case USB_DEVICE_REMOTE_WAKEUP:
1314 pcd->remote_wakeup_enable = 1;
1315 break;
1316
1317 case USB_DEVICE_TEST_MODE:
1318 /* Setup the Test Mode tasklet to do the Test
1319 * Packet generation after the SETUP Status
1320 * phase has completed. */
1321
1322 /** @todo This has not been tested since the
1323 * tasklet struct was put into the PCD
1324 * struct! */
1325 pcd->test_mode_tasklet.next = 0;
1326 pcd->test_mode_tasklet.state = 0;
1327 atomic_set(&pcd->test_mode_tasklet.count, 0);
1328 pcd->test_mode_tasklet.func = do_test_mode;
1329 pcd->test_mode_tasklet.data = (unsigned long)pcd;
1330 pcd->test_mode = ctrl.wIndex >> 8;
1331 tasklet_schedule(&pcd->test_mode_tasklet);
1332 break;
1333
1334 case USB_DEVICE_B_HNP_ENABLE:
1335 DWC_DEBUGPL(DBG_PCDV, "SET_FEATURE: USB_DEVICE_B_HNP_ENABLE\n");
1336
1337 /* dev may initiate HNP */
1338 if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
1339 pcd->b_hnp_enable = 1;
1340 dwc_otg_pcd_update_otg(pcd, 0);
1341 DWC_DEBUGPL(DBG_PCD, "Request B HNP\n");
1342 /**@todo Is the gotgctl.devhnpen cleared
1343 * by a USB Reset? */
1344 gotgctl.b.devhnpen = 1;
1345 gotgctl.b.hnpreq = 1;
1346 dwc_write_reg32(&global_regs->gotgctl, gotgctl.d32);
1347 }
1348 else {
1349 ep0_do_stall(pcd, -EOPNOTSUPP);
1350 }
1351 break;
1352
1353 case USB_DEVICE_A_HNP_SUPPORT:
1354 /* RH port supports HNP */
1355 DWC_DEBUGPL(DBG_PCDV, "SET_FEATURE: USB_DEVICE_A_HNP_SUPPORT\n");
1356 if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
1357 pcd->a_hnp_support = 1;
1358 dwc_otg_pcd_update_otg(pcd, 0);
1359 }
1360 else {
1361 ep0_do_stall(pcd, -EOPNOTSUPP);
1362 }
1363 break;
1364
1365 case USB_DEVICE_A_ALT_HNP_SUPPORT:
1366 /* other RH port does */
1367 DWC_DEBUGPL(DBG_PCDV, "SET_FEATURE: USB_DEVICE_A_ALT_HNP_SUPPORT\n");
1368 if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
1369 pcd->a_alt_hnp_support = 1;
1370 dwc_otg_pcd_update_otg(pcd, 0);
1371 }
1372 else {
1373 ep0_do_stall(pcd, -EOPNOTSUPP);
1374 }
1375 break;
1376 }
1377 do_setup_in_status_phase(pcd);
1378 break;
1379
1380 case USB_RECIP_INTERFACE:
1381 do_gadget_setup(pcd, &ctrl);
1382 break;
1383
1384 case USB_RECIP_ENDPOINT:
1385 if (ctrl.wValue == USB_ENDPOINT_HALT) {
1386 ep = get_ep_by_addr(pcd, ctrl.wIndex);
1387 if (ep == 0) {
1388 ep0_do_stall(pcd, -EOPNOTSUPP);
1389 return;
1390 }
1391 ep->stopped = 1;
1392 dwc_otg_ep_set_stall(core_if, &ep->dwc_ep);
1393 }
1394 do_setup_in_status_phase(pcd);
1395 break;
1396 }
1397 }
1398
1399 /**
1400  * This function processes the CLEAR_FEATURE Setup Commands.
1401 */
1402 static inline void do_clear_feature(dwc_otg_pcd_t *pcd)
1403 {
1404 struct usb_ctrlrequest ctrl = pcd->setup_pkt->req;
1405 dwc_otg_pcd_ep_t *ep = 0;
1406
1407 DWC_DEBUGPL(DBG_PCD,
1408 "CLEAR_FEATURE:%02x.%02x v%04x i%04x l%04x\n",
1409 ctrl.bRequestType, ctrl.bRequest,
1410 ctrl.wValue, ctrl.wIndex, ctrl.wLength);
1411
1412 switch (ctrl.bRequestType & USB_RECIP_MASK) {
1413 case USB_RECIP_DEVICE:
1414 switch (ctrl.wValue) {
1415 case USB_DEVICE_REMOTE_WAKEUP:
1416 pcd->remote_wakeup_enable = 0;
1417 break;
1418
1419 case USB_DEVICE_TEST_MODE:
1420 /** @todo Add CLEAR_FEATURE for TEST modes. */
1421 break;
1422 }
1423 do_setup_in_status_phase(pcd);
1424 break;
1425
1426 case USB_RECIP_ENDPOINT:
1427 ep = get_ep_by_addr(pcd, ctrl.wIndex);
1428 if (ep == 0) {
1429 ep0_do_stall(pcd, -EOPNOTSUPP);
1430 return;
1431 }
1432
1433 pcd_clear_halt(pcd, ep);
1434
1435 break;
1436 }
1437 }
1438
1439 /**
1440  * This function processes the SET_ADDRESS Setup Commands.
1441 */
1442 static inline void do_set_address(dwc_otg_pcd_t *pcd)
1443 {
1444 dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
1445 struct usb_ctrlrequest ctrl = pcd->setup_pkt->req;
1446
1447 if (ctrl.bRequestType == USB_RECIP_DEVICE) {
1448 dcfg_data_t dcfg = {.d32=0};
1449
1450 #ifdef DEBUG_EP0
1451 // DWC_DEBUGPL(DBG_PCDV, "SET_ADDRESS:%d\n", ctrl.wValue);
1452 #endif
1453 dcfg.b.devaddr = ctrl.wValue;
1454 dwc_modify_reg32(&dev_if->dev_global_regs->dcfg, 0, dcfg.d32);
1455 do_setup_in_status_phase(pcd);
1456 }
1457 }
1458
1459 /**
1460 * This function processes SETUP commands. In Linux, the USB Command
1461 * processing is done in two places - the first being the PCD and the
1462 * second in the Gadget Driver (for example, the File-Backed Storage
1463 * Gadget Driver).
1464 *
1465 * <table>
1466 * <tr><td>Command </td><td>Driver </td><td>Description</td></tr>
1467 *
1468 * <tr><td>GET_STATUS </td><td>PCD </td><td>Command is processed as
1469  * defined in chapter 9 of the USB 2.0 Specification
1470 * </td></tr>
1471 *
1472 * <tr><td>CLEAR_FEATURE </td><td>PCD </td><td>The Device and Endpoint
1473  * requests for the ENDPOINT_HALT feature are processed; all interface
1474  * requests are ignored.</td></tr>
1475 *
1476 * <tr><td>SET_FEATURE </td><td>PCD </td><td>The Device and Endpoint
1477 * requests are processed by the PCD. Interface requests are passed
1478 * to the Gadget Driver.</td></tr>
1479 *
1480 * <tr><td>SET_ADDRESS </td><td>PCD </td><td>Program the DCFG reg,
1481 * with device address received </td></tr>
1482 *
1483 * <tr><td>GET_DESCRIPTOR </td><td>Gadget Driver </td><td>Return the
1484 * requested descriptor</td></tr>
1485 *
1486 * <tr><td>SET_DESCRIPTOR </td><td>Gadget Driver </td><td>Optional -
1487 * not implemented by any of the existing Gadget Drivers.</td></tr>
1488 *
1489 * <tr><td>SET_CONFIGURATION </td><td>Gadget Driver </td><td>Disable
1490 * all EPs and enable EPs for new configuration.</td></tr>
1491 *
1492 * <tr><td>GET_CONFIGURATION </td><td>Gadget Driver </td><td>Return
1493 * the current configuration</td></tr>
1494 *
1495 * <tr><td>SET_INTERFACE </td><td>Gadget Driver </td><td>Disable all
1496 * EPs and enable EPs for new configuration.</td></tr>
1497 *
1498 * <tr><td>GET_INTERFACE </td><td>Gadget Driver </td><td>Return the
1499 * current interface.</td></tr>
1500 *
1501 * <tr><td>SYNC_FRAME </td><td>PCD </td><td>Display debug
1502 * message.</td></tr>
1503 * </table>
1504 *
1505 * When the SETUP Phase Done interrupt occurs, the PCD SETUP commands are
1506 * processed by pcd_setup. Calling the Function Driver's setup function from
1507 * pcd_setup processes the gadget SETUP commands.
1508 */
1509 static inline void pcd_setup(dwc_otg_pcd_t *pcd)
1510 {
1511 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1512 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
1513 struct usb_ctrlrequest ctrl = pcd->setup_pkt->req;
1514 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
1515
1516 deptsiz0_data_t doeptsize0 = { .d32 = 0};
1517
1518 #ifdef DEBUG_EP0
1519 DWC_DEBUGPL(DBG_PCD, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1520 ctrl.bRequestType, ctrl.bRequest,
1521 ctrl.wValue, ctrl.wIndex, ctrl.wLength);
1522 #endif
1523
1524 doeptsize0.d32 = dwc_read_reg32(&dev_if->out_ep_regs[0]->doeptsiz);
1525
1526 /** @todo handle > 1 setup packet , assert error for now */
1527
1528 if (core_if->dma_enable && core_if->dma_desc_enable == 0 && (doeptsize0.b.supcnt < 2)) {
1529 DWC_ERROR ("\n\n----------- CANNOT handle > 1 setup packet in DMA mode\n\n");
1530 }
1531
1532 /* Clean up the request queue */
1533 dwc_otg_request_nuke(ep0);
1534 ep0->stopped = 0;
1535
1536 if (ctrl.bRequestType & USB_DIR_IN) {
1537 ep0->dwc_ep.is_in = 1;
1538 pcd->ep0state = EP0_IN_DATA_PHASE;
1539 }
1540 else {
1541 ep0->dwc_ep.is_in = 0;
1542 pcd->ep0state = EP0_OUT_DATA_PHASE;
1543 }
1544
1545 if(ctrl.wLength == 0) {
1546 ep0->dwc_ep.is_in = 1;
1547 pcd->ep0state = EP0_IN_STATUS_PHASE;
1548 }
1549
1550 if ((ctrl.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) {
1551 /* handle non-standard (class/vendor) requests in the gadget driver */
1552 do_gadget_setup(pcd, &ctrl);
1553 return;
1554 }
1555
1556 /** @todo NGS: Handle bad setup packet? */
1557
1558 ///////////////////////////////////////////
1559 //// --- Standard Request handling --- ////
1560
1561 switch (ctrl.bRequest) {
1562 case USB_REQ_GET_STATUS:
1563 do_get_status(pcd);
1564 break;
1565
1566 case USB_REQ_CLEAR_FEATURE:
1567 do_clear_feature(pcd);
1568 break;
1569
1570 case USB_REQ_SET_FEATURE:
1571 do_set_feature(pcd);
1572 break;
1573
1574 case USB_REQ_SET_ADDRESS:
1575 do_set_address(pcd);
1576 break;
1577
1578 case USB_REQ_SET_INTERFACE:
1579 case USB_REQ_SET_CONFIGURATION:
1580 // _pcd->request_config = 1; /* Configuration changed */
1581 do_gadget_setup(pcd, &ctrl);
1582 break;
1583
1584 case USB_REQ_SYNCH_FRAME:
1585 do_gadget_setup(pcd, &ctrl);
1586 break;
1587
1588 default:
1589 /* Call the Gadget Driver's setup functions */
1590 do_gadget_setup(pcd, &ctrl);
1591 break;
1592 }
1593 }
1594
1595 /**
1596 * This function completes the ep0 control transfer.
1597 */
1598 static int32_t ep0_complete_request(dwc_otg_pcd_ep_t *ep)
1599 {
1600 dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd);
1601 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
1602 dwc_otg_dev_in_ep_regs_t *in_ep_regs =
1603 dev_if->in_ep_regs[ep->dwc_ep.num];
1604 #ifdef DEBUG_EP0
1605 dwc_otg_dev_out_ep_regs_t *out_ep_regs =
1606 dev_if->out_ep_regs[ep->dwc_ep.num];
1607 #endif
1608 deptsiz0_data_t deptsiz;
1609 desc_sts_data_t desc_sts;
1610 dwc_otg_pcd_request_t *req;
1611 int is_last = 0;
1612 dwc_otg_pcd_t *pcd = ep->pcd;
1613
1614 //DWC_DEBUGPL(DBG_PCDV, "%s() %s\n", __func__, _ep->ep.name);
1615
1616 if (pcd->ep0_pending && list_empty(&ep->queue)) {
1617 if (ep->dwc_ep.is_in) {
1618 #ifdef DEBUG_EP0
1619 DWC_DEBUGPL(DBG_PCDV, "Do setup OUT status phase\n");
1620 #endif
1621 do_setup_out_status_phase(pcd);
1622 }
1623 else {
1624 #ifdef DEBUG_EP0
1625 DWC_DEBUGPL(DBG_PCDV, "Do setup IN status phase\n");
1626 #endif
1627 do_setup_in_status_phase(pcd);
1628 }
1629 pcd->ep0_pending = 0;
1630 return 1;
1631 }
1632
1633 if (list_empty(&ep->queue)) {
1634 return 0;
1635 }
1636 req = list_entry(ep->queue.next, dwc_otg_pcd_request_t, queue);
1637
1638
1639 if (pcd->ep0state == EP0_OUT_STATUS_PHASE || pcd->ep0state == EP0_IN_STATUS_PHASE) {
1640 is_last = 1;
1641 }
1642 else if (ep->dwc_ep.is_in) {
1643 deptsiz.d32 = dwc_read_reg32(&in_ep_regs->dieptsiz);
1644 if(core_if->dma_desc_enable != 0)
1645 desc_sts.d32 = readl(dev_if->in_desc_addr);
1646 #ifdef DEBUG_EP0
1647 DWC_DEBUGPL(DBG_PCDV, "%s len=%d xfersize=%d pktcnt=%d\n",
1648 ep->ep.name, ep->dwc_ep.xfer_len,
1649 deptsiz.b.xfersize, deptsiz.b.pktcnt);
1650 #endif
1651
1652 if (((core_if->dma_desc_enable == 0) && (deptsiz.b.xfersize == 0)) ||
1653 ((core_if->dma_desc_enable != 0) && (desc_sts.b.bytes == 0))) {
1654 req->req.actual = ep->dwc_ep.xfer_count;
1655 /* Is a Zero Len Packet needed? */
1656 if (req->req.zero) {
1657 #ifdef DEBUG_EP0
1658 DWC_DEBUGPL(DBG_PCD, "Setup Rx ZLP\n");
1659 #endif
1660 req->req.zero = 0;
1661 }
1662 do_setup_out_status_phase(pcd);
1663 }
1664 }
1665 else {
1666 /* ep0-OUT */
1667 #ifdef DEBUG_EP0
1668 deptsiz.d32 = dwc_read_reg32(&out_ep_regs->doeptsiz);
1669 DWC_DEBUGPL(DBG_PCDV, "%s len=%d xsize=%d pktcnt=%d\n",
1670 ep->ep.name, ep->dwc_ep.xfer_len,
1671 deptsiz.b.xfersize,
1672 deptsiz.b.pktcnt);
1673 #endif
1674 req->req.actual = ep->dwc_ep.xfer_count;
1675 /* Is a Zero Len Packet needed? */
1676 if (req->req.zero) {
1677 #ifdef DEBUG_EP0
1678 DWC_DEBUGPL(DBG_PCDV, "Setup Tx ZLP\n");
1679 #endif
1680 req->req.zero = 0;
1681 }
1682 if(core_if->dma_desc_enable == 0)
1683 do_setup_in_status_phase(pcd);
1684 }
1685
1686 /* Complete the request */
1687 if (is_last) {
1688 dwc_otg_request_done(ep, req, 0);
1689 ep->dwc_ep.start_xfer_buff = 0;
1690 ep->dwc_ep.xfer_buff = 0;
1691 ep->dwc_ep.xfer_len = 0;
1692 return 1;
1693 }
1694 return 0;
1695 }
1696
1697 inline void aligned_buf_patch_on_buf_dma_oep_completion(dwc_otg_pcd_ep_t *ep, uint32_t byte_count)
1698 {
1699 dwc_ep_t *dwc_ep = &ep->dwc_ep;
1700 	if (byte_count && dwc_ep->aligned_buf &&
1701 	    dwc_ep->dma_addr >= dwc_ep->aligned_dma_addr &&
1702 	    dwc_ep->dma_addr <= (dwc_ep->aligned_dma_addr + dwc_ep->aligned_buf_size))
1703 	{
1704 		/* aligned bounce buffer was used; copy the completed data back */
1705 		u32 offset = (dwc_ep->dma_addr - dwc_ep->aligned_dma_addr);
1706 		memcpy(dwc_ep->start_xfer_buff + offset, dwc_ep->aligned_buf + offset, byte_count);
1707 }
1708 }
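/*
 * Illustrative sketch of the bounce-buffer copy-back above (added for
 * clarity, not part of the original source).  When an OUT transfer was run
 * out of the driver's aligned bounce buffer instead of the gadget's own
 * buffer, the received bytes are copied back at the matching offset once
 * the transfer completes:
 *
 *     offset = dma_addr - aligned_dma_addr;           // position in buffer
 *     memcpy(start_xfer_buff + offset,
 *            aligned_buf + offset, byte_count);       // hand data to gadget
 */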
1709
1710 /**
1711 * This function completes the request for the EP. If there are
1712 * additional requests for the EP in the queue they will be started.
1713 */
1714 static void complete_ep(dwc_otg_pcd_ep_t *ep)
1715 {
1716 dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd);
1717 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
1718 dwc_otg_dev_in_ep_regs_t *in_ep_regs =
1719 dev_if->in_ep_regs[ep->dwc_ep.num];
1720 deptsiz_data_t deptsiz;
1721 desc_sts_data_t desc_sts;
1722 dwc_otg_pcd_request_t *req = 0;
1723 dwc_otg_dma_desc_t* dma_desc;
1724 uint32_t byte_count = 0;
1725 int is_last = 0;
1726 int i;
1727
1728 DWC_DEBUGPL(DBG_PCDV,"%s() %s-%s\n", __func__, ep->ep.name,
1729 (ep->dwc_ep.is_in?"IN":"OUT"));
1730
1731 /* Get any pending requests */
1732 if (!list_empty(&ep->queue)) {
1733 req = list_entry(ep->queue.next, dwc_otg_pcd_request_t,
1734 queue);
1735 if (!req) {
1736 printk("complete_ep 0x%p, req = NULL!\n", ep);
1737 return;
1738 }
1739 }
1740 else {
1741 printk("complete_ep 0x%p, ep->queue empty!\n", ep);
1742 return;
1743 }
1744 DWC_DEBUGPL(DBG_PCD, "Requests %d\n", ep->pcd->request_pending);
1745
1746 if (ep->dwc_ep.is_in) {
1747 deptsiz.d32 = dwc_read_reg32(&in_ep_regs->dieptsiz);
1748
1749 if (core_if->dma_enable) {
1750 //dma_unmap_single(NULL,ep->dwc_ep.dma_addr,ep->dwc_ep.xfer_count,DMA_NONE);
1751 if(core_if->dma_desc_enable == 0) {
1752 //dma_unmap_single(NULL,ep->dwc_ep.dma_addr,ep->dwc_ep.xfer_count,DMA_NONE);
1753 if (deptsiz.b.xfersize == 0 && deptsiz.b.pktcnt == 0) {
1754 byte_count = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
1755 DWC_DEBUGPL(DBG_PCDV,"byte_count(%.8x) = (ep->dwc_ep.xfer_len(%.8x) - ep->dwc_ep.xfer_count(%.8x)\n", byte_count ,ep->dwc_ep.xfer_len , ep->dwc_ep.xfer_count );
1756
1757 ep->dwc_ep.xfer_buff += byte_count;
1758 ep->dwc_ep.dma_addr += byte_count;
1759 ep->dwc_ep.xfer_count += byte_count;
1760
1761 DWC_DEBUGPL(DBG_PCDV, "%s len=%d xfersize=%d pktcnt=%d\n",
1762 ep->ep.name, ep->dwc_ep.xfer_len,
1763 deptsiz.b.xfersize, deptsiz.b.pktcnt);
1764
1765 if(ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) {
1766 //dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep);
1767 printk("Warning: transfer ended, but specified len is not accomplished!! ep->total_len=%.x,ep->dwc_ep.sent_zlp=%d, byte_count(%.8x) = (ep->dwc_ep.xfer_len(%.8x) - ep->dwc_ep.xfer_count(%.8x) - deptsiz.b.xfersize(%.8x)\n", ep->dwc_ep.total_len, ep->dwc_ep.sent_zlp, byte_count ,ep->dwc_ep.xfer_len , ep->dwc_ep.xfer_count , deptsiz.b.xfersize);
1768 } else if(ep->dwc_ep.sent_zlp) {
1769 /*
1770 					 * This fragment of code initiates a zero-length
1771 					 * transfer when the queued request has a size that
1772 					 * is a multiple of the EP's max packet size and its
1773 					 * usb_request zero field is set, which means that
1774 					 * after the data has been transferred a zero-length
1775 					 * packet must also be sent at the end. For Slave and
1776 					 * Buffer DMA modes the software has to initiate two
1777 					 * transfers in this case: one with the transfer
1778 					 * size, and a second one of zero size. For
1779 					 * Descriptor DMA mode the software is able to
1780 					 * initiate a single transfer that handles all the
1781 					 * packets, including the final zero-length
1782 					 * packet.
1783 */
1784 ep->dwc_ep.sent_zlp = 0;
1785 dwc_otg_ep_start_zl_transfer(core_if, &ep->dwc_ep);
1786 } else {
1787 is_last = 1;
1788 }
1789 } else {
1790 DWC_WARN("Incomplete transfer (%s-%s [siz=%d pkt=%d])\n",
1791 ep->ep.name, (ep->dwc_ep.is_in?"IN":"OUT"),
1792 deptsiz.b.xfersize, deptsiz.b.pktcnt);
1793 }
1794 } else {
1795
1796 dma_desc = ep->dwc_ep.desc_addr;
1797 byte_count = 0;
1798 ep->dwc_ep.sent_zlp = 0;
1799
1800 for(i = 0; i < ep->dwc_ep.desc_cnt; ++i) {
1801 desc_sts.d32 = readl(dma_desc);
1802 byte_count += desc_sts.b.bytes;
1803 dma_desc++;
1804 }
1805
1806 if(byte_count == 0) {
1807 ep->dwc_ep.xfer_count = ep->dwc_ep.total_len;
1808 is_last = 1;
1809 } else {
1810 DWC_WARN("Incomplete transfer\n");
1811 }
1812 }
1813 } else {
1814 if (deptsiz.b.xfersize == 0 && deptsiz.b.pktcnt == 0) {
1815 /* Check if the whole transfer was completed,
1816 * if not, set up a transfer for the next portion of data
1817 */
1818 DWC_DEBUGPL(DBG_PCDV, "%s len=%d xfersize=%d pktcnt=%d\n",
1819 ep->ep.name, ep->dwc_ep.xfer_len,
1820 deptsiz.b.xfersize, deptsiz.b.pktcnt);
1821 if(ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) {
1822 //dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep);
1823 printk("Warning: transfer ended, but specified len is not accomplished!! ep->total_len=%.x,ep->dwc_ep.sent_zlp=%d, ep->dwc_ep.xfer_len(%.8x) \n", ep->dwc_ep.total_len, ep->dwc_ep.sent_zlp, ep->dwc_ep.xfer_len );
1824 } else if(ep->dwc_ep.sent_zlp) {
1825 /*
1826 * This code initiates a zero-length transfer
1827 * when the queued transfer size is a multiple
1828 * of the EP's max packet size and the
1829 * usb_request zero field is set, which means
1830 * that a zero-length packet must also be
1831 * transferred after the data. For Slave and
1832 * Buffer DMA modes SW has to initiate two
1833 * transfers in this case: one with the
1834 * transfer size and a second of zero length.
1835 * For Descriptor DMA mode SW is able to
1836 * initiate a single transfer that handles
1837 * all the packets, including the final
1838 * zero-length one.
1839 */
1840 ep->dwc_ep.sent_zlp = 0;
1841 dwc_otg_ep_start_zl_transfer(core_if, &ep->dwc_ep);
1842 } else {
1843 is_last = 1;
1844 }
1845 }
1846 else {
1847 DWC_WARN("Incomplete transfer (%s-%s [siz=%d pkt=%d])\n",
1848 ep->ep.name, (ep->dwc_ep.is_in?"IN":"OUT"),
1849 deptsiz.b.xfersize, deptsiz.b.pktcnt);
1850 }
1851 }
1852 } else {
1853 dwc_otg_dev_out_ep_regs_t *out_ep_regs =
1854 dev_if->out_ep_regs[ep->dwc_ep.num];
1855 desc_sts.d32 = 0;
1856 if(core_if->dma_enable) {
1857 //dma_unmap_single(NULL,ep->dwc_ep.dma_addr,ep->dwc_ep.xfer_count,DMA_FROM_DEVICE);
1858 if(core_if->dma_desc_enable) {
1859 DWC_WARN("\n\n%s: we need a cache invalidation here!!\n\n",__func__);
1860 dma_desc = ep->dwc_ep.desc_addr;
1861 byte_count = 0;
1862 ep->dwc_ep.sent_zlp = 0;
1863 for(i = 0; i < ep->dwc_ep.desc_cnt; ++i) {
1864 desc_sts.d32 = readl(dma_desc);
1865 byte_count += desc_sts.b.bytes;
1866 dma_desc++;
1867 }
1868
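/* byte_count is the residue reported by the descriptors, which were
 * programmed with the length rounded up to a word boundary; the
 * alignment pad is added back so xfer_count reflects the bytes
 * actually received (aligned length minus residue). */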
1869 ep->dwc_ep.xfer_count = ep->dwc_ep.total_len
1870 - byte_count + ((4 - (ep->dwc_ep.total_len & 0x3)) & 0x3);
1871
1872 //todo: invalidate cache & aligned buf patch on completion
1873 //
1874
1875 is_last = 1;
1876 } else {
1877 deptsiz.d32 = 0;
1878 deptsiz.d32 = dwc_read_reg32(&out_ep_regs->doeptsiz);
1879
1880 byte_count = (ep->dwc_ep.xfer_len -
1881 ep->dwc_ep.xfer_count - deptsiz.b.xfersize);
1882
1883 // dma_sync_single_for_device(NULL,ep->dwc_ep.dma_addr,byte_count,DMA_FROM_DEVICE);
1884
1885 DWC_DEBUGPL(DBG_PCDV,"ep->total_len=%.x,ep->dwc_ep.sent_zlp=%d, byte_count(%.8x) = (ep->dwc_ep.xfer_len(%.8x) - ep->dwc_ep.xfer_count(%.8x) - deptsiz.b.xfersize(%.8x)\n", ep->dwc_ep.total_len, ep->dwc_ep.sent_zlp, byte_count ,ep->dwc_ep.xfer_len , ep->dwc_ep.xfer_count , deptsiz.b.xfersize);
1886 //todo: invalidate cache & aligned buf patch on completion
1887 dma_sync_single_for_device(NULL,ep->dwc_ep.dma_addr,byte_count,DMA_FROM_DEVICE);
1888 aligned_buf_patch_on_buf_dma_oep_completion(ep,byte_count);
1889
1890 ep->dwc_ep.xfer_buff += byte_count;
1891 ep->dwc_ep.dma_addr += byte_count;
1892 ep->dwc_ep.xfer_count += byte_count;
1893
1894 /* Check if the whole transfer was completed,
1895 * if not, set up a transfer for the next portion of data
1896 */
1897 if(ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) {
1898 //dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep);
1899 printk("Warning: transfer ended, but specified len is not accomplished!! ep->total_len=%.x,ep->dwc_ep.sent_zlp=%d, byte_count(%.8x) = (ep->dwc_ep.xfer_len(%.8x) - ep->dwc_ep.xfer_count(%.8x) - deptsiz.b.xfersize(%.8x)\n", ep->dwc_ep.total_len, ep->dwc_ep.sent_zlp, byte_count ,ep->dwc_ep.xfer_len , ep->dwc_ep.xfer_count , deptsiz.b.xfersize);
1900 }
1901 else if(ep->dwc_ep.sent_zlp) {
1902 /*
1903 * This code initiates a zero-length transfer
1904 * when the queued transfer size is a multiple
1905 * of the EP's max packet size and the
1906 * usb_request zero field is set, which means
1907 * that a zero-length packet must also be
1908 * transferred after the data. For Slave and
1909 * Buffer DMA modes SW has to initiate two
1910 * transfers in this case: one with the
1911 * transfer size and a second of zero length.
1912 * For Descriptor DMA mode SW is able to
1913 * initiate a single transfer that handles
1914 * all the packets, including the final
1915 * zero-length one.
1916 */
1917 ep->dwc_ep.sent_zlp = 0;
1918 dwc_otg_ep_start_zl_transfer(core_if, &ep->dwc_ep);
1919 } else {
1920 is_last = 1;
1921 }
1922 }
1923 } else {
1924 /* Check if the whole transfer was completed,
1925 * if not, set up a transfer for the next portion of data
1926 */
1927 if(ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) {
1928 //dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep);
1929 printk("Warning: transfer ended, but specified len is not accomplished!! ep->total_len=%.x,ep->dwc_ep.sent_zlp=%d, ep->dwc_ep.xfer_len(%.8x) \n", ep->dwc_ep.total_len, ep->dwc_ep.sent_zlp, ep->dwc_ep.xfer_len );
1930 }
1931 else if(ep->dwc_ep.sent_zlp) {
1932 /*
1933 * This code initiates a zero-length transfer
1934 * when the queued transfer size is a multiple
1935 * of the EP's max packet size and the
1936 * usb_request zero field is set, which means
1937 * that a zero-length packet must also be
1938 * transferred after the data. For Slave and
1939 * Buffer DMA modes SW has to initiate two
1940 * transfers in this case: one with the
1941 * transfer size and a second of zero length.
1942 * For Descriptor DMA mode SW is able to
1943 * initiate a single transfer that handles
1944 * all the packets, including the final
1945 * zero-length one.
1946 */
1947 ep->dwc_ep.sent_zlp = 0;
1948 dwc_otg_ep_start_zl_transfer(core_if, &ep->dwc_ep);
1949 } else {
1950 is_last = 1;
1951 }
1952 }
1953
1954 #ifdef DEBUG
1955
1956 DWC_DEBUGPL(DBG_PCDV, "addr %p, %s len=%d cnt=%d xsize=%d pktcnt=%d\n",
1957 &out_ep_regs->doeptsiz, ep->ep.name, ep->dwc_ep.xfer_len,
1958 ep->dwc_ep.xfer_count,
1959 deptsiz.b.xfersize,
1960 deptsiz.b.pktcnt);
1961 #endif
1962 }
1963
1964 /* Complete the request */
1965 if (is_last) {
1966 req->req.actual = ep->dwc_ep.xfer_count;
1967
1968 dwc_otg_request_done(ep, req, 0);
1969
1970 ep->dwc_ep.start_xfer_buff = 0;
1971 ep->dwc_ep.xfer_buff = 0;
1972 ep->dwc_ep.xfer_len = 0;
1973
1974 /* If there is a request in the queue start it.*/
1975 start_next_request(ep);
1976 }
1977 }
1978
1979
1980 #ifdef DWC_EN_ISOC
1981
1982 /**
1983 * This function handles the BNA interrupt for Isochronous EPs
1984 *
1985 */
1986 static void dwc_otg_pcd_handle_iso_bna(dwc_otg_pcd_ep_t *ep)
1987 {
1988 dwc_ep_t *dwc_ep = &ep->dwc_ep;
1989 volatile uint32_t *addr;
1990 depctl_data_t depctl = {.d32 = 0};
1991 dwc_otg_pcd_t *pcd = ep->pcd;
1992 dwc_otg_dma_desc_t *dma_desc;
1993 int i;
1994
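/* BNA (Buffer Not Available) is raised when the core fetches a descriptor
 * that is not host-ready: hand every descriptor of the buffer currently
 * being processed back as BS_HOST_READY and re-enable the endpoint below. */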
1995 dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * (dwc_ep->proc_buf_num);
1996
1997 if(dwc_ep->is_in) {
1998 desc_sts_data_t sts = {.d32 = 0};
1999 for(i = 0;i < dwc_ep->desc_cnt; ++i, ++dma_desc)
2000 {
2001 sts.d32 = readl(&dma_desc->status);
2002 sts.b_iso_in.bs = BS_HOST_READY;
2003 writel(sts.d32,&dma_desc->status);
2004 }
2005 }
2006 else {
2007 desc_sts_data_t sts = {.d32 = 0};
2008 for(i = 0;i < dwc_ep->desc_cnt; ++i, ++dma_desc)
2009 {
2010 sts.d32 = readl(&dma_desc->status);
2011 sts.b_iso_out.bs = BS_HOST_READY;
2012 writel(sts.d32,&dma_desc->status);
2013 }
2014 }
2015
2016 if(dwc_ep->is_in == 0){
2017 addr = &GET_CORE_IF(pcd)->dev_if->out_ep_regs[dwc_ep->num]->doepctl;
2018 }
2019 else{
2020 addr = &GET_CORE_IF(pcd)->dev_if->in_ep_regs[dwc_ep->num]->diepctl;
2021 }
2022 depctl.b.epena = 1;
2023 dwc_modify_reg32(addr,depctl.d32,depctl.d32);
2024 }
2025
2026 /**
2027 * This function sets the latest iso packet information (non-PTI mode)
2028 *
2029 * @param core_if Programming view of DWC_otg controller.
2030 * @param ep The EP to start the transfer on.
2031 *
2032 */
2033 void set_current_pkt_info(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2034 {
2035 deptsiz_data_t deptsiz = { .d32 = 0 };
2036 dma_addr_t dma_addr;
2037 uint32_t offset;
2038
2039 if(ep->proc_buf_num)
2040 dma_addr = ep->dma_addr1;
2041 else
2042 dma_addr = ep->dma_addr0;
2043
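/* Read the remaining transfer size for this direction; a zero xfersize
 * below means the current packet completed, otherwise it is reported
 * as -ENODATA. */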
2044 if(ep->is_in) {
2045 deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz);
2046 offset = ep->data_per_frame;
2047 } else {
2048 deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz);
2049 offset = ep->data_per_frame + (0x4 & (0x4 - (ep->data_per_frame & 0x3)));
2050 }
2051
2052 if(!deptsiz.b.xfersize) {
2053 ep->pkt_info[ep->cur_pkt].length = ep->data_per_frame;
2054 ep->pkt_info[ep->cur_pkt].offset = ep->cur_pkt_dma_addr - dma_addr;
2055 ep->pkt_info[ep->cur_pkt].status = 0;
2056 } else {
2057 ep->pkt_info[ep->cur_pkt].length = ep->data_per_frame;
2058 ep->pkt_info[ep->cur_pkt].offset = ep->cur_pkt_dma_addr - dma_addr;
2059 ep->pkt_info[ep->cur_pkt].status = -ENODATA;
2060 }
2061 ep->cur_pkt_addr += offset;
2062 ep->cur_pkt_dma_addr += offset;
2063 ep->cur_pkt++;
2064 }
2065
2066 /**
2067 * This function sets the latest iso packet information (DDMA mode)
2068 *
2069 * @param core_if Programming view of DWC_otg controller.
2070 * @param dwc_ep The EP to start the transfer on.
2071 *
2072 */
2073 static void set_ddma_iso_pkts_info(dwc_otg_core_if_t *core_if, dwc_ep_t *dwc_ep)
2074 {
2075 dwc_otg_dma_desc_t* dma_desc;
2076 desc_sts_data_t sts = {.d32 = 0};
2077 iso_pkt_info_t *iso_packet;
2078 uint32_t data_per_desc;
2079 uint32_t offset;
2080 int i, j;
2081
2082 iso_packet = dwc_ep->pkt_info;
2083
2084 /** Collect the status of the completed DMA Descriptors */
2085 /** ISO OUT EP */
2086 if(dwc_ep->is_in == 0) {
2087 dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * dwc_ep->proc_buf_num;
2088 offset = 0;
2089
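/* Walk the completed descriptors frame by frame; the last frame is
 * handled separately after the loop, with its final descriptor
 * treated on its own. */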
2090 for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm)
2091 {
2092 for(j = 0; j < dwc_ep->pkt_per_frm; ++j)
2093 {
2094 data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ?
2095 dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket;
2096 data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0;
2097
2098 sts.d32 = readl(&dma_desc->status);
2099
2100 /* Write status in the iso packet descriptor */
2101 iso_packet->status = sts.b_iso_out.rxsts + (sts.b_iso_out.bs^BS_DMA_DONE);
2102 if(iso_packet->status) {
2103 iso_packet->status = -ENODATA;
2104 }
2105
2106 /* Received data length */
2107 if(!sts.b_iso_out.rxbytes){
2108 iso_packet->length = data_per_desc - sts.b_iso_out.rxbytes;
2109 } else {
2110 iso_packet->length = data_per_desc - sts.b_iso_out.rxbytes +
2111 (4 - dwc_ep->data_per_frame % 4);
2112 }
2113
2114 iso_packet->offset = offset;
2115
2116 offset += data_per_desc;
2117 dma_desc ++;
2118 iso_packet ++;
2119 }
2120 }
2121
2122 for(j = 0; j < dwc_ep->pkt_per_frm - 1; ++j)
2123 {
2124 data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ?
2125 dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket;
2126 data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0;
2127
2128 sts.d32 = readl(&dma_desc->status);
2129
2130 /* Write status in the iso packet descriptor */
2131 iso_packet->status = sts.b_iso_out.rxsts + (sts.b_iso_out.bs^BS_DMA_DONE);
2132 if(iso_packet->status) {
2133 iso_packet->status = -ENODATA;
2134 }
2135
2136 /* Received data length */
2137 iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_out.rxbytes;
2138
2139 iso_packet->offset = offset;
2140
2141 offset += data_per_desc;
2142 iso_packet++;
2143 dma_desc++;
2144 }
2145
2146 sts.d32 = readl(&dma_desc->status);
2147
2148 /* Write status in the iso packet descriptor */
2149 iso_packet->status = sts.b_iso_out.rxsts + (sts.b_iso_out.bs^BS_DMA_DONE);
2150 if(iso_packet->status) {
2151 iso_packet->status = -ENODATA;
2152 }
2153 /* Received data length */
2154 if(!sts.b_iso_out.rxbytes){
2155 iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_out.rxbytes;
2156 } else {
2157 iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_out.rxbytes +
2158 (4 - dwc_ep->data_per_frame % 4);
2159 }
2160
2161 iso_packet->offset = offset;
2162 }
2163 else /** ISO IN EP */
2164 {
2165 dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * dwc_ep->proc_buf_num;
2166
2167 for(i = 0; i < dwc_ep->desc_cnt - 1; i++)
2168 {
2169 sts.d32 = readl(&dma_desc->status);
2170
2171 /* Write status in iso packet descriptor */
2172 iso_packet->status = sts.b_iso_in.txsts + (sts.b_iso_in.bs^BS_DMA_DONE);
2173 if(iso_packet->status != 0) {
2174 iso_packet->status = -ENODATA;
2175
2176 }
2177 /* Bytes that have been transferred */
2178 iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_in.txbytes;
2179
2180 dma_desc ++;
2181 iso_packet++;
2182 }
2183
2184 sts.d32 = readl(&dma_desc->status);
2185 while(sts.b_iso_in.bs == BS_DMA_BUSY) {
2186 sts.d32 = readl(&dma_desc->status);
2187 }
2188
2189 /* Write status in the iso packet descriptor; @todo to be done with proper ERROR codes */
2190 iso_packet->status = sts.b_iso_in.txsts + (sts.b_iso_in.bs^BS_DMA_DONE);
2191 if(iso_packet->status != 0) {
2192 iso_packet->status = -ENODATA;
2193 }
2194
2195 /* Bytes that have been transferred */
2196 iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_in.txbytes;
2197 }
2198 }
2199
2200 /**
2201 * This function reinitializes the DMA Descriptors for an Isochronous transfer
2202 *
2203 * @param core_if Programming view of DWC_otg controller.
2204 * @param dwc_ep The EP to start the transfer on.
2205 *
2206 */
2207 static void reinit_ddma_iso_xfer(dwc_otg_core_if_t *core_if, dwc_ep_t *dwc_ep)
2208 {
2209 int i, j;
2210 dwc_otg_dma_desc_t* dma_desc;
2211 dma_addr_t dma_ad;
2212 volatile uint32_t *addr;
2213 desc_sts_data_t sts = { .d32 =0 };
2214 uint32_t data_per_desc;
2215
2216 if(dwc_ep->is_in == 0) {
2217 addr = &core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl;
2218 }
2219 else {
2220 addr = &core_if->dev_if->in_ep_regs[dwc_ep->num]->diepctl;
2221 }
2222
2223
2224 if(dwc_ep->proc_buf_num == 0) {
2225 /** Buffer 0 descriptors setup */
2226 dma_ad = dwc_ep->dma_addr0;
2227 }
2228 else {
2229 /** Buffer 1 descriptors setup */
2230 dma_ad = dwc_ep->dma_addr1;
2231 }
2232
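/* Rewrite the descriptor ring of the buffer selected by proc_buf_num:
 * each descriptor is handed back to the core as BS_HOST_READY and IOC is
 * set on the final descriptor so the next completion raises an interrupt. */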
2233 /** Reinit closed DMA Descriptors*/
2234 /** ISO OUT EP */
2235 if(dwc_ep->is_in == 0) {
2236 dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * dwc_ep->proc_buf_num;
2237
2238 sts.b_iso_out.bs = BS_HOST_READY;
2239 sts.b_iso_out.rxsts = 0;
2240 sts.b_iso_out.l = 0;
2241 sts.b_iso_out.sp = 0;
2242 sts.b_iso_out.ioc = 0;
2243 sts.b_iso_out.pid = 0;
2244 sts.b_iso_out.framenum = 0;
2245
2246 for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm)
2247 {
2248 for(j = 0; j < dwc_ep->pkt_per_frm; ++j)
2249 {
2250 data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ?
2251 dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket;
2252 data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0;
2253 sts.b_iso_out.rxbytes = data_per_desc;
2254 writel((uint32_t)dma_ad, &dma_desc->buf);
2255 writel(sts.d32, &dma_desc->status);
2256
2257 //(uint32_t)dma_ad += data_per_desc;
2258 dma_ad = (uint32_t)dma_ad + data_per_desc;
2259 dma_desc ++;
2260 }
2261 }
2262
2263 for(j = 0; j < dwc_ep->pkt_per_frm - 1; ++j)
2264 {
2265
2266 data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ?
2267 dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket;
2268 data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0;
2269 sts.b_iso_out.rxbytes = data_per_desc;
2270
2271 writel((uint32_t)dma_ad, &dma_desc->buf);
2272 writel(sts.d32, &dma_desc->status);
2273
2274 dma_desc++;
2275 //(uint32_t)dma_ad += data_per_desc;
2276 dma_ad = (uint32_t)dma_ad + data_per_desc;
2277 }
2278
2279 sts.b_iso_out.ioc = 1;
2280 sts.b_iso_out.l = dwc_ep->proc_buf_num;
2281
2282 data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ?
2283 dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket;
2284 data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0;
2285 sts.b_iso_out.rxbytes = data_per_desc;
2286
2287 writel((uint32_t)dma_ad, &dma_desc->buf);
2288 writel(sts.d32, &dma_desc->status);
2289 }
2290 else /** ISO IN EP */
2291 {
2292 dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * dwc_ep->proc_buf_num;
2293
2294 sts.b_iso_in.bs = BS_HOST_READY;
2295 sts.b_iso_in.txsts = 0;
2296 sts.b_iso_in.sp = 0;
2297 sts.b_iso_in.ioc = 0;
2298 sts.b_iso_in.pid = dwc_ep->pkt_per_frm;
2299 sts.b_iso_in.framenum = dwc_ep->next_frame;
2300 sts.b_iso_in.txbytes = dwc_ep->data_per_frame;
2301 sts.b_iso_in.l = 0;
2302
2303 for(i = 0; i < dwc_ep->desc_cnt - 1; i++)
2304 {
2305 writel((uint32_t)dma_ad, &dma_desc->buf);
2306 writel(sts.d32, &dma_desc->status);
2307
2308 sts.b_iso_in.framenum += dwc_ep->bInterval;
2309 //(uint32_t)dma_ad += dwc_ep->data_per_frame;
2310 dma_ad = (uint32_t)dma_ad + dwc_ep->data_per_frame;
2311 dma_desc ++;
2312 }
2313
2314 sts.b_iso_in.ioc = 1;
2315 sts.b_iso_in.l = dwc_ep->proc_buf_num;
2316
2317 writel((uint32_t)dma_ad, &dma_desc->buf);
2318 writel(sts.d32, &dma_desc->status);
2319
2320 dwc_ep->next_frame = sts.b_iso_in.framenum + dwc_ep->bInterval * 1;
2321 }
2322 dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1;
2323 }
2324
2325
2326 /**
2327 * This function is to handle Iso EP transfer complete interrupt
2328 * in case Iso out packet was dropped
2329 *
2330 * @param core_if Programming view of DWC_otg controller.
2331 * @param dwc_ep The EP for which transfer complete was asserted
2332 *
2333 */
2334 static uint32_t handle_iso_out_pkt_dropped(dwc_otg_core_if_t *core_if, dwc_ep_t *dwc_ep)
2335 {
2336 uint32_t dma_addr;
2337 uint32_t drp_pkt;
2338 uint32_t drp_pkt_cnt;
2339 deptsiz_data_t deptsiz = { .d32 = 0 };
2340 depctl_data_t depctl = { .d32 = 0 };
2341 int i;
2342
2343 deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doeptsiz);
2344
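/* pktcnt holds the packets still pending, so pkt_cnt - pktcnt is the
 * index of the dropped packet; it and the remaining packets of that
 * frame are reported as -ENODATA below. */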
2345 drp_pkt = dwc_ep->pkt_cnt - deptsiz.b.pktcnt;
2346 drp_pkt_cnt = dwc_ep->pkt_per_frm - (drp_pkt % dwc_ep->pkt_per_frm);
2347
2348 /* Setting dropped packets status */
2349 for(i = 0; i < drp_pkt_cnt; ++i) {
2350 dwc_ep->pkt_info[drp_pkt].status = -ENODATA;
2351 drp_pkt ++;
2352 deptsiz.b.pktcnt--;
2353 }
2354
2355
2356 if(deptsiz.b.pktcnt > 0) {
2357 deptsiz.b.xfersize = dwc_ep->xfer_len - (dwc_ep->pkt_cnt - deptsiz.b.pktcnt) * dwc_ep->maxpacket;
2358 } else {
2359 deptsiz.b.xfersize = 0;
2360 deptsiz.b.pktcnt = 0;
2361 }
2362
2363 dwc_write_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doeptsiz, deptsiz.d32);
2364
2365 if(deptsiz.b.pktcnt > 0) {
2366 if(dwc_ep->proc_buf_num) {
2367 dma_addr = dwc_ep->dma_addr1 + dwc_ep->xfer_len - deptsiz.b.xfersize;
2368 } else {
2369 dma_addr = dwc_ep->dma_addr0 + dwc_ep->xfer_len - deptsiz.b.xfersize;
2370 }
2371
2372 VERIFY_PCD_DMA_ADDR(dma_addr);
2373 dwc_write_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doepdma, dma_addr);
2374
2375 /** Re-enable endpoint, clear nak */
2376 depctl.d32 = 0;
2377 depctl.b.epena = 1;
2378 depctl.b.cnak = 1;
2379
2380 dwc_modify_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl,
2381 depctl.d32,depctl.d32);
2382 return 0;
2383 } else {
2384 return 1;
2385 }
2386 }
2387
2388 /**
2389 * This function sets iso packets information(PTI mode)
2390 *
2391 * @param core_if Programming view of DWC_otg controller.
2392 * @param ep The EP to start the transfer on.
2393 *
2394 */
2395 static uint32_t set_iso_pkts_info(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2396 {
2397 int i, j;
2398 dma_addr_t dma_ad;
2399 iso_pkt_info_t *packet_info = ep->pkt_info;
2400 uint32_t offset;
2401 uint32_t frame_data;
2402 deptsiz_data_t deptsiz;
2403
2404 if(ep->proc_buf_num == 0) {
2405 /** Buffer 0 descriptors setup */
2406 dma_ad = ep->dma_addr0;
2407 }
2408 else {
2409 /** Buffer 1 descriptors setup */
2410 dma_ad = ep->dma_addr1;
2411 }
2412
2413 if(ep->is_in) {
2414 deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz);
2415 } else {
2416 deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz);
2417 }
2418
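/* A zero remaining xfersize means the whole buffer was transferred:
 * fill in per-packet length/offset and report the buffer as done.
 * Otherwise, for OUT EPs, fall through to the dropped-packet workaround. */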
2419 if(!deptsiz.b.xfersize) {
2420 offset = 0;
2421 for(i = 0; i < ep->pkt_cnt; i += ep->pkt_per_frm)
2422 {
2423 frame_data = ep->data_per_frame;
2424 for(j = 0; j < ep->pkt_per_frm; ++j) {
2425
2426 /* Packet status is not set here: it is
2427 * initialized to 0 and, if the packet was
2428 * sent successfully, it remains 0. */
2429
2430 /* Bytes that have been transferred */
2431 packet_info->length = (ep->maxpacket < frame_data) ?
2432 ep->maxpacket : frame_data;
2433
2434 /* Received packet offset */
2435 packet_info->offset = offset;
2436 offset += packet_info->length;
2437 frame_data -= packet_info->length;
2438
2439 packet_info ++;
2440 }
2441 }
2442 return 1;
2443 } else {
2444 /* This is a workaround for the case of Transfer Complete and
2445 * PktDrpSts interrupt merging: the Transfer Complete
2446 * interrupt for an Isoc OUT endpoint is asserted without PktDrpSts
2447 * set and with a non-zero DOEPTSIZ register. Investigation showed
2448 * that this happens when an OUT packet is dropped, but because of
2449 * interrupt merging the PktDrpSts bit is cleared during the first
2450 * interrupt handling and is not set again for later merged interrupts.
2451 * In this case SW handles the interrupt as if PktDrpSts were set.
2452 */
2453 if(ep->is_in) {
2454 return 1;
2455 } else {
2456 return handle_iso_out_pkt_dropped(core_if, ep);
2457 }
2458 }
2459 }
2460
2461 /**
2462 * This function is to handle Iso EP transfer complete interrupt
2463 *
2464 * @param ep The EP for which transfer complete was asserted
2465 *
2466 */
2467 static void complete_iso_ep(dwc_otg_pcd_ep_t *ep)
2468 {
2469 dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd);
2470 dwc_ep_t *dwc_ep = &ep->dwc_ep;
2471 uint8_t is_last = 0;
2472
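/* Three completion paths: Descriptor DMA collects per-packet status and
 * re-arms the descriptor ring; Buffer DMA with PTI completes a whole
 * buffer at once; otherwise packets are completed one frame at a time
 * until the current buffer is exhausted. */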
2473 if(core_if->dma_enable) {
2474 if(core_if->dma_desc_enable) {
2475 set_ddma_iso_pkts_info(core_if, dwc_ep);
2476 reinit_ddma_iso_xfer(core_if, dwc_ep);
2477 is_last = 1;
2478 } else {
2479 if(core_if->pti_enh_enable) {
2480 if(set_iso_pkts_info(core_if, dwc_ep)) {
2481 dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1;
2482 dwc_otg_iso_ep_start_buf_transfer(core_if, dwc_ep);
2483 is_last = 1;
2484 }
2485 } else {
2486 set_current_pkt_info(core_if, dwc_ep);
2487 if(dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) {
2488 is_last = 1;
2489 dwc_ep->cur_pkt = 0;
2490 dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1;
2491 if(dwc_ep->proc_buf_num) {
2492 dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1;
2493 dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1;
2494 } else {
2495 dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0;
2496 dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0;
2497 }
2498 }
2499 dwc_otg_iso_ep_start_frm_transfer(core_if, dwc_ep);
2500 }
2501 }
2502 } else {
2503 set_current_pkt_info(core_if, dwc_ep);
2504 if(dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) {
2505 is_last = 1;
2506 dwc_ep->cur_pkt = 0;
2507 dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1;
2508 if(dwc_ep->proc_buf_num) {
2509 dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1;
2510 dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1;
2511 } else {
2512 dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0;
2513 dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0;
2514 }
2515 }
2516 dwc_otg_iso_ep_start_frm_transfer(core_if, dwc_ep);
2517 }
2518 if(is_last)
2519 dwc_otg_iso_buffer_done(ep, ep->iso_req);
2520 }
2521
2522 #endif //DWC_EN_ISOC
2523
2524
2525 /**
2526 * This function handles EP0 Control transfers.
2527 *
2528 * The state of the control transfers is tracked in
2529 * <code>ep0state</code>.
2530 */
2531 static void handle_ep0(dwc_otg_pcd_t *pcd)
2532 {
2533 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2534 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
2535 desc_sts_data_t desc_sts;
2536 deptsiz0_data_t deptsiz;
2537 uint32_t byte_count;
2538
2539 #ifdef DEBUG_EP0
2540 DWC_DEBUGPL(DBG_PCDV, "%s()\n", __func__);
2541 print_ep0_state(pcd);
2542 #endif
2543
2544 switch (pcd->ep0state) {
2545 case EP0_DISCONNECT:
2546 break;
2547
2548 case EP0_IDLE:
2549 pcd->request_config = 0;
2550
2551 pcd_setup(pcd);
2552 break;
2553
2554 case EP0_IN_DATA_PHASE:
2555 #ifdef DEBUG_EP0
2556 DWC_DEBUGPL(DBG_PCD, "DATA_IN EP%d-%s: type=%d, mps=%d\n",
2557 ep0->dwc_ep.num, (ep0->dwc_ep.is_in ?"IN":"OUT"),
2558 ep0->dwc_ep.type, ep0->dwc_ep.maxpacket);
2559 #endif
2560
2561 if (core_if->dma_enable != 0) {
2562 /*
2563 * For EP0 we can only program 1 packet at a time so we
2564 * need to redo the calculations after each complete.
2565 * Call write_packet to make the calculations, as in
2566 * slave mode, and use those values to determine if we
2567 * can complete.
2568 */
2569 if(core_if->dma_desc_enable == 0) {
2570 deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[0]->dieptsiz);
2571 byte_count = ep0->dwc_ep.xfer_len - deptsiz.b.xfersize;
2572 }
2573 else {
2574 desc_sts.d32 = readl(core_if->dev_if->in_desc_addr);
2575 byte_count = ep0->dwc_ep.xfer_len - desc_sts.b.bytes;
2576 }
2577
2578 ep0->dwc_ep.xfer_count += byte_count;
2579 ep0->dwc_ep.xfer_buff += byte_count;
2580 ep0->dwc_ep.dma_addr += byte_count;
2581 }
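/* Either continue with the next data packet, send the pending
 * zero-length packet, or complete the request. */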
2582 if (ep0->dwc_ep.xfer_count < ep0->dwc_ep.total_len) {
2583 dwc_otg_ep0_continue_transfer (GET_CORE_IF(pcd), &ep0->dwc_ep);
2584 DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n");
2585 }
2586 else if(ep0->dwc_ep.sent_zlp) {
2587 dwc_otg_ep0_continue_transfer (GET_CORE_IF(pcd), &ep0->dwc_ep);
2588 ep0->dwc_ep.sent_zlp = 0;
2589 DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n");
2590 }
2591 else {
2592 ep0_complete_request(ep0);
2593 DWC_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n");
2594 }
2595 break;
2596 case EP0_OUT_DATA_PHASE:
2597 #ifdef DEBUG_EP0
2598 DWC_DEBUGPL(DBG_PCD, "DATA_OUT EP%d-%s: type=%d, mps=%d\n",
2599 ep0->dwc_ep.num, (ep0->dwc_ep.is_in ?"IN":"OUT"),
2600 ep0->dwc_ep.type, ep0->dwc_ep.maxpacket);
2601 #endif
2602 if (core_if->dma_enable != 0) {
2603 if(core_if->dma_desc_enable == 0) {
2604 deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->out_ep_regs[0]->doeptsiz);
2605 byte_count = ep0->dwc_ep.maxpacket - deptsiz.b.xfersize;
2606
2607 //todo: invalidate cache & aligned buf patch on completion
2608 dma_sync_single_for_device(NULL,ep0->dwc_ep.dma_addr,byte_count,DMA_FROM_DEVICE);
2609 aligned_buf_patch_on_buf_dma_oep_completion(ep0,byte_count);
2610 }
2611 else {
2612 desc_sts.d32 = readl(core_if->dev_if->out_desc_addr);
2613 byte_count = ep0->dwc_ep.maxpacket - desc_sts.b.bytes;
2614
2615 //todo: invalidate cache & aligned buf patch on completion
2616 //
2617
2618 }
2619 ep0->dwc_ep.xfer_count += byte_count;
2620 ep0->dwc_ep.xfer_buff += byte_count;
2621 ep0->dwc_ep.dma_addr += byte_count;
2622 }
2623 if (ep0->dwc_ep.xfer_count < ep0->dwc_ep.total_len) {
2624 dwc_otg_ep0_continue_transfer (GET_CORE_IF(pcd), &ep0->dwc_ep);
2625 DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n");
2626 }
2627 else if(ep0->dwc_ep.sent_zlp) {
2628 dwc_otg_ep0_continue_transfer (GET_CORE_IF(pcd), &ep0->dwc_ep);
2629 ep0->dwc_ep.sent_zlp = 0;
2630 DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n");
2631 }
2632 else {
2633 ep0_complete_request(ep0);
2634 DWC_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n");
2635 }
2636 break;
2637
2638 case EP0_IN_STATUS_PHASE:
2639 case EP0_OUT_STATUS_PHASE:
2640 DWC_DEBUGPL(DBG_PCD, "CASE: EP0_STATUS\n");
2641 ep0_complete_request(ep0);
2642 pcd->ep0state = EP0_IDLE;
2643 ep0->stopped = 1;
2644 ep0->dwc_ep.is_in = 0; /* OUT for next SETUP */
2645
2646 /* Prepare for more SETUP Packets */
2647 if(core_if->dma_enable) {
2648 ep0_out_start(core_if, pcd);
2649 }
2650 break;
2651
2652 case EP0_STALL:
2653 DWC_ERROR("EP0 STALLed, should not get here pcd_setup()\n");
2654 break;
2655 }
2656 #ifdef DEBUG_EP0
2657 print_ep0_state(pcd);
2658 #endif
2659 }
2660
2661
2662 /**
2663 * Restart transfer
2664 */
2665 static void restart_transfer(dwc_otg_pcd_t *pcd, const uint32_t epnum)
2666 {
2667 dwc_otg_core_if_t *core_if;
2668 dwc_otg_dev_if_t *dev_if;
2669 deptsiz_data_t dieptsiz = {.d32=0};
2670 dwc_otg_pcd_ep_t *ep;
2671
2672 ep = get_in_ep(pcd, epnum);
2673
2674 #ifdef DWC_EN_ISOC
2675 if(ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
2676 return;
2677 }
2678 #endif /* DWC_EN_ISOC */
2679
2680 core_if = GET_CORE_IF(pcd);
2681 dev_if = core_if->dev_if;
2682
2683 dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dieptsiz);
2684
2685 DWC_DEBUGPL(DBG_PCD,"xfer_buff=%p xfer_count=%0x xfer_len=%0x"
2686 " stopped=%d\n", ep->dwc_ep.xfer_buff,
2687 ep->dwc_ep.xfer_count, ep->dwc_ep.xfer_len ,
2688 ep->stopped);
2689 /*
2690 * If xfersize is 0 and pktcnt is not 0, resend the last packet.
2691 */
2692 if (dieptsiz.b.pktcnt && dieptsiz.b.xfersize == 0 &&
2693 ep->dwc_ep.start_xfer_buff != 0) {
2694 if (ep->dwc_ep.total_len <= ep->dwc_ep.maxpacket) {
2695 ep->dwc_ep.xfer_count = 0;
2696 ep->dwc_ep.xfer_buff = ep->dwc_ep.start_xfer_buff;
2697 ep->dwc_ep.xfer_len = ep->dwc_ep.xfer_count;
2698 }
2699 else {
2700 ep->dwc_ep.xfer_count -= ep->dwc_ep.maxpacket;
2701 /* rewind by one max packet so the last packet is resent. */
2702 ep->dwc_ep.xfer_buff -= ep->dwc_ep.maxpacket;
2703 ep->dwc_ep.xfer_len = ep->dwc_ep.xfer_count;
2704 }
2705 ep->stopped = 0;
2706 DWC_DEBUGPL(DBG_PCD,"xfer_buff=%p xfer_count=%0x "
2707 "xfer_len=%0x stopped=%d\n",
2708 ep->dwc_ep.xfer_buff,
2709 ep->dwc_ep.xfer_count, ep->dwc_ep.xfer_len ,
2710 ep->stopped
2711 );
2712 if (epnum == 0) {
2713 dwc_otg_ep0_start_transfer(core_if, &ep->dwc_ep);
2714 }
2715 else {
2716 dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep);
2717 }
2718 }
2719 }
2720
2721
2722 /**
2723 * handle the IN EP disable interrupt.
2724 */
2725 static inline void handle_in_ep_disable_intr(dwc_otg_pcd_t *pcd,
2726 const uint32_t epnum)
2727 {
2728 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2729 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
2730 deptsiz_data_t dieptsiz = {.d32=0};
2731 dctl_data_t dctl = {.d32=0};
2732 dwc_otg_pcd_ep_t *ep;
2733 dwc_ep_t *dwc_ep;
2734
2735 ep = get_in_ep(pcd, epnum);
2736 dwc_ep = &ep->dwc_ep;
2737
2738 if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) {
2739 dwc_otg_flush_tx_fifo(core_if, dwc_ep->tx_fifo_num);
2740 return;
2741 }
2742
2743 DWC_DEBUGPL(DBG_PCD,"diepctl%d=%0x\n", epnum,
2744 dwc_read_reg32(&dev_if->in_ep_regs[epnum]->diepctl));
2745 dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dieptsiz);
2746
2747 DWC_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n",
2748 dieptsiz.b.pktcnt,
2749 dieptsiz.b.xfersize);
2750
2751 if (ep->stopped) {
2752 /* Flush the Tx FIFO */
2753 dwc_otg_flush_tx_fifo(core_if, dwc_ep->tx_fifo_num);
2754 /* Clear the Global IN NP NAK */
2755 dctl.d32 = 0;
2756 dctl.b.cgnpinnak = 1;
2757 dwc_modify_reg32(&dev_if->dev_global_regs->dctl,
2758 dctl.d32, 0);
2759 /* Restart the transaction */
2760 if (dieptsiz.b.pktcnt != 0 ||
2761 dieptsiz.b.xfersize != 0) {
2762 restart_transfer(pcd, epnum);
2763 }
2764 }
2765 else {
2766 /* Restart the transaction */
2767 if (dieptsiz.b.pktcnt != 0 ||
2768 dieptsiz.b.xfersize != 0) {
2769 restart_transfer(pcd, epnum);
2770 }
2771 DWC_DEBUGPL(DBG_ANY, "STOPPED!!!\n");
2772 }
2773 }
2774
2775 /**
2776 * Handler for the IN EP timeout handshake interrupt.
2777 */
2778 static inline void handle_in_ep_timeout_intr(dwc_otg_pcd_t *pcd,
2779 const uint32_t epnum)
2780 {
2781 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2782 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
2783
2784 #ifdef DEBUG
2785 deptsiz_data_t dieptsiz = {.d32=0};
2786 uint32_t num = 0;
2787 #endif
2788 dctl_data_t dctl = {.d32=0};
2789 dwc_otg_pcd_ep_t *ep;
2790
2791 gintmsk_data_t intr_mask = {.d32 = 0};
2792
2793 ep = get_in_ep(pcd, epnum);
2794
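/* Timeout recovery: in Slave mode stop filling the non-periodic TxFIFO,
 * then request Global IN NAK and mark the EP stopped; the EP disable
 * itself is completed from the Global IN NAK Effective handler. */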
2795 /* Disable the NP Tx Fifo Empty Interrupt */
2796 if (!core_if->dma_enable) {
2797 intr_mask.b.nptxfempty = 1;
2798 dwc_modify_reg32(&core_if->core_global_regs->gintmsk, intr_mask.d32, 0);
2799 }
2800 /** @todo NGS Check EP type.
2801 * Implement for Periodic EPs */
2802 /*
2803 * Non-periodic EP
2804 */
2805 /* Enable the Global IN NAK Effective Interrupt */
2806 intr_mask.b.ginnakeff = 1;
2807 dwc_modify_reg32(&core_if->core_global_regs->gintmsk,
2808 0, intr_mask.d32);
2809
2810 /* Set Global IN NAK */
2811 dctl.b.sgnpinnak = 1;
2812 dwc_modify_reg32(&dev_if->dev_global_regs->dctl,
2813 dctl.d32, dctl.d32);
2814
2815 ep->stopped = 1;
2816
2817 #ifdef DEBUG
2818 dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[num]->dieptsiz);
2819 DWC_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n",
2820 dieptsiz.b.pktcnt,
2821 dieptsiz.b.xfersize);
2822 #endif
2823
2824 #ifdef DISABLE_PERIODIC_EP
2825 /*
2826 * Set the NAK bit for this EP to
2827 * start the disable process.
2828 */
2829 diepctl.d32 = 0;
2830 diepctl.b.snak = 1;
2831 dwc_modify_reg32(&dev_if->in_ep_regs[num]->diepctl, diepctl.d32, diepctl.d32);
2832 ep->disabling = 1;
2833 ep->stopped = 1;
2834 #endif
2835 }
2836
2837 /**
2838 * Handler for the IN EP NAK interrupt.
2839 */
2840 static inline int32_t handle_in_ep_nak_intr(dwc_otg_pcd_t *pcd,
2841 const uint32_t epnum)
2842 {
2843 /** @todo implement ISR */
2844 dwc_otg_core_if_t* core_if;
2845 diepmsk_data_t intr_mask = { .d32 = 0};
2846
2847 DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "IN EP NAK");
2848 core_if = GET_CORE_IF(pcd);
2849 intr_mask.b.nak = 1;
2850
2851 if(core_if->multiproc_int_enable) {
2852 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->diepeachintmsk[epnum],
2853 intr_mask.d32, 0);
2854 } else {
2855 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->diepmsk,
2856 intr_mask.d32, 0);
2857 }
2858
2859 return 1;
2860 }
2861
2862 /**
2863 * Handler for the OUT EP Babble interrupt.
2864 */
2865 static inline int32_t handle_out_ep_babble_intr(dwc_otg_pcd_t *pcd,
2866 const uint32_t epnum)
2867 {
2868 /** @todo implement ISR */
2869 dwc_otg_core_if_t* core_if;
2870 doepmsk_data_t intr_mask = { .d32 = 0};
2871
2872 DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "OUT EP Babble");
2873 core_if = GET_CORE_IF(pcd);
2874 intr_mask.b.babble = 1;
2875
2876 if(core_if->multiproc_int_enable) {
2877 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepeachintmsk[epnum],
2878 intr_mask.d32, 0);
2879 } else {
2880 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk,
2881 intr_mask.d32, 0);
2882 }
2883
2884 return 1;
2885 }
2886
2887 /**
2888 * Handler for the OUT EP NAK interrupt.
2889 */
2890 static inline int32_t handle_out_ep_nak_intr(dwc_otg_pcd_t *pcd,
2891 const uint32_t epnum)
2892 {
2893 /** @todo implement ISR */
2894 dwc_otg_core_if_t* core_if;
2895 doepmsk_data_t intr_mask = { .d32 = 0};
2896
2897 DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "OUT EP NAK");
2898 core_if = GET_CORE_IF(pcd);
2899 intr_mask.b.nak = 1;
2900
2901 if(core_if->multiproc_int_enable) {
2902 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepeachintmsk[epnum],
2903 intr_mask.d32, 0);
2904 } else {
2905 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk,
2906 intr_mask.d32, 0);
2907 }
2908
2909 return 1;
2910 }
2911
2912 /**
2913 * Handler for the OUT EP NYET interrupt.
2914 */
2915 static inline int32_t handle_out_ep_nyet_intr(dwc_otg_pcd_t *pcd,
2916 const uint32_t epnum)
2917 {
2918 /** @todo implement ISR */
2919 dwc_otg_core_if_t* core_if;
2920 doepmsk_data_t intr_mask = { .d32 = 0};
2921
2922 DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "OUT EP NYET");
2923 core_if = GET_CORE_IF(pcd);
2924 intr_mask.b.nyet = 1;
2925
2926 if(core_if->multiproc_int_enable) {
2927 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepeachintmsk[epnum],
2928 intr_mask.d32, 0);
2929 } else {
2930 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk,
2931 intr_mask.d32, 0);
2932 }
2933
2934 return 1;
2935 }
2936
2937 /**
2938 * This interrupt indicates that an IN EP has a pending Interrupt.
2939 * The sequence for handling the IN EP interrupt is shown below:
2940 * -# Read the Device All Endpoint Interrupt register
2941 * -# Repeat the following for each IN EP interrupt bit set (from
2942 * LSB to MSB).
2943 * -# Read the Device Endpoint Interrupt (DIEPINTn) register
2944 * -# If "Transfer Complete" call the request complete function
2945 * -# If "Endpoint Disabled" complete the EP disable procedure.
2946 * -# If "AHB Error Interrupt" log error
2947 * -# If "Time-out Handshake" log error
2948 * -# If "IN Token Received when TxFIFO Empty" write packet to Tx
2949 * FIFO.
2950 * -# If "IN Token EP Mismatch" (disable, this is handled by EP
2951 * Mismatch Interrupt)
2952 */
2953 static int32_t dwc_otg_pcd_handle_in_ep_intr(dwc_otg_pcd_t *pcd)
2954 {
2955 #define CLEAR_IN_EP_INTR(__core_if,__epnum,__intr) \
2956 do { \
2957 diepint_data_t diepint = {.d32=0}; \
2958 diepint.b.__intr = 1; \
2959 dwc_write_reg32(&__core_if->dev_if->in_ep_regs[__epnum]->diepint, \
2960 diepint.d32); \
2961 } while (0)
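/* DIEPINTn bits are write-1-to-clear, so writing only the handled bit
 * acknowledges that interrupt without disturbing the others. */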
2962
2963 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2964 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
2965 diepint_data_t diepint = {.d32=0};
2966 dctl_data_t dctl = {.d32=0};
2967 depctl_data_t depctl = {.d32=0};
2968 uint32_t ep_intr;
2969 uint32_t epnum = 0;
2970 dwc_otg_pcd_ep_t *ep;
2971 dwc_ep_t *dwc_ep;
2972 gintmsk_data_t intr_mask = {.d32 = 0};
2973
2974 DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, pcd);
2975
2976 /* Read in the device interrupt bits */
2977 ep_intr = dwc_otg_read_dev_all_in_ep_intr(core_if);
2978
2979 /* Service the Device IN interrupts for each endpoint */
2980 while(ep_intr) {
2981 if (ep_intr&0x1) {
2982 uint32_t empty_msk;
2983 /* Get EP pointer */
2984 ep = get_in_ep(pcd, epnum);
2985 dwc_ep = &ep->dwc_ep;
2986
2987 depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->diepctl);
2988 empty_msk = dwc_read_reg32(&dev_if->dev_global_regs->dtknqr4_fifoemptymsk);
2989
2990 DWC_DEBUGPL(DBG_PCDV,
2991 "IN EP INTERRUPT - %d\nepmty_msk - %8x diepctl - %8x\n",
2992 epnum,
2993 empty_msk,
2994 depctl.d32);
2995
2996 DWC_DEBUGPL(DBG_PCD,
2997 "EP%d-%s: type=%d, mps=%d\n",
2998 dwc_ep->num, (dwc_ep->is_in ?"IN":"OUT"),
2999 dwc_ep->type, dwc_ep->maxpacket);
3000
3001 diepint.d32 = dwc_otg_read_dev_in_ep_intr(core_if, dwc_ep);
3002
3003 DWC_DEBUGPL(DBG_PCDV, "EP %d Interrupt Register - 0x%x\n", epnum, diepint.d32);
3004 /* Transfer complete */
3005 if (diepint.b.xfercompl) {
3006 /* Disable the NP Tx FIFO Empty
3007 * Interrupt */
3008 if(core_if->en_multiple_tx_fifo == 0) {
3009 intr_mask.b.nptxfempty = 1;
3010 dwc_modify_reg32(&core_if->core_global_regs->gintmsk, intr_mask.d32, 0);
3011 }
3012 else {
3013 /* Disable the Tx FIFO Empty Interrupt for this EP */
3014 uint32_t fifoemptymsk = 0x1 << dwc_ep->num;
3015 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
3016 fifoemptymsk, 0);
3017 }
3018 /* Clear the bit in DIEPINTn for this interrupt */
3019 CLEAR_IN_EP_INTR(core_if,epnum,xfercompl);
3020
3021 /* Complete the transfer */
3022 if (epnum == 0) {
3023 handle_ep0(pcd);
3024 }
3025 #ifdef DWC_EN_ISOC
3026 else if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) {
3027 if(!ep->stopped)
3028 complete_iso_ep(ep);
3029 }
3030 #endif //DWC_EN_ISOC
3031 else {
3032
3033 complete_ep(ep);
3034 }
3035 }
3036 /* Endpoint disable */
3037 if (diepint.b.epdisabled) {
3038 DWC_DEBUGPL(DBG_ANY,"EP%d IN disabled\n", epnum);
3039 handle_in_ep_disable_intr(pcd, epnum);
3040
3041 /* Clear the bit in DIEPINTn for this interrupt */
3042 CLEAR_IN_EP_INTR(core_if,epnum,epdisabled);
3043 }
3044 /* AHB Error */
3045 if (diepint.b.ahberr) {
3046 DWC_DEBUGPL(DBG_ANY,"EP%d IN AHB Error\n", epnum);
3047 /* Clear the bit in DIEPINTn for this interrupt */
3048 CLEAR_IN_EP_INTR(core_if,epnum,ahberr);
3049 }
3050 /* TimeOUT Handshake (non-ISOC IN EPs) */
3051 if (diepint.b.timeout) {
3052 DWC_DEBUGPL(DBG_ANY,"EP%d IN Time-out\n", epnum);
3053 handle_in_ep_timeout_intr(pcd, epnum);
3054
3055 CLEAR_IN_EP_INTR(core_if,epnum,timeout);
3056 }
3057 /** IN Token received with TxF Empty */
3058 if (diepint.b.intktxfemp) {
3059 DWC_DEBUGPL(DBG_ANY,"EP%d IN TKN TxFifo Empty\n",
3060 epnum);
3061 if (!ep->stopped && epnum != 0) {
3062
3063 diepmsk_data_t diepmsk = { .d32 = 0};
3064 diepmsk.b.intktxfemp = 1;
3065
3066 if(core_if->multiproc_int_enable) {
3067 dwc_modify_reg32(&dev_if->dev_global_regs->diepeachintmsk[epnum],
3068 diepmsk.d32, 0);
3069 } else {
3070 dwc_modify_reg32(&dev_if->dev_global_regs->diepmsk, diepmsk.d32, 0);
3071 }
3072 start_next_request(ep);
3073 }
3074 else if(core_if->dma_desc_enable && epnum == 0 &&
3075 pcd->ep0state == EP0_OUT_STATUS_PHASE) {
3076 // EP0 IN set STALL
3077 depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->diepctl);
3078
3079 /* set the disable and stall bits */
3080 if (depctl.b.epena) {
3081 depctl.b.epdis = 1;
3082 }
3083 depctl.b.stall = 1;
3084 dwc_write_reg32(&dev_if->in_ep_regs[epnum]->diepctl, depctl.d32);
3085 }
3086 CLEAR_IN_EP_INTR(core_if,epnum,intktxfemp);
3087 }
3088 /** IN Token Received with EP mismatch */
3089 if (diepint.b.intknepmis) {
3090 DWC_DEBUGPL(DBG_ANY,"EP%d IN TKN EP Mismatch\n", epnum);
3091 CLEAR_IN_EP_INTR(core_if,epnum,intknepmis);
3092 }
3093 /** IN Endpoint NAK Effective */
3094 if (diepint.b.inepnakeff) {
3095 DWC_DEBUGPL(DBG_ANY,"EP%d IN EP NAK Effective\n", epnum);
3096 /* Periodic EP */
3097 if (ep->disabling) {
3098 depctl.d32 = 0;
3099 depctl.b.snak = 1;
3100 depctl.b.epdis = 1;
3101 dwc_modify_reg32(&dev_if->in_ep_regs[epnum]->diepctl, depctl.d32, depctl.d32);
3102 }
3103 CLEAR_IN_EP_INTR(core_if,epnum,inepnakeff);
3104
3105 }
3106
3107 /** IN EP Tx FIFO Empty Intr */
3108 if (diepint.b.emptyintr) {
3109 DWC_DEBUGPL(DBG_ANY,"EP%d Tx FIFO Empty Intr \n", epnum);
3110 write_empty_tx_fifo(pcd, epnum);
3111
3112 CLEAR_IN_EP_INTR(core_if,epnum,emptyintr);
3113 }
3114
3115 /** IN EP BNA Intr */
3116 if (diepint.b.bna) {
3117 CLEAR_IN_EP_INTR(core_if,epnum,bna);
3118 if(core_if->dma_desc_enable) {
3119 #ifdef DWC_EN_ISOC
3120 if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) {
3121 /*
3122 * This checking is performed to prevent first "false" BNA
3123 * handling occuring right after reconnect
3124 */
3125 if(dwc_ep->next_frame != 0xffffffff)
3126 dwc_otg_pcd_handle_iso_bna(ep);
3127 }
3128 else
3129 #endif //DWC_EN_ISOC
3130 {
3131 dctl.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dctl);
3132
3133 /* If Global Continue on BNA is disabled - disable EP */
3134 if(!dctl.b.gcontbna) {
3135 depctl.d32 = 0;
3136 depctl.b.snak = 1;
3137 depctl.b.epdis = 1;
3138 dwc_modify_reg32(&dev_if->in_ep_regs[epnum]->diepctl, depctl.d32, depctl.d32);
3139 } else {
3140 start_next_request(ep);
3141 }
3142 }
3143 }
3144 }
3145 /* NAK Interrupt */
3146 if (diepint.b.nak) {
3147 DWC_DEBUGPL(DBG_ANY,"EP%d IN NAK Interrupt\n", epnum);
3148 handle_in_ep_nak_intr(pcd, epnum);
3149
3150 CLEAR_IN_EP_INTR(core_if,epnum,nak);
3151 }
3152 }
3153 epnum++;
3154 ep_intr >>=1;
3155 }
3156
3157 return 1;
3158 #undef CLEAR_IN_EP_INTR
3159 }
3160
3161 /**
3162 * This interrupt indicates that an OUT EP has a pending Interrupt.
3163 * The sequence for handling the OUT EP interrupt is shown below:
3164 * -# Read the Device All Endpoint Interrupt register
3165 * -# Repeat the following for each OUT EP interrupt bit set (from
3166 * LSB to MSB).
3167 * -# Read the Device Endpoint Interrupt (DOEPINTn) register
3168 * -# If "Transfer Complete" call the request complete function
3169 * -# If "Endpoint Disabled" complete the EP disable procedure.
3170 * -# If "AHB Error Interrupt" log error
3171 * -# If "Setup Phase Done" process Setup Packet (See Standard USB
3172 * Command Processing)
3173 */
3174 static int32_t dwc_otg_pcd_handle_out_ep_intr(dwc_otg_pcd_t *pcd)
3175 {
3176 #define CLEAR_OUT_EP_INTR(__core_if,__epnum,__intr) \
3177 do { \
3178 doepint_data_t doepint = {.d32=0}; \
3179 doepint.b.__intr = 1; \
3180 dwc_write_reg32(&__core_if->dev_if->out_ep_regs[__epnum]->doepint, \
3181 doepint.d32); \
3182 } while (0)
3183
3184 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
3185 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
3186 uint32_t ep_intr;
3187 doepint_data_t doepint = {.d32=0};
3188 dctl_data_t dctl = {.d32=0};
3189 depctl_data_t doepctl = {.d32=0};
3190 uint32_t epnum = 0;
3191 dwc_otg_pcd_ep_t *ep;
3192 dwc_ep_t *dwc_ep;
3193
3194 DWC_DEBUGPL(DBG_PCDV, "%s()\n", __func__);
3195
3196 /* Read in the device interrupt bits */
3197 ep_intr = dwc_otg_read_dev_all_out_ep_intr(core_if);
3198
3199 while(ep_intr) {
3200 if (ep_intr&0x1) {
3201 /* Get EP pointer */
3202 ep = get_out_ep(pcd, epnum);
3203 dwc_ep = &ep->dwc_ep;
3204
3205 #ifdef VERBOSE
3206 DWC_DEBUGPL(DBG_PCDV,
3207 "EP%d-%s: type=%d, mps=%d\n",
3208 dwc_ep->num, (dwc_ep->is_in ?"IN":"OUT"),
3209 dwc_ep->type, dwc_ep->maxpacket);
3210 #endif
3211 doepint.d32 = dwc_otg_read_dev_out_ep_intr(core_if, dwc_ep);
3212
3213 /* Transfer complete */
3214 if (doepint.b.xfercompl) {
3215 if (epnum == 0) {
3216 /* Clear the bit in DOEPINTn for this interrupt */
3217 CLEAR_OUT_EP_INTR(core_if,epnum,xfercompl);
3218 if(core_if->dma_desc_enable == 0 || pcd->ep0state != EP0_IDLE)
3219 handle_ep0(pcd);
3220 #ifdef DWC_EN_ISOC
3221 } else if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) {
3222 if (doepint.b.pktdrpsts == 0) {
3223 /* Clear the bit in DOEPINTn for this interrupt */
3224 CLEAR_OUT_EP_INTR(core_if,epnum,xfercompl);
3225 complete_iso_ep(ep);
3226 } else {
3227 doepint_data_t doepint = {.d32=0};
3228 doepint.b.xfercompl = 1;
3229 doepint.b.pktdrpsts = 1;
3230 dwc_write_reg32(&core_if->dev_if->out_ep_regs[epnum]->doepint,
3231 doepint.d32);
3232 if(handle_iso_out_pkt_dropped(core_if,dwc_ep)) {
3233 complete_iso_ep(ep);
3234 }
3235 }
3236 #endif //DWC_EN_ISOC
3237 } else {
3238 /* Clear the bit in DOEPINTn for this interrupt */
3239 CLEAR_OUT_EP_INTR(core_if,epnum,xfercompl);
3240 complete_ep(ep);
3241 }
3242
3243 }
3244
3245 /* Endpoint disable */
3246 if (doepint.b.epdisabled) {
3247 /* Clear the bit in DOEPINTn for this interrupt */
3248 CLEAR_OUT_EP_INTR(core_if,epnum,epdisabled);
3249 }
3250 /* AHB Error */
3251 if (doepint.b.ahberr) {
3252 DWC_DEBUGPL(DBG_PCD,"EP%d OUT AHB Error\n", epnum);
3253 DWC_DEBUGPL(DBG_PCD,"EP DMA REG %d \n", core_if->dev_if->out_ep_regs[epnum]->doepdma);
3254 CLEAR_OUT_EP_INTR(core_if,epnum,ahberr);
3255 }
3256 /* Setup Phase Done (control EPs) */
3257 if (doepint.b.setup) {
3258 #ifdef DEBUG_EP0
3259 DWC_DEBUGPL(DBG_PCD,"EP%d SETUP Done\n",
3260 epnum);
3261 #endif
3262 CLEAR_OUT_EP_INTR(core_if,epnum,setup);
3263 handle_ep0(pcd);
3264 }
3265
3266 /** OUT EP BNA Intr */
3267 if (doepint.b.bna) {
3268 CLEAR_OUT_EP_INTR(core_if,epnum,bna);
3269 if(core_if->dma_desc_enable) {
3270 #ifdef DWC_EN_ISOC
3271 if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) {
3272 /*
3273 * This checking is performed to prevent first "false" BNA
3274 * handling occurring right after reconnect
3275 */
3276 if(dwc_ep->next_frame != 0xffffffff)
3277 dwc_otg_pcd_handle_iso_bna(ep);
3278 }
3279 else
3280 #endif //DWC_EN_ISOC
3281 {
3282 dctl.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dctl);
3283
3284 /* If Global Continue on BNA is disabled - disable EP*/
3285 if(!dctl.b.gcontbna) {
3286 doepctl.d32 = 0;
3287 doepctl.b.snak = 1;
3288 doepctl.b.epdis = 1;
3289 dwc_modify_reg32(&dev_if->out_ep_regs[epnum]->doepctl, doepctl.d32, doepctl.d32);
3290 } else {
3291 start_next_request(ep);
3292 }
3293 }
3294 }
3295 }
3296 if (doepint.b.stsphsercvd) {
3297 CLEAR_OUT_EP_INTR(core_if,epnum,stsphsercvd);
3298 if(core_if->dma_desc_enable) {
3299 do_setup_in_status_phase(pcd);
3300 }
3301 }
3302 /* Babble Interrupt */
3303 if (doepint.b.babble) {
3304 DWC_DEBUGPL(DBG_ANY,"EP%d OUT Babble\n", epnum);
3305 handle_out_ep_babble_intr(pcd, epnum);
3306
3307 CLEAR_OUT_EP_INTR(core_if,epnum,babble);
3308 }
3309 /* NAK Interrupt */
3310 if (doepint.b.nak) {
3311 DWC_DEBUGPL(DBG_ANY,"EP%d OUT NAK\n", epnum);
3312 handle_out_ep_nak_intr(pcd, epnum);
3313
3314 CLEAR_OUT_EP_INTR(core_if,epnum,nak);
3315 }
3316 /* NYET Interrupt */
3317 if (doepint.b.nyet) {
3318 DWC_DEBUGPL(DBG_ANY,"EP%d OUT NYET\n", epnum);
3319 handle_out_ep_nyet_intr(pcd, epnum);
3320
3321 CLEAR_OUT_EP_INTR(core_if,epnum,nyet);
3322 }
3323 }
3324
3325 epnum++;
3326 ep_intr >>=1;
3327 }
3328
3329 return 1;
3330
3331 #undef CLEAR_OUT_EP_INTR
3332 }
3333
3334
3335 /**
3336 * Incomplete ISO IN Transfer Interrupt.
3337 * This interrupt indicates one of the following conditions occurred
3338 * while transmitting an ISOC transaction.
3339 * - Corrupted IN Token for ISOC EP.
3340 * - Packet not complete in FIFO.
3341 * The following actions will be taken:
3342 * -# Determine the EP
3343 * -# Set incomplete flag in dwc_ep structure
3344 * -# Disable EP; when "Endpoint Disabled" interrupt is received
3345 * flush the FIFO.
3346 */
3347 int32_t dwc_otg_pcd_handle_incomplete_isoc_in_intr(dwc_otg_pcd_t *pcd)
3348 {
3349 gintsts_data_t gintsts;
3350
3351
3352 #ifdef DWC_EN_ISOC
3353 dwc_otg_dev_if_t *dev_if;
3354 deptsiz_data_t deptsiz = { .d32 = 0};
3355 depctl_data_t depctl = { .d32 = 0};
3356 dsts_data_t dsts = { .d32 = 0};
3357 dwc_ep_t *dwc_ep;
3358 int i;
3359
3360 dev_if = GET_CORE_IF(pcd)->dev_if;
3361
3362 for(i = 1; i <= dev_if->num_in_eps; ++i) {
3363 dwc_ep = &pcd->in_ep[i].dwc_ep;
3364 if(dwc_ep->active &&
3365 dwc_ep->type == USB_ENDPOINT_XFER_ISOC)
3366 {
3367 deptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->dieptsiz);
3368 depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl);
3369
3370 if(depctl.b.epdis && deptsiz.d32) {
3371 set_current_pkt_info(GET_CORE_IF(pcd), dwc_ep);
3372 if(dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) {
3373 dwc_ep->cur_pkt = 0;
3374 dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1;
3375
3376 if(dwc_ep->proc_buf_num) {
3377 dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1;
3378 dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1;
3379 } else {
3380 dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0;
3381 dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0;
3382 }
3383 }
3384
3385 dsts.d32 = dwc_read_reg32(&GET_CORE_IF(pcd)->dev_if->dev_global_regs->dsts);
3386 dwc_ep->next_frame = dsts.b.soffn;
3387
3388 dwc_otg_iso_ep_start_frm_transfer(GET_CORE_IF(pcd), dwc_ep);
3389 }
3390 }
3391 }
3392
3393 #else
3394 gintmsk_data_t intr_mask = { .d32 = 0};
3395 DWC_PRINT("INTERRUPT Handler not implemented for %s\n",
3396 "IN ISOC Incomplete");
3397
3398 intr_mask.b.incomplisoin = 1;
3399 dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
3400 intr_mask.d32, 0);
3401 #endif //DWC_EN_ISOC
3402
3403 /* Clear interrupt */
3404 gintsts.d32 = 0;
3405 gintsts.b.incomplisoin = 1;
3406 dwc_write_reg32 (&GET_CORE_IF(pcd)->core_global_regs->gintsts,
3407 gintsts.d32);
3408
3409 return 1;
3410 }
3411
3412 /**
3413 * Incomplete ISO OUT Transfer Interrupt.
3414 *
3415 * This interrupt indicates that the core has dropped an ISO OUT
3416 * packet. The following conditions can be the cause:
3417 * - FIFO Full, the entire packet would not fit in the FIFO.
3418 * - CRC Error
3419 * - Corrupted Token
3420 * The following actions will be taken:
3421 * -# Determine the EP
3422 * -# Set incomplete flag in dwc_ep structure
3423 * -# Read any data from the FIFO
3424 * -# Disable the EP; when the "Endpoint Disabled" interrupt is received,
3425 * re-enable the EP.
3426 */
3427 int32_t dwc_otg_pcd_handle_incomplete_isoc_out_intr(dwc_otg_pcd_t *pcd)
3428 {
3429 /* @todo implement ISR */
3430 gintsts_data_t gintsts;
3431
3432 #ifdef DWC_EN_ISOC
3433 dwc_otg_dev_if_t *dev_if;
3434 deptsiz_data_t deptsiz = { .d32 = 0};
3435 depctl_data_t depctl = { .d32 = 0};
3436 dsts_data_t dsts = { .d32 = 0};
3437 dwc_ep_t *dwc_ep;
3438 int i;
3439
3440 dev_if = GET_CORE_IF(pcd)->dev_if;
3441
3442 for(i = 1; i <= dev_if->num_out_eps; ++i) {
3443 dwc_ep = &pcd->out_ep[i].dwc_ep;
3444 if(pcd->out_ep[i].dwc_ep.active &&
3445 pcd->out_ep[i].dwc_ep.type == USB_ENDPOINT_XFER_ISOC)
3446 {
3447 deptsiz.d32 = dwc_read_reg32(&dev_if->out_ep_regs[i]->doeptsiz);
3448 depctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[i]->doepctl);
3449
3450 if(depctl.b.epdis && deptsiz.d32) {
3451 set_current_pkt_info(GET_CORE_IF(pcd), &pcd->out_ep[i].dwc_ep);
3452 if(dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) {
3453 dwc_ep->cur_pkt = 0;
3454 dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1;
3455
3456 if(dwc_ep->proc_buf_num) {
3457 dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1;
3458 dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1;
3459 } else {
3460 dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0;
3461 dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0;
3462 }
3463 }
3464
3465 dsts.d32 = dwc_read_reg32(&GET_CORE_IF(pcd)->dev_if->dev_global_regs->dsts);
3466 dwc_ep->next_frame = dsts.b.soffn;
3467
3468 dwc_otg_iso_ep_start_frm_transfer(GET_CORE_IF(pcd), dwc_ep);
3469 }
3470 }
3471 }
3472 #else
3473 /** @todo implement ISR */
3474 gintmsk_data_t intr_mask = { .d32 = 0};
3475
3476 DWC_PRINT("INTERRUPT Handler not implemented for %s\n",
3477 "OUT ISOC Incomplete");
3478
3479 intr_mask.b.incomplisoout = 1;
3480 dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
3481 intr_mask.d32, 0);
3482
3483 #endif // DWC_EN_ISOC
3484
3485 /* Clear interrupt */
3486 gintsts.d32 = 0;
3487 gintsts.b.incomplisoout = 1;
3488 dwc_write_reg32 (&GET_CORE_IF(pcd)->core_global_regs->gintsts,
3489 gintsts.d32);
3490
3491 return 1;
3492 }
3493
3494 /**
3495 * This function handles the Global IN NAK Effective interrupt.
3496 *
3497 */
3498 int32_t dwc_otg_pcd_handle_in_nak_effective(dwc_otg_pcd_t *pcd)
3499 {
3500 dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
3501 depctl_data_t diepctl = { .d32 = 0};
3502 depctl_data_t diepctl_rd = { .d32 = 0};
3503 gintmsk_data_t intr_mask = { .d32 = 0};
3504 gintsts_data_t gintsts;
3505 int i;
3506
3507 DWC_DEBUGPL(DBG_PCD, "Global IN NAK Effective\n");
3508
3509 /* Disable all active IN EPs */
3510 diepctl.b.epdis = 1;
3511 diepctl.b.snak = 1;
3512
3513 for (i=0; i <= dev_if->num_in_eps; i++)
3514 {
3515 diepctl_rd.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl);
3516 if (diepctl_rd.b.epena) {
3517 dwc_write_reg32(&dev_if->in_ep_regs[i]->diepctl,
3518 diepctl.d32);
3519 }
3520 }
3521 /* Disable the Global IN NAK Effective Interrupt */
3522 intr_mask.b.ginnakeff = 1;
3523 dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
3524 intr_mask.d32, 0);
3525
3526 /* Clear interrupt */
3527 gintsts.d32 = 0;
3528 gintsts.b.ginnakeff = 1;
3529 dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
3530 gintsts.d32);
3531
3532 return 1;
3533 }
3534
3535 /**
3536 * OUT NAK Effective.
3537 *
3538 */
3539 int32_t dwc_otg_pcd_handle_out_nak_effective(dwc_otg_pcd_t *pcd)
3540 {
3541 gintmsk_data_t intr_mask = { .d32 = 0};
3542 gintsts_data_t gintsts;
3543
3544 DWC_PRINT("INTERRUPT Handler not implemented for %s\n",
3545 "Global IN NAK Effective\n");
3546 /* Disable the Global IN NAK Effective Interrupt */
3547 intr_mask.b.goutnakeff = 1;
3548 dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
3549 intr_mask.d32, 0);
3550
3551 /* Clear interrupt */
3552 gintsts.d32 = 0;
3553 gintsts.b.goutnakeff = 1;
3554 dwc_write_reg32 (&GET_CORE_IF(pcd)->core_global_regs->gintsts,
3555 gintsts.d32);
3556
3557 return 1;
3558 }
3559
3560
3561 /**
3562 * PCD interrupt handler.
3563 *
3564 * The PCD handles the device interrupts. Many conditions can cause a
3565 * device interrupt. When an interrupt occurs, the device interrupt
3566 * service routine determines the cause of the interrupt and
3567 * dispatches handling to the appropriate function. These interrupt
3568 * handling functions are described below.
3569 *
3570 * All interrupt registers are processed from LSB to MSB.
3571 *
3572 */
3573 int32_t dwc_otg_pcd_handle_intr(dwc_otg_pcd_t *pcd)
3574 {
3575 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
3576 #ifdef VERBOSE
3577 dwc_otg_core_global_regs_t *global_regs =
3578 core_if->core_global_regs;
3579 #endif
3580 gintsts_data_t gintr_status;
3581 int32_t retval = 0;
3582
3583
3584 #ifdef VERBOSE
3585 DWC_DEBUGPL(DBG_ANY, "%s() gintsts=%08x gintmsk=%08x\n",
3586 __func__,
3587 dwc_read_reg32(&global_regs->gintsts),
3588 dwc_read_reg32(&global_regs->gintmsk));
3589 #endif
3590
3591 if (dwc_otg_is_device_mode(core_if)) {
3592 SPIN_LOCK(&pcd->lock);
3593 #ifdef VERBOSE
3594 DWC_DEBUGPL(DBG_PCDV, "%s() gintsts=%08x gintmsk=%08x\n",
3595 __func__,
3596 dwc_read_reg32(&global_regs->gintsts),
3597 dwc_read_reg32(&global_regs->gintmsk));
3598 #endif
3599
3600 gintr_status.d32 = dwc_otg_read_core_intr(core_if);
3601 /*
3602 if (!gintr_status.d32) {
3603 SPIN_UNLOCK(&pcd->lock);
3604 return 0;
3605 }
3606 */
3607 DWC_DEBUGPL(DBG_PCDV, "%s: gintsts&gintmsk=%08x\n",
3608 __func__, gintr_status.d32);
3609
3610 if (gintr_status.b.sofintr) {
3611 retval |= dwc_otg_pcd_handle_sof_intr(pcd);
3612 }
3613 if (gintr_status.b.rxstsqlvl) {
3614 retval |= dwc_otg_pcd_handle_rx_status_q_level_intr(pcd);
3615 }
3616 if (gintr_status.b.nptxfempty) {
3617 retval |= dwc_otg_pcd_handle_np_tx_fifo_empty_intr(pcd);
3618 }
3619 if (gintr_status.b.ginnakeff) {
3620 retval |= dwc_otg_pcd_handle_in_nak_effective(pcd);
3621 }
3622 if (gintr_status.b.goutnakeff) {
3623 retval |= dwc_otg_pcd_handle_out_nak_effective(pcd);
3624 }
3625 if (gintr_status.b.i2cintr) {
3626 retval |= dwc_otg_pcd_handle_i2c_intr(pcd);
3627 }
3628 if (gintr_status.b.erlysuspend) {
3629 retval |= dwc_otg_pcd_handle_early_suspend_intr(pcd);
3630 }
3631 if (gintr_status.b.usbreset) {
3632 retval |= dwc_otg_pcd_handle_usb_reset_intr(pcd);
3633 }
3634 if (gintr_status.b.enumdone) {
3635 retval |= dwc_otg_pcd_handle_enum_done_intr(pcd);
3636 }
3637 if (gintr_status.b.isooutdrop) {
3638 retval |= dwc_otg_pcd_handle_isoc_out_packet_dropped_intr(pcd);
3639 }
3640 if (gintr_status.b.eopframe) {
3641 retval |= dwc_otg_pcd_handle_end_periodic_frame_intr(pcd);
3642 }
3643 if (gintr_status.b.epmismatch) {
3644 retval |= dwc_otg_pcd_handle_ep_mismatch_intr(core_if);
3645 }
3646 if (gintr_status.b.inepint) {
3647 if(!core_if->multiproc_int_enable) {
3648 retval |= dwc_otg_pcd_handle_in_ep_intr(pcd);
3649 }
3650 }
3651 if (gintr_status.b.outepintr) {
3652 if(!core_if->multiproc_int_enable) {
3653 retval |= dwc_otg_pcd_handle_out_ep_intr(pcd);
3654 }
3655 }
3656 if (gintr_status.b.incomplisoin) {
3657 retval |= dwc_otg_pcd_handle_incomplete_isoc_in_intr(pcd);
3658 }
3659 if (gintr_status.b.incomplisoout) {
3660 retval |= dwc_otg_pcd_handle_incomplete_isoc_out_intr(pcd);
3661 }
3662
3663 /* In MPI mode Device Endpoint interrupts are asserted
3664 * without the outepintr and inepint bits being set, so these
3665 * interrupt handlers are called without checking those bit-fields.
3666 */
3667 if(core_if->multiproc_int_enable) {
3668 retval |= dwc_otg_pcd_handle_in_ep_intr(pcd);
3669 retval |= dwc_otg_pcd_handle_out_ep_intr(pcd);
3670 }
3671 #ifdef VERBOSE
3672 DWC_DEBUGPL(DBG_PCDV, "%s() gintsts=%0x\n", __func__,
3673 dwc_read_reg32(&global_regs->gintsts));
3674 #endif
3675 SPIN_UNLOCK(&pcd->lock);
3676 }
3677 S3C2410X_CLEAR_EINTPEND();
3678
3679 return retval;
3680 }
3681
3682 #endif /* DWC_HOST_ONLY */