1 /* ==========================================================================
2 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd.c $
3 * $Revision: #75 $
4 * $Date: 2008/07/15 $
5 * $Change: 1064940 $
6 *
7 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
8 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
9 * otherwise expressly agreed to in writing between Synopsys and you.
10 *
11 * The Software IS NOT an item of Licensed Software or Licensed Product under
12 * any End User Software License Agreement or Agreement for Licensed Product
13 * with Synopsys or any supplement thereto. You are permitted to use and
14 * redistribute this Software in source and binary forms, with or without
15 * modification, provided that redistributions of source code must retain this
16 * notice. You may not view, use, disclose, copy or distribute this file or
17 * any information contained herein except pursuant to this license grant from
18 * Synopsys. If you do not agree with this notice, including the disclaimer
19 * below, then you are not authorized to use the Software.
20 *
21 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
25 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31 * DAMAGE.
32 * ========================================================================== */
33 #ifndef DWC_DEVICE_ONLY
34
35 /**
36 * @file
37 *
38 * This file contains the implementation of the HCD. In Linux, the HCD
39 * implements the hc_driver API.
40 */
41 #include <linux/kernel.h>
42 #include <linux/module.h>
43 #include <linux/moduleparam.h>
44 #include <linux/init.h>
45 #include <linux/device.h>
46 #include <linux/platform_device.h>
47 #include <linux/errno.h>
48 #include <linux/list.h>
49 #include <linux/interrupt.h>
50 #include <linux/string.h>
51 #include <linux/dma-mapping.h>
52 #include <linux/version.h>
53
54 #include <mach/irqs.h>
55
56 #include "otg_driver.h"
57 #include "otg_hcd.h"
58 #include "otg_regs.h"
59
60 static const char dwc_otg_hcd_name[] = "dwc_otg_hcd";
61
62 static const struct hc_driver dwc_otg_hc_driver = {
63
64 .description = dwc_otg_hcd_name,
65 .product_desc = "DWC OTG Controller",
66 .hcd_priv_size = sizeof(dwc_otg_hcd_t),
67 .irq = dwc_otg_hcd_irq,
68 .flags = HCD_MEMORY | HCD_USB2,
69 .start = dwc_otg_hcd_start,
70 .stop = dwc_otg_hcd_stop,
71 .urb_enqueue = dwc_otg_hcd_urb_enqueue,
72 .urb_dequeue = dwc_otg_hcd_urb_dequeue,
73 .endpoint_disable = dwc_otg_hcd_endpoint_disable,
74 .get_frame_number = dwc_otg_hcd_get_frame_number,
75 .hub_status_data = dwc_otg_hcd_hub_status_data,
76 .hub_control = dwc_otg_hcd_hub_control,
77 };
78
79 /**
80 * Work queue function for starting the HCD when A-Cable is connected.
81  * dwc_otg_hcd_start() must be called from process context.
82 */
83 static void hcd_start_func(struct work_struct *_work)
84 {
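	/*
	 * The dwc_otg_hcd is embedded in the hcd_priv area of struct usb_hcd
	 * (hcd_priv_size above is sizeof(dwc_otg_hcd_t)), so a second
	 * container_of() on hcd_priv recovers the usb_hcd that owns this work.
	 */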
85 struct delayed_work *dw = container_of(_work, struct delayed_work, work);
86 struct dwc_otg_hcd *otg_hcd = container_of(dw, struct dwc_otg_hcd, start_work);
87 struct usb_hcd *usb_hcd = container_of((void *)otg_hcd, struct usb_hcd, hcd_priv);
88 DWC_DEBUGPL(DBG_HCDV, "%s() %p\n", __func__, usb_hcd);
89 if (usb_hcd) {
90 dwc_otg_hcd_start(usb_hcd);
91 }
92 }
93
94 /**
95 * HCD Callback function for starting the HCD when A-Cable is
96 * connected.
97 *
98 * @param p void pointer to the <code>struct usb_hcd</code>
99 */
100 static int32_t dwc_otg_hcd_start_cb(void *p)
101 {
102 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(p);
103 dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if;
104 hprt0_data_t hprt0;
105
106 if (core_if->op_state == B_HOST) {
107 /*
108 		 * Reset the port. During an HNP mode switch the reset
109 * needs to occur within 1ms and have a duration of at
110 * least 50ms.
111 */
112 hprt0.d32 = dwc_otg_read_hprt0(core_if);
113 hprt0.b.prtrst = 1;
114 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
115 ((struct usb_hcd *)p)->self.is_b_host = 1;
116 } else {
117 ((struct usb_hcd *)p)->self.is_b_host = 0;
118 }
119
120 /* Need to start the HCD in a non-interrupt context. */
121 // INIT_WORK(&dwc_otg_hcd->start_work, hcd_start_func);
122 INIT_DELAYED_WORK(&dwc_otg_hcd->start_work, hcd_start_func);
123 // schedule_work(&dwc_otg_hcd->start_work);
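	/*
	 * Start the HCD from the OTG work queue after ~50 ms, presumably so
	 * that the port reset asserted above has been held for its minimum
	 * 50 ms duration before the host begins running.
	 */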
124 queue_delayed_work(core_if->wq_otg, &dwc_otg_hcd->start_work, 50 * HZ / 1000);
125
126 return 1;
127 }
128
129 /**
130 * HCD Callback function for stopping the HCD.
131 *
132 * @param p void pointer to the <code>struct usb_hcd</code>
133 */
134 static int32_t dwc_otg_hcd_stop_cb(void *p)
135 {
136 struct usb_hcd *usb_hcd = (struct usb_hcd *)p;
137 DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p);
138 dwc_otg_hcd_stop(usb_hcd);
139 return 1;
140 }
141
142 static void del_xfer_timers(dwc_otg_hcd_t *hcd)
143 {
144 #ifdef DEBUG
145 int i;
146 int num_channels = hcd->core_if->core_params->host_channels;
147 for (i = 0; i < num_channels; i++) {
148 del_timer(&hcd->core_if->hc_xfer_timer[i]);
149 }
150 #endif
151 }
152
153 static void del_timers(dwc_otg_hcd_t *hcd)
154 {
155 del_xfer_timers(hcd);
156 del_timer(&hcd->conn_timer);
157 }
158
159 /**
160 * Processes all the URBs in a single list of QHs. Completes them with
161 * -ETIMEDOUT and frees the QTD.
162 */
163 static void kill_urbs_in_qh_list(dwc_otg_hcd_t *hcd, struct list_head *qh_list)
164 {
165 struct list_head *qh_item;
166 dwc_otg_qh_t *qh;
167 struct list_head *qtd_item;
168 dwc_otg_qtd_t *qtd;
169 unsigned long flags;
170
171 SPIN_LOCK_IRQSAVE(&hcd->lock, flags);
172 list_for_each(qh_item, qh_list) {
173 qh = list_entry(qh_item, dwc_otg_qh_t, qh_list_entry);
174 for (qtd_item = qh->qtd_list.next;
175 qtd_item != &qh->qtd_list;
176 qtd_item = qh->qtd_list.next) {
177 qtd = list_entry(qtd_item, dwc_otg_qtd_t, qtd_list_entry);
178 if (qtd->urb != NULL) {
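				/*
				 * Drop the lock while completing the URB: the
				 * completion handler may resubmit URBs and
				 * re-take hcd->lock.
				 */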
179 SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags);
180 dwc_otg_hcd_complete_urb(hcd, qtd->urb,
181 -ETIMEDOUT);
182 SPIN_LOCK_IRQSAVE(&hcd->lock, flags);
183 }
184 dwc_otg_hcd_qtd_remove_and_free(hcd, qtd);
185 }
186 }
187 SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags);
188 }
189
190 /**
191 * Responds with an error status of ETIMEDOUT to all URBs in the non-periodic
192 * and periodic schedules. The QTD associated with each URB is removed from
193 * the schedule and freed. This function may be called when a disconnect is
194 * detected or when the HCD is being stopped.
195 */
196 static void kill_all_urbs(dwc_otg_hcd_t *hcd)
197 {
198 kill_urbs_in_qh_list(hcd, &hcd->non_periodic_sched_inactive);
199 kill_urbs_in_qh_list(hcd, &hcd->non_periodic_sched_active);
200 kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_inactive);
201 kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_ready);
202 kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_assigned);
203 kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_queued);
204 }
205
206 /**
207 * HCD Callback function for disconnect of the HCD.
208 *
209 * @param p void pointer to the <code>struct usb_hcd</code>
210 */
211 static int32_t dwc_otg_hcd_disconnect_cb(void *p)
212 {
213 gintsts_data_t intr;
214 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(p);
215
216 //DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p);
217
218 /*
219 * Set status flags for the hub driver.
220 */
221 dwc_otg_hcd->flags.b.port_connect_status_change = 1;
222 dwc_otg_hcd->flags.b.port_connect_status = 0;
223
224 /*
225 * Shutdown any transfers in process by clearing the Tx FIFO Empty
226 * interrupt mask and status bits and disabling subsequent host
227 * channel interrupts.
228 */
229 intr.d32 = 0;
230 intr.b.nptxfempty = 1;
231 intr.b.ptxfempty = 1;
232 intr.b.hcintr = 1;
233 dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintmsk, intr.d32, 0);
234 dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintsts, intr.d32, 0);
235
236 del_timers(dwc_otg_hcd);
237
238 /*
239 * Turn off the vbus power only if the core has transitioned to device
240 * mode. If still in host mode, need to keep power on to detect a
241 * reconnection.
242 */
243 if (dwc_otg_is_device_mode(dwc_otg_hcd->core_if)) {
244 if (dwc_otg_hcd->core_if->op_state != A_SUSPEND) {
245 hprt0_data_t hprt0 = { .d32=0 };
246 DWC_PRINT("Disconnect: PortPower off\n");
247 hprt0.b.prtpwr = 0;
248 dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0.d32);
249 }
250
251 dwc_otg_disable_host_interrupts(dwc_otg_hcd->core_if);
252 }
253
254 /* Respond with an error status to all URBs in the schedule. */
255 kill_all_urbs(dwc_otg_hcd);
256
257 if (dwc_otg_is_host_mode(dwc_otg_hcd->core_if)) {
258 /* Clean up any host channels that were in use. */
259 int num_channels;
260 int i;
261 dwc_hc_t *channel;
262 dwc_otg_hc_regs_t *hc_regs;
263 hcchar_data_t hcchar;
264
265 num_channels = dwc_otg_hcd->core_if->core_params->host_channels;
266
267 if (!dwc_otg_hcd->core_if->dma_enable) {
268 /* Flush out any channel requests in slave mode. */
269 for (i = 0; i < num_channels; i++) {
270 channel = dwc_otg_hcd->hc_ptr_array[i];
271 if (list_empty(&channel->hc_list_entry)) {
272 hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[i];
273 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
274 if (hcchar.b.chen) {
275 hcchar.b.chen = 0;
276 hcchar.b.chdis = 1;
277 hcchar.b.epdir = 0;
278 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
279 }
280 }
281 }
282 }
283
284 for (i = 0; i < num_channels; i++) {
285 channel = dwc_otg_hcd->hc_ptr_array[i];
286 if (list_empty(&channel->hc_list_entry)) {
287 hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[i];
288 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
289 if (hcchar.b.chen) {
290 /* Halt the channel. */
291 hcchar.b.chdis = 1;
292 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
293 }
294
295 dwc_otg_hc_cleanup(dwc_otg_hcd->core_if, channel);
296 list_add_tail(&channel->hc_list_entry,
297 &dwc_otg_hcd->free_hc_list);
298 }
299 }
300 }
301
302 /* A disconnect will end the session so the B-Device is no
303 * longer a B-host. */
304 ((struct usb_hcd *)p)->self.is_b_host = 0;
305 return 1;
306 }
307
308 /**
309 * Connection timeout function. An OTG host is required to display a
310 * message if the device does not connect within 10 seconds.
311 */
312 void dwc_otg_hcd_connect_timeout(unsigned long ptr)
313 {
314 DWC_DEBUGPL(DBG_HCDV, "%s(%x)\n", __func__, (int)ptr);
315 DWC_PRINT("Connect Timeout\n");
316 DWC_ERROR("Device Not Connected/Responding\n");
317 }
318
319 /**
320 * Start the connection timer. An OTG host is required to display a
321 * message if the device does not connect within 10 seconds. The
322 * timer is deleted if a port connect interrupt occurs before the
323 * timer expires.
324 */
325 static void dwc_otg_hcd_start_connect_timer(dwc_otg_hcd_t *hcd)
326 {
327 init_timer(&hcd->conn_timer);
328 hcd->conn_timer.function = dwc_otg_hcd_connect_timeout;
329 hcd->conn_timer.data = 0;
330 hcd->conn_timer.expires = jiffies + (HZ * 10);
331 add_timer(&hcd->conn_timer);
332 }
333
334 /**
335  * HCD Callback function for session start of the HCD.
336 *
337 * @param p void pointer to the <code>struct usb_hcd</code>
338 */
339 static int32_t dwc_otg_hcd_session_start_cb(void *p)
340 {
341 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(p);
342 DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p);
343 dwc_otg_hcd_start_connect_timer(dwc_otg_hcd);
344 return 1;
345 }
346
347 /**
348 * HCD Callback structure for handling mode switching.
349 */
350 static dwc_otg_cil_callbacks_t hcd_cil_callbacks = {
351 .start = dwc_otg_hcd_start_cb,
352 .stop = dwc_otg_hcd_stop_cb,
353 .disconnect = dwc_otg_hcd_disconnect_cb,
354 .session_start = dwc_otg_hcd_session_start_cb,
355 .p = 0,
356 };
357
358 /**
359 * Reset tasklet function
360 */
361 static void reset_tasklet_func(unsigned long data)
362 {
363 dwc_otg_hcd_t *dwc_otg_hcd = (dwc_otg_hcd_t *)data;
364 dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if;
365 hprt0_data_t hprt0;
366
367 DWC_DEBUGPL(DBG_HCDV, "USB RESET tasklet called\n");
368
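	/*
	 * Assert the port reset, hold it for 60 ms (above the 50 ms minimum
	 * this driver uses for a root-port reset), then deassert it and flag
	 * the change so the hub driver picks it up.
	 */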
369 hprt0.d32 = dwc_otg_read_hprt0(core_if);
370 hprt0.b.prtrst = 1;
371 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
372 mdelay(60);
373
374 hprt0.b.prtrst = 0;
375 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
376 dwc_otg_hcd->flags.b.port_reset_change = 1;
377 }
378
379 static struct tasklet_struct reset_tasklet = {
380 .next = NULL,
381 .state = 0,
382 .count = ATOMIC_INIT(0),
383 .func = reset_tasklet_func,
384 .data = 0,
385 };
386
387 /**
388 * Initializes the HCD. This function allocates memory for and initializes the
389 * static parts of the usb_hcd and dwc_otg_hcd structures. It also registers the
390 * USB bus with the core and calls the hc_driver->start() function. It returns
391 * a negative error on failure.
392 */
393 int dwc_otg_hcd_init(struct platform_device *pdev)
394 {
395 struct usb_hcd *hcd = NULL;
396 dwc_otg_hcd_t *dwc_otg_hcd = NULL;
397 dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev);
398
399 int num_channels;
400 int i;
401 dwc_hc_t *channel;
402
403 int retval = 0;
404
405 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD INIT\n");
406
407 	/* Report whether the core is using DMA and, if so, which DMA mode. */
408 if (otg_dev->core_if->dma_enable) {
409 DWC_PRINT("Using DMA mode\n");
410
411 if (otg_dev->core_if->dma_desc_enable) {
412 DWC_PRINT("Device using Descriptor DMA mode\n");
413 } else {
414 DWC_PRINT("Device using Buffer DMA mode\n");
415 }
416 }
417 /*
418 * Allocate memory for the base HCD plus the DWC OTG HCD.
419 * Initialize the base HCD.
420 */
421
422 hcd = usb_create_hcd(&dwc_otg_hc_driver, &pdev->dev, "gadget");
423 if (!hcd) {
424 retval = -ENOMEM;
425 goto error1;
426 }
427
428 hcd->regs = otg_dev->base;
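	/* The controller exposes a single root-hub port, which is also the OTG port. */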
429 hcd->self.otg_port = 1;
430
431 	/* Integrate the TT into the root hub; by default this is disabled. */
432 hcd->has_tt = 1;
433
434 /* Initialize the DWC OTG HCD. */
435 dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
436 dwc_otg_hcd->core_if = otg_dev->core_if;
437 otg_dev->hcd = dwc_otg_hcd;
438 init_hcd_usecs(dwc_otg_hcd);
439
440 	/* Lock protecting the HCD's schedules and state. */
441 spin_lock_init(&dwc_otg_hcd->lock);
442
443 /* Register the HCD CIL Callbacks */
444 dwc_otg_cil_register_hcd_callbacks(otg_dev->core_if,
445 &hcd_cil_callbacks, hcd);
446
447 /* Initialize the non-periodic schedule. */
448 INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_inactive);
449 INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_active);
450
451 /* Initialize the periodic schedule. */
452 INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_inactive);
453 INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_ready);
454 INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_assigned);
455 INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_queued);
456
457 /*
458 * Create a host channel descriptor for each host channel implemented
459 * in the controller. Initialize the channel descriptor array.
460 */
461 INIT_LIST_HEAD(&dwc_otg_hcd->free_hc_list);
462 num_channels = dwc_otg_hcd->core_if->core_params->host_channels;
463 memset(dwc_otg_hcd->hc_ptr_array, 0, sizeof(dwc_otg_hcd->hc_ptr_array));
464 for (i = 0; i < num_channels; i++) {
465 channel = kmalloc(sizeof(dwc_hc_t), GFP_KERNEL);
466 if (channel == NULL) {
467 retval = -ENOMEM;
468 DWC_ERROR("%s: host channel allocation failed\n", __func__);
469 goto error2;
470 }
471 memset(channel, 0, sizeof(dwc_hc_t));
472 channel->hc_num = i;
473 dwc_otg_hcd->hc_ptr_array[i] = channel;
474 #ifdef DEBUG
475 init_timer(&dwc_otg_hcd->core_if->hc_xfer_timer[i]);
476 #endif
477 DWC_DEBUGPL(DBG_HCDV, "HCD Added channel #%d, hc=%p\n", i, channel);
478 }
479
480 /* Initialize the Connection timeout timer. */
481 init_timer(&dwc_otg_hcd->conn_timer);
482
483 /* Initialize reset tasklet. */
484 reset_tasklet.data = (unsigned long) dwc_otg_hcd;
485 dwc_otg_hcd->reset_tasklet = &reset_tasklet;
486
487 /*
488 * Finish generic HCD initialization and start the HCD. This function
489 * allocates the DMA buffer pool, registers the USB bus, requests the
490 * IRQ line, and calls dwc_otg_hcd_start method.
491 */
492 retval = usb_add_hcd(hcd, otg_dev->irq, IRQF_SHARED);
493 if (retval < 0) {
494 goto error2;
495 }
496
497 /*
498 * Allocate space for storing data on status transactions. Normally no
499 * data is sent, but this space acts as a bit bucket. This must be
500 * done after usb_add_hcd since that function allocates the DMA buffer
501 * pool.
502 */
503 if (otg_dev->core_if->dma_enable) {
504 dwc_otg_hcd->status_buf =
505 dma_alloc_coherent(&pdev->dev,
506 DWC_OTG_HCD_STATUS_BUF_SIZE,
507 &dwc_otg_hcd->status_buf_dma,
508 GFP_KERNEL | GFP_DMA);
509 } else {
510 dwc_otg_hcd->status_buf = kmalloc(DWC_OTG_HCD_STATUS_BUF_SIZE,
511 GFP_KERNEL);
512 }
513 if (!dwc_otg_hcd->status_buf) {
514 retval = -ENOMEM;
515 DWC_ERROR("%s: status_buf allocation failed\n", __func__);
516 goto error3;
517 }
518
519 dwc_otg_hcd->otg_dev = otg_dev;
520
521 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Initialized HCD, usbbus=%d\n",
522 hcd->self.busnum);
523 return 0;
524
525 /* Error conditions */
526 error3:
527 usb_remove_hcd(hcd);
528 error2:
529 dwc_otg_hcd_free(hcd);
530 usb_put_hcd(hcd);
531 error1:
532 return retval;
533 }
534
535 /**
536 * Removes the HCD.
537 * Frees memory and resources associated with the HCD and deregisters the bus.
538 */
539 void dwc_otg_hcd_remove(struct platform_device *pdev)
540 {
541 dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev);
542 dwc_otg_hcd_t *dwc_otg_hcd;
543 struct usb_hcd *hcd;
544
545 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD REMOVE\n");
546
547 if (!otg_dev) {
548 DWC_DEBUGPL(DBG_ANY, "%s: otg_dev NULL!\n", __func__);
549 return;
550 }
551
552 dwc_otg_hcd = otg_dev->hcd;
553
554 if (!dwc_otg_hcd) {
555 DWC_DEBUGPL(DBG_ANY, "%s: otg_dev->hcd NULL!\n", __func__);
556 return;
557 }
558
559 hcd = dwc_otg_hcd_to_hcd(dwc_otg_hcd);
560
561 if (!hcd) {
562 DWC_DEBUGPL(DBG_ANY, "%s: dwc_otg_hcd_to_hcd(dwc_otg_hcd) NULL!\n", __func__);
563 return;
564 }
565
566 /* Turn off all interrupts */
567 dwc_write_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintmsk, 0);
568 dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gahbcfg, 1, 0);
569
570 usb_remove_hcd(hcd);
571 dwc_otg_hcd_free(hcd);
572 usb_put_hcd(hcd);
573 }
574
575 /* =========================================================================
576 * Linux HC Driver Functions
577 * ========================================================================= */
578
579 /**
580 * Initializes dynamic portions of the DWC_otg HCD state.
581 */
582 static void hcd_reinit(dwc_otg_hcd_t *hcd)
583 {
584 struct list_head *item;
585 int num_channels;
586 int i;
587 dwc_hc_t *channel;
588
589 hcd->flags.d32 = 0;
590
591 hcd->non_periodic_qh_ptr = &hcd->non_periodic_sched_active;
592 hcd->non_periodic_channels = 0;
593 hcd->periodic_channels = 0;
594 hcd->nakking_channels = 0;
595
596 /*
597 * Put all channels in the free channel list and clean up channel
598 * states.
599 */
600 item = hcd->free_hc_list.next;
601 while (item != &hcd->free_hc_list) {
602 list_del(item);
603 item = hcd->free_hc_list.next;
604 }
605 num_channels = hcd->core_if->core_params->host_channels;
606 for (i = 0; i < num_channels; i++) {
607 channel = hcd->hc_ptr_array[i];
608 list_add_tail(&channel->hc_list_entry, &hcd->free_hc_list);
609 dwc_otg_hc_cleanup(hcd->core_if, channel);
610 }
611
612 /* Initialize the DWC core for host mode operation. */
613 dwc_otg_core_host_init(hcd->core_if);
614 }
615
616 /** Initializes the DWC_otg controller and its root hub and prepares it for host
617 * mode operation. Activates the root port. Returns 0 on success and a negative
618 * error code on failure. */
619 int dwc_otg_hcd_start(struct usb_hcd *hcd)
620 {
621 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
622 dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if;
623 struct usb_bus *bus;
624
625
626 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD START\n");
627
628 bus = hcd_to_bus(hcd);
629
630 	/* Initialize the bus state. If the core is in device mode, there is
631 	 * nothing more to do here; just mark the HCD running and return. */
632 if (dwc_otg_is_device_mode(core_if)) {
633 hcd->state = HC_STATE_RUNNING;
634 return 0;
635 }
636 hcd->state = HC_STATE_RUNNING;
637
638 	/* If a root hub is already attached, ask the hub driver to resume it. */
639 if (bus->root_hub) {
640 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Has Root Hub\n");
641 /* Inform the HUB driver to resume. */
642 usb_hcd_resume_root_hub(hcd);
643 }
644 else {
645 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Does Not Have Root Hub\n");
646 }
647
648 hcd_reinit(dwc_otg_hcd);
649
650 return 0;
651 }
652
653 static void qh_list_free(dwc_otg_hcd_t *hcd, struct list_head *qh_list)
654 {
655 struct list_head *item;
656 dwc_otg_qh_t *qh;
657 unsigned long flags;
658
659 if (!qh_list->next) {
660 /* The list hasn't been initialized yet. */
661 return;
662 }
663
664 /* Ensure there are no QTDs or URBs left. */
665 kill_urbs_in_qh_list(hcd, qh_list);
666
667 SPIN_LOCK_IRQSAVE(&hcd->lock, flags);
668 for (item = qh_list->next; item != qh_list; item = qh_list->next) {
669 qh = list_entry(item, dwc_otg_qh_t, qh_list_entry);
670 dwc_otg_hcd_qh_remove_and_free(hcd, qh);
671 }
672 SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags);
673 }
674
675 /**
676 * Halts the DWC_otg host mode operations in a clean manner. USB transfers are
677 * stopped.
678 */
679 void dwc_otg_hcd_stop(struct usb_hcd *hcd)
680 {
681 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
682 hprt0_data_t hprt0 = { .d32=0 };
683
684 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD STOP\n");
685
686 /* Turn off all host-specific interrupts. */
687 dwc_otg_disable_host_interrupts(dwc_otg_hcd->core_if);
688
689 /*
690 * The root hub should be disconnected before this function is called.
691 * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue)
692 * and the QH lists (via ..._hcd_endpoint_disable).
693 */
694
695 /* Turn off the vbus power */
696 DWC_PRINT("PortPower off\n");
697 hprt0.b.prtpwr = 0;
698 dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0.d32);
699 }
700
701 /** Returns the current frame number. */
702 int dwc_otg_hcd_get_frame_number(struct usb_hcd *hcd)
703 {
704 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
705 hfnum_data_t hfnum;
706
707 hfnum.d32 = dwc_read_reg32(&dwc_otg_hcd->core_if->
708 host_if->host_global_regs->hfnum);
709
710 #ifdef DEBUG_SOF
711 DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD GET FRAME NUMBER %d\n", hfnum.b.frnum);
712 #endif
713 return hfnum.b.frnum;
714 }
715
716 /**
717 * Frees secondary storage associated with the dwc_otg_hcd structure contained
718 * in the struct usb_hcd field.
719 */
720 void dwc_otg_hcd_free(struct usb_hcd *hcd)
721 {
722 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
723 int i;
724
725 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD FREE\n");
726
727 del_timers(dwc_otg_hcd);
728
729 /* Free memory for QH/QTD lists */
730 qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_inactive);
731 qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_active);
732 qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_inactive);
733 qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_ready);
734 qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_assigned);
735 qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_queued);
736
737 /* Free memory for the host channels. */
738 for (i = 0; i < MAX_EPS_CHANNELS; i++) {
739 dwc_hc_t *hc = dwc_otg_hcd->hc_ptr_array[i];
740 if (hc != NULL) {
741 DWC_DEBUGPL(DBG_HCDV, "HCD Free channel #%i, hc=%p\n", i, hc);
742 kfree(hc);
743 }
744 }
745
746 if (dwc_otg_hcd->core_if->dma_enable) {
747 if (dwc_otg_hcd->status_buf_dma) {
748 dma_free_coherent(hcd->self.controller,
749 DWC_OTG_HCD_STATUS_BUF_SIZE,
750 dwc_otg_hcd->status_buf,
751 dwc_otg_hcd->status_buf_dma);
752 }
753 } else if (dwc_otg_hcd->status_buf != NULL) {
754 kfree(dwc_otg_hcd->status_buf);
755 }
756 }
757
758 #ifdef DEBUG
759 static void dump_urb_info(struct urb *urb, char* fn_name)
760 {
761 DWC_PRINT("%s, urb %p\n", fn_name, urb);
762 DWC_PRINT(" Device address: %d\n", usb_pipedevice(urb->pipe));
763 DWC_PRINT(" Endpoint: %d, %s\n", usb_pipeendpoint(urb->pipe),
764 (usb_pipein(urb->pipe) ? "IN" : "OUT"));
765 DWC_PRINT(" Endpoint type: %s\n",
766 ({char *pipetype;
767 switch (usb_pipetype(urb->pipe)) {
768 case PIPE_CONTROL: pipetype = "CONTROL"; break;
769 case PIPE_BULK: pipetype = "BULK"; break;
770 case PIPE_INTERRUPT: pipetype = "INTERRUPT"; break;
771 case PIPE_ISOCHRONOUS: pipetype = "ISOCHRONOUS"; break;
772 default: pipetype = "UNKNOWN"; break;
773 }; pipetype;}));
774 DWC_PRINT(" Speed: %s\n",
775 ({char *speed;
776 switch (urb->dev->speed) {
777 case USB_SPEED_HIGH: speed = "HIGH"; break;
778 case USB_SPEED_FULL: speed = "FULL"; break;
779 case USB_SPEED_LOW: speed = "LOW"; break;
780 default: speed = "UNKNOWN"; break;
781 }; speed;}));
782 DWC_PRINT(" Max packet size: %d\n",
783 usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
784 DWC_PRINT(" Data buffer length: %d\n", urb->transfer_buffer_length);
785 DWC_PRINT(" Transfer buffer: %p, Transfer DMA: %p\n",
786 urb->transfer_buffer, (void *)urb->transfer_dma);
787 DWC_PRINT(" Setup buffer: %p, Setup DMA: %p\n",
788 urb->setup_packet, (void *)urb->setup_dma);
789 DWC_PRINT(" Interval: %d\n", urb->interval);
790 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
791 int i;
792 for (i = 0; i < urb->number_of_packets; i++) {
793 DWC_PRINT(" ISO Desc %d:\n", i);
794 DWC_PRINT(" offset: %d, length %d\n",
795 urb->iso_frame_desc[i].offset,
796 urb->iso_frame_desc[i].length);
797 }
798 }
799 }
800
801 static void dump_channel_info(dwc_otg_hcd_t *hcd,
802 dwc_otg_qh_t *qh)
803 {
804 if (qh->channel != NULL) {
805 dwc_hc_t *hc = qh->channel;
806 struct list_head *item;
807 dwc_otg_qh_t *qh_item;
808 int num_channels = hcd->core_if->core_params->host_channels;
809 int i;
810
811 dwc_otg_hc_regs_t *hc_regs;
812 hcchar_data_t hcchar;
813 hcsplt_data_t hcsplt;
814 hctsiz_data_t hctsiz;
815 uint32_t hcdma;
816
817 hc_regs = hcd->core_if->host_if->hc_regs[hc->hc_num];
818 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
819 hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt);
820 hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);
821 hcdma = dwc_read_reg32(&hc_regs->hcdma);
822
823 DWC_PRINT(" Assigned to channel %p:\n", hc);
824 DWC_PRINT(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32);
825 DWC_PRINT(" hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma);
826 DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
827 hc->dev_addr, hc->ep_num, hc->ep_is_in);
828 DWC_PRINT(" ep_type: %d\n", hc->ep_type);
829 DWC_PRINT(" max_packet: %d\n", hc->max_packet);
830 DWC_PRINT(" data_pid_start: %d\n", hc->data_pid_start);
831 DWC_PRINT(" xfer_started: %d\n", hc->xfer_started);
832 DWC_PRINT(" halt_status: %d\n", hc->halt_status);
833 DWC_PRINT(" xfer_buff: %p\n", hc->xfer_buff);
834 DWC_PRINT(" xfer_len: %d\n", hc->xfer_len);
835 DWC_PRINT(" qh: %p\n", hc->qh);
836 DWC_PRINT(" NP inactive sched:\n");
837 list_for_each(item, &hcd->non_periodic_sched_inactive) {
838 qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry);
839 DWC_PRINT(" %p\n", qh_item);
840 }
841 DWC_PRINT(" NP active sched:\n");
842 list_for_each(item, &hcd->non_periodic_sched_active) {
843 qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry);
844 DWC_PRINT(" %p\n", qh_item);
845 }
846 DWC_PRINT(" Channels: \n");
847 for (i = 0; i < num_channels; i++) {
848 dwc_hc_t *hc = hcd->hc_ptr_array[i];
849 DWC_PRINT(" %2d: %p\n", i, hc);
850 }
851 }
852 }
853 #endif
854
855
856 // The OTG host requires the DMA address to be DWORD-aligned; if the URB's
857 // buffer is not, substitute a properly aligned bounce buffer.
858 inline
859 int hcd_check_and_patch_dma_addr(struct urb *urb)
860 {
861 	if (!urb->transfer_buffer || !urb->transfer_dma || urb->transfer_dma == 0xffffffff)
862 		return 0;
863 
864 	if (((u32)urb->transfer_buffer) & 0x3) {
865 /*
866 printk("%s: "
867 "urb(%.8x) "
868 "transfer_buffer=%.8x, "
869 "transfer_dma=%.8x, "
870 "transfer_buffer_length=%d, "
871 "actual_length=%d(%x), "
872 "\n",
873 ((urb->transfer_flags & URB_DIR_MASK)==URB_DIR_OUT)?"OUT":"IN",
874 urb,
875 urb->transfer_buffer,
876 urb->transfer_dma,
877 urb->transfer_buffer_length,
878 urb->actual_length,urb->actual_length
879 );
880 */
881 		if (!urb->aligned_transfer_buffer || urb->aligned_transfer_buffer_length < urb->transfer_buffer_length) {
882 			urb->aligned_transfer_buffer_length = urb->transfer_buffer_length;
883 			if (urb->aligned_transfer_buffer) {
884 				kfree(urb->aligned_transfer_buffer);
885 			}
886 			urb->aligned_transfer_buffer = kmalloc(urb->aligned_transfer_buffer_length, GFP_KERNEL | GFP_DMA | GFP_ATOMIC);
887 			if (!urb->aligned_transfer_buffer) {
888 				DWC_ERROR("Cannot alloc required buffer!!\n");
889 				//BUG();
890 				return -1;
891 			}
892 			urb->aligned_transfer_dma = dma_map_single(NULL, (void *)urb->aligned_transfer_buffer, urb->aligned_transfer_buffer_length, DMA_FROM_DEVICE);
893 //printk(" new allocated aligned_buf=%.8x aligned_buf_len=%d\n", (u32)urb->aligned_transfer_buffer, urb->aligned_transfer_buffer_length);
894 }
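		/*
		 * Redirect the URB's DMA address to the bounce buffer. For OUT
		 * transfers, also stage the caller's data in it and sync it for
		 * the device.
		 */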
895 		urb->transfer_dma = urb->aligned_transfer_dma;
896 		if ((urb->transfer_flags & URB_DIR_MASK) == URB_DIR_OUT) {
897 			memcpy(urb->aligned_transfer_buffer, urb->transfer_buffer, urb->transfer_buffer_length);
898 			dma_sync_single_for_device(NULL, urb->transfer_dma, urb->transfer_buffer_length, DMA_TO_DEVICE);
899 		}
900 	}
901 return 0;
902 }
903
904
905
906 /** Starts processing a USB transfer request specified by a USB Request Block
907 * (URB). mem_flags indicates the type of memory allocation to use while
908 * processing this URB. */
909 int dwc_otg_hcd_urb_enqueue(struct usb_hcd *hcd,
910 // struct usb_host_endpoint *ep,
911 struct urb *urb,
912 gfp_t mem_flags
913 )
914 {
915 int retval = 0;
916 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
917 dwc_otg_qtd_t *qtd;
918 unsigned long flags;
919
920 SPIN_LOCK_IRQSAVE(&dwc_otg_hcd->lock, flags);
921
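	/* An URB whose hcpriv is already set has already been enqueued here. */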
922 if (urb->hcpriv != NULL) {
923 SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
924 return -ENOMEM;
925
926 }
927 #ifdef DEBUG
928 if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
929 dump_urb_info(urb, "dwc_otg_hcd_urb_enqueue");
930 }
931 #endif
932 if (!dwc_otg_hcd->flags.b.port_connect_status) {
933 /* No longer connected. */
934 SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
935 return -ENODEV;
936 }
937
938 if (hcd_check_and_patch_dma_addr(urb)) {
939 DWC_ERROR("Unable to check and patch dma addr\n");
940 SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
941 return -ENOMEM;
942 }
943 qtd = dwc_otg_hcd_qtd_create(urb);
944 if (qtd == NULL) {
945 DWC_ERROR("DWC OTG HCD URB Enqueue failed creating QTD\n");
946 SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
947 return -ENOMEM;
948 }
949
950 retval = dwc_otg_hcd_qtd_add(qtd, dwc_otg_hcd);
951 if (retval < 0) {
952 DWC_ERROR("DWC OTG HCD URB Enqueue failed adding QTD. "
953 "Error status %d\n", retval);
954 dwc_otg_hcd_qtd_free(qtd);
955 }
956 SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
957 return retval;
958 }
959
960 /** Aborts/cancels a USB transfer request. Always returns 0 to indicate
961 * success. */
962 int dwc_otg_hcd_urb_dequeue(struct usb_hcd *hcd,
963 struct urb *urb, int status)
964 {
965 unsigned long flags;
966 dwc_otg_hcd_t *dwc_otg_hcd;
967 dwc_otg_qtd_t *urb_qtd;
968 dwc_otg_qh_t *qh;
969 struct usb_host_endpoint *ep = dwc_urb_to_endpoint(urb);
970 int rc;
971
972 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Dequeue\n");
973
974 dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
975
976 SPIN_LOCK_IRQSAVE(&dwc_otg_hcd->lock, flags);
977
978 urb_qtd = (dwc_otg_qtd_t *)urb->hcpriv;
979 qh = (dwc_otg_qh_t *)ep->hcpriv;
980
981 #ifdef DEBUG
982 if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
983 dump_urb_info(urb, "dwc_otg_hcd_urb_dequeue");
984 if (urb_qtd == qh->qtd_in_process) {
985 dump_channel_info(dwc_otg_hcd, qh);
986 }
987 }
988 #endif
989
990 if (qh && urb_qtd == qh->qtd_in_process) {
991 /* The QTD is in process (it has been assigned to a channel). */
992
993 if (dwc_otg_hcd->flags.b.port_connect_status) {
994 /*
995 * If still connected (i.e. in host mode), halt the
996 * channel so it can be used for other transfers. If
997 * no longer connected, the host registers can't be
998 * written to halt the channel since the core is in
999 * device mode.
1000 */
1001 dwc_otg_hc_halt(dwc_otg_hcd, qh->channel,
1002 DWC_OTG_HC_XFER_URB_DEQUEUE);
1003 }
1004 }
1005
1006 /*
1007 * Free the QTD and clean up the associated QH. Leave the QH in the
1008 * schedule if it has any remaining QTDs.
1009 */
1010 dwc_otg_hcd_qtd_remove_and_free(dwc_otg_hcd, urb_qtd);
1011 if (qh && urb_qtd == qh->qtd_in_process) {
1012 dwc_otg_hcd_qh_deactivate(dwc_otg_hcd, qh, 0);
1013 qh->channel = NULL;
1014 qh->qtd_in_process = NULL;
1015 } else {
1016 if (qh && list_empty(&qh->qtd_list)) {
1017 dwc_otg_hcd_qh_remove(dwc_otg_hcd, qh);
1018 }
1019 }
1020
1021
1022 rc = usb_hcd_check_unlink_urb(hcd, urb, status);
1023
1024 if (!rc) {
1025 usb_hcd_unlink_urb_from_ep(hcd, urb);
1026 }
1027 urb->hcpriv = NULL;
1028 SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
1029
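	/*
	 * Give the URB back after dropping the lock so a completion handler
	 * that resubmits URBs does not deadlock on hcd->lock.
	 */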
1030 if (!rc) {
1031 usb_hcd_giveback_urb(hcd, urb, status);
1032 }
1033 if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
1034 DWC_PRINT("Called usb_hcd_giveback_urb()\n");
1035 DWC_PRINT(" urb->status = %d\n", urb->status);
1036 }
1037
1038 return 0;
1039 }
1040
1041 /** Frees resources in the DWC_otg controller related to a given endpoint. Also
1042 * clears state in the HCD related to the endpoint. Any URBs for the endpoint
1043 * must already be dequeued. */
1044 void dwc_otg_hcd_endpoint_disable(struct usb_hcd *hcd,
1045 struct usb_host_endpoint *ep)
1046 {
1047 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
1048 dwc_otg_qh_t *qh;
1049
1050 unsigned long flags;
1051 int retry = 0;
1052
1053 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD EP DISABLE: _bEndpointAddress=0x%02x, "
1054 "endpoint=%d\n", ep->desc.bEndpointAddress,
1055 dwc_ep_addr_to_endpoint(ep->desc.bEndpointAddress));
1056
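	/*
	 * Wait for the endpoint's QTD list to drain, retrying for up to 250
	 * jiffies before warning and freeing the QH anyway.
	 */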
1057 rescan:
1058 SPIN_LOCK_IRQSAVE(&dwc_otg_hcd->lock, flags);
1059 qh = (dwc_otg_qh_t *)(ep->hcpriv);
1060 if (!qh)
1061 goto done;
1062
1063 /** Check that the QTD list is really empty */
1064 if (!list_empty(&qh->qtd_list)) {
1065 if (retry++ < 250) {
1066 SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
1067 schedule_timeout_uninterruptible(1);
1068 goto rescan;
1069 }
1070
1071 DWC_WARN("DWC OTG HCD EP DISABLE:"
1072 " QTD List for this endpoint is not empty\n");
1073 }
1074
1075 dwc_otg_hcd_qh_remove_and_free(dwc_otg_hcd, qh);
1076 ep->hcpriv = NULL;
1077 done:
1078 SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
1079 }
1080
1081 /** Handles host mode interrupts for the DWC_otg controller. Returns IRQ_NONE if
1082 * there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid
1083 * interrupt.
1084 *
1085 * This function is called by the USB core when an interrupt occurs */
1086 irqreturn_t dwc_otg_hcd_irq(struct usb_hcd *hcd)
1087 {
1088 int retVal = 0;
1089 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
1090 retVal = dwc_otg_hcd_handle_intr(dwc_otg_hcd);
1091 if (dwc_otg_hcd->flags.b.port_connect_status_change == 1)
1092 usb_hcd_poll_rh_status(hcd);
1093 return IRQ_RETVAL(retVal);
1094 }
1095
1096 /** Creates Status Change bitmap for the root hub and root port. The bitmap is
1097 * returned in buf. Bit 0 is the status change indicator for the root hub. Bit 1
1098 * is the status change indicator for the single root port. Returns 1 if either
1099 * change indicator is 1, otherwise returns 0. */
1100 int dwc_otg_hcd_hub_status_data(struct usb_hcd *hcd, char *buf)
1101 {
1102 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
1103
1104 buf[0] = 0;
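	/* Only bit 1 (the single root port) is ever reported; bit 0 (the hub itself) stays zero. */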
1105 buf[0] |= (dwc_otg_hcd->flags.b.port_connect_status_change ||
1106 dwc_otg_hcd->flags.b.port_reset_change ||
1107 dwc_otg_hcd->flags.b.port_enable_change ||
1108 dwc_otg_hcd->flags.b.port_suspend_change ||
1109 dwc_otg_hcd->flags.b.port_over_current_change) << 1;
1110
1111 #ifdef DEBUG
1112 if (buf[0]) {
1113 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB STATUS DATA:"
1114 " Root port status changed\n");
1115 DWC_DEBUGPL(DBG_HCDV, " port_connect_status_change: %d\n",
1116 dwc_otg_hcd->flags.b.port_connect_status_change);
1117 DWC_DEBUGPL(DBG_HCDV, " port_reset_change: %d\n",
1118 dwc_otg_hcd->flags.b.port_reset_change);
1119 DWC_DEBUGPL(DBG_HCDV, " port_enable_change: %d\n",
1120 dwc_otg_hcd->flags.b.port_enable_change);
1121 DWC_DEBUGPL(DBG_HCDV, " port_suspend_change: %d\n",
1122 dwc_otg_hcd->flags.b.port_suspend_change);
1123 DWC_DEBUGPL(DBG_HCDV, " port_over_current_change: %d\n",
1124 dwc_otg_hcd->flags.b.port_over_current_change);
1125 }
1126 #endif
1127 return (buf[0] != 0);
1128 }
1129
1130 #ifdef DWC_HS_ELECT_TST
1131 /*
1132 * Quick and dirty hack to implement the HS Electrical Test
1133 * SINGLE_STEP_GET_DEVICE_DESCRIPTOR feature.
1134 *
1135 * This code was copied from our userspace app "hset". It sends a
1136 * Get Device Descriptor control sequence in two parts, first the
1137 * Setup packet by itself, followed some time later by the In and
1138 * Ack packets. Rather than trying to figure out how to add this
1139 * functionality to the normal driver code, we just hijack the
1140  * hardware, using these two functions to drive the hardware
1141 * directly.
1142 */
1143
1144 dwc_otg_core_global_regs_t *global_regs;
1145 dwc_otg_host_global_regs_t *hc_global_regs;
1146 dwc_otg_hc_regs_t *hc_regs;
1147 uint32_t *data_fifo;
1148
1149 static void do_setup(void)
1150 {
1151 gintsts_data_t gintsts;
1152 hctsiz_data_t hctsiz;
1153 hcchar_data_t hcchar;
1154 haint_data_t haint;
1155 hcint_data_t hcint;
1156
1157 /* Enable HAINTs */
1158 dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001);
1159
1160 /* Enable HCINTs */
1161 dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3);
1162
1163 /* Read GINTSTS */
1164 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1165 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1166
1167 /* Read HAINT */
1168 haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
1169 //fprintf(stderr, "HAINT: %08x\n", haint.d32);
1170
1171 /* Read HCINT */
1172 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1173 //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
1174
1175 /* Read HCCHAR */
1176 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1177 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
1178
1179 /* Clear HCINT */
1180 dwc_write_reg32(&hc_regs->hcint, hcint.d32);
1181
1182 /* Clear HAINT */
1183 dwc_write_reg32(&hc_global_regs->haint, haint.d32);
1184
1185 /* Clear GINTSTS */
1186 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1187
1188 /* Read GINTSTS */
1189 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1190 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1191
1192 /*
1193 * Send Setup packet (Get Device Descriptor)
1194 */
1195
1196 /* Make sure channel is disabled */
1197 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1198 if (hcchar.b.chen) {
1199 //fprintf(stderr, "Channel already enabled 1, HCCHAR = %08x\n", hcchar.d32);
1200 hcchar.b.chdis = 1;
1201 // hcchar.b.chen = 1;
1202 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1203 //sleep(1);
1204 mdelay(1000);
1205
1206 /* Read GINTSTS */
1207 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1208 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1209
1210 /* Read HAINT */
1211 haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
1212 //fprintf(stderr, "HAINT: %08x\n", haint.d32);
1213
1214 /* Read HCINT */
1215 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1216 //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
1217
1218 /* Read HCCHAR */
1219 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1220 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
1221
1222 /* Clear HCINT */
1223 dwc_write_reg32(&hc_regs->hcint, hcint.d32);
1224
1225 /* Clear HAINT */
1226 dwc_write_reg32(&hc_global_regs->haint, haint.d32);
1227
1228 /* Clear GINTSTS */
1229 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1230
1231 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1232 //if (hcchar.b.chen) {
1233 // fprintf(stderr, "** Channel _still_ enabled 1, HCCHAR = %08x **\n", hcchar.d32);
1234 //}
1235 }
1236
1237 /* Set HCTSIZ */
1238 hctsiz.d32 = 0;
1239 hctsiz.b.xfersize = 8;
1240 hctsiz.b.pktcnt = 1;
1241 hctsiz.b.pid = DWC_OTG_HC_PID_SETUP;
1242 dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
1243
1244 /* Set HCCHAR */
1245 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1246 hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL;
1247 hcchar.b.epdir = 0;
1248 hcchar.b.epnum = 0;
1249 hcchar.b.mps = 8;
1250 hcchar.b.chen = 1;
1251 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1252
1253 /* Fill FIFO with Setup data for Get Device Descriptor */
1254 data_fifo = (uint32_t *)((char *)global_regs + 0x1000);
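	/*
	 * 0x01000680 / 0x00080000 is the little-endian encoding of the
	 * GET_DESCRIPTOR(Device) setup packet with wLength = 8.
	 */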
1255 dwc_write_reg32(data_fifo++, 0x01000680);
1256 dwc_write_reg32(data_fifo++, 0x00080000);
1257
1258 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1259 //fprintf(stderr, "Waiting for HCINTR intr 1, GINTSTS = %08x\n", gintsts.d32);
1260
1261 /* Wait for host channel interrupt */
1262 do {
1263 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1264 } while (gintsts.b.hcintr == 0);
1265
1266 //fprintf(stderr, "Got HCINTR intr 1, GINTSTS = %08x\n", gintsts.d32);
1267
1268 /* Disable HCINTs */
1269 dwc_write_reg32(&hc_regs->hcintmsk, 0x0000);
1270
1271 /* Disable HAINTs */
1272 dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000);
1273
1274 /* Read HAINT */
1275 haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
1276 //fprintf(stderr, "HAINT: %08x\n", haint.d32);
1277
1278 /* Read HCINT */
1279 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1280 //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
1281
1282 /* Read HCCHAR */
1283 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1284 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
1285
1286 /* Clear HCINT */
1287 dwc_write_reg32(&hc_regs->hcint, hcint.d32);
1288
1289 /* Clear HAINT */
1290 dwc_write_reg32(&hc_global_regs->haint, haint.d32);
1291
1292 /* Clear GINTSTS */
1293 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1294
1295 /* Read GINTSTS */
1296 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1297 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1298 }
1299
1300 static void do_in_ack(void)
1301 {
1302 gintsts_data_t gintsts;
1303 hctsiz_data_t hctsiz;
1304 hcchar_data_t hcchar;
1305 haint_data_t haint;
1306 hcint_data_t hcint;
1307 host_grxsts_data_t grxsts;
1308
1309 /* Enable HAINTs */
1310 dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001);
1311
1312 /* Enable HCINTs */
1313 dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3);
1314
1315 /* Read GINTSTS */
1316 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1317 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1318
1319 /* Read HAINT */
1320 haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
1321 //fprintf(stderr, "HAINT: %08x\n", haint.d32);
1322
1323 /* Read HCINT */
1324 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1325 //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
1326
1327 /* Read HCCHAR */
1328 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1329 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
1330
1331 /* Clear HCINT */
1332 dwc_write_reg32(&hc_regs->hcint, hcint.d32);
1333
1334 /* Clear HAINT */
1335 dwc_write_reg32(&hc_global_regs->haint, haint.d32);
1336
1337 /* Clear GINTSTS */
1338 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1339
1340 /* Read GINTSTS */
1341 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1342 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1343
1344 /*
1345 * Receive Control In packet
1346 */
1347
1348 /* Make sure channel is disabled */
1349 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1350 if (hcchar.b.chen) {
1351 //fprintf(stderr, "Channel already enabled 2, HCCHAR = %08x\n", hcchar.d32);
1352 hcchar.b.chdis = 1;
1353 hcchar.b.chen = 1;
1354 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1355 //sleep(1);
1356 mdelay(1000);
1357
1358 /* Read GINTSTS */
1359 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1360 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1361
1362 /* Read HAINT */
1363 haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
1364 //fprintf(stderr, "HAINT: %08x\n", haint.d32);
1365
1366 /* Read HCINT */
1367 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1368 //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
1369
1370 /* Read HCCHAR */
1371 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1372 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
1373
1374 /* Clear HCINT */
1375 dwc_write_reg32(&hc_regs->hcint, hcint.d32);
1376
1377 /* Clear HAINT */
1378 dwc_write_reg32(&hc_global_regs->haint, haint.d32);
1379
1380 /* Clear GINTSTS */
1381 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1382
1383 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1384 //if (hcchar.b.chen) {
1385 // fprintf(stderr, "** Channel _still_ enabled 2, HCCHAR = %08x **\n", hcchar.d32);
1386 //}
1387 }
1388
1389 /* Set HCTSIZ */
1390 hctsiz.d32 = 0;
1391 hctsiz.b.xfersize = 8;
1392 hctsiz.b.pktcnt = 1;
1393 hctsiz.b.pid = DWC_OTG_HC_PID_DATA1;
1394 dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
1395
1396 /* Set HCCHAR */
1397 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1398 hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL;
1399 hcchar.b.epdir = 1;
1400 hcchar.b.epnum = 0;
1401 hcchar.b.mps = 8;
1402 hcchar.b.chen = 1;
1403 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1404
1405 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1406 //fprintf(stderr, "Waiting for RXSTSQLVL intr 1, GINTSTS = %08x\n", gintsts.d32);
1407
1408 /* Wait for receive status queue interrupt */
1409 do {
1410 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1411 } while (gintsts.b.rxstsqlvl == 0);
1412
1413 //fprintf(stderr, "Got RXSTSQLVL intr 1, GINTSTS = %08x\n", gintsts.d32);
1414
1415 /* Read RXSTS */
1416 grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp);
1417 //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32);
1418
1419 /* Clear RXSTSQLVL in GINTSTS */
1420 gintsts.d32 = 0;
1421 gintsts.b.rxstsqlvl = 1;
1422 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1423
1424 switch (grxsts.b.pktsts) {
1425 case DWC_GRXSTS_PKTSTS_IN:
1426 		/* Drain the received words from the FIFO; the descriptor data itself is discarded */
1427 if (grxsts.b.bcnt > 0) {
1428 int i;
1429 int word_count = (grxsts.b.bcnt + 3) / 4;
1430
1431 data_fifo = (uint32_t *)((char *)global_regs + 0x1000);
1432
1433 for (i = 0; i < word_count; i++) {
1434 (void)dwc_read_reg32(data_fifo++);
1435 }
1436 }
1437
1438 //fprintf(stderr, "Received %u bytes\n", (unsigned)grxsts.b.bcnt);
1439 break;
1440
1441 default:
1442 //fprintf(stderr, "** Unexpected GRXSTS packet status 1 **\n");
1443 break;
1444 }
1445
1446 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1447 //fprintf(stderr, "Waiting for RXSTSQLVL intr 2, GINTSTS = %08x\n", gintsts.d32);
1448
1449 /* Wait for receive status queue interrupt */
1450 do {
1451 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1452 } while (gintsts.b.rxstsqlvl == 0);
1453
1454 //fprintf(stderr, "Got RXSTSQLVL intr 2, GINTSTS = %08x\n", gintsts.d32);
1455
1456 /* Read RXSTS */
1457 grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp);
1458 //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32);
1459
1460 /* Clear RXSTSQLVL in GINTSTS */
1461 gintsts.d32 = 0;
1462 gintsts.b.rxstsqlvl = 1;
1463 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1464
1465 switch (grxsts.b.pktsts) {
1466 case DWC_GRXSTS_PKTSTS_IN_XFER_COMP:
1467 break;
1468
1469 default:
1470 //fprintf(stderr, "** Unexpected GRXSTS packet status 2 **\n");
1471 break;
1472 }
1473
1474 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1475 //fprintf(stderr, "Waiting for HCINTR intr 2, GINTSTS = %08x\n", gintsts.d32);
1476
1477 /* Wait for host channel interrupt */
1478 do {
1479 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1480 } while (gintsts.b.hcintr == 0);
1481
1482 //fprintf(stderr, "Got HCINTR intr 2, GINTSTS = %08x\n", gintsts.d32);
1483
1484 /* Read HAINT */
1485 haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
1486 //fprintf(stderr, "HAINT: %08x\n", haint.d32);
1487
1488 /* Read HCINT */
1489 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1490 //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
1491
1492 /* Read HCCHAR */
1493 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1494 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
1495
1496 /* Clear HCINT */
1497 dwc_write_reg32(&hc_regs->hcint, hcint.d32);
1498
1499 /* Clear HAINT */
1500 dwc_write_reg32(&hc_global_regs->haint, haint.d32);
1501
1502 /* Clear GINTSTS */
1503 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1504
1505 /* Read GINTSTS */
1506 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1507 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1508
1509 // usleep(100000);
1510 // mdelay(100);
1511 mdelay(1);
1512
1513 /*
1514 * Send handshake packet
1515 */
1516
1517 /* Read HAINT */
1518 haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
1519 //fprintf(stderr, "HAINT: %08x\n", haint.d32);
1520
1521 /* Read HCINT */
1522 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1523 //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
1524
1525 /* Read HCCHAR */
1526 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1527 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
1528
1529 /* Clear HCINT */
1530 dwc_write_reg32(&hc_regs->hcint, hcint.d32);
1531
1532 /* Clear HAINT */
1533 dwc_write_reg32(&hc_global_regs->haint, haint.d32);
1534
1535 /* Clear GINTSTS */
1536 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1537
1538 /* Read GINTSTS */
1539 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1540 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1541
1542 /* Make sure channel is disabled */
1543 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1544 if (hcchar.b.chen) {
1545 //fprintf(stderr, "Channel already enabled 3, HCCHAR = %08x\n", hcchar.d32);
1546 hcchar.b.chdis = 1;
1547 hcchar.b.chen = 1;
1548 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1549 //sleep(1);
1550 mdelay(1000);
1551
1552 /* Read GINTSTS */
1553 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1554 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1555
1556 /* Read HAINT */
1557 haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
1558 //fprintf(stderr, "HAINT: %08x\n", haint.d32);
1559
1560 /* Read HCINT */
1561 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1562 //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
1563
1564 /* Read HCCHAR */
1565 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1566 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
1567
1568 /* Clear HCINT */
1569 dwc_write_reg32(&hc_regs->hcint, hcint.d32);
1570
1571 /* Clear HAINT */
1572 dwc_write_reg32(&hc_global_regs->haint, haint.d32);
1573
1574 /* Clear GINTSTS */
1575 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1576
1577 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1578 //if (hcchar.b.chen) {
1579 // fprintf(stderr, "** Channel _still_ enabled 3, HCCHAR = %08x **\n", hcchar.d32);
1580 //}
1581 }
1582
1583 /* Set HCTSIZ */
1584 hctsiz.d32 = 0;
1585 hctsiz.b.xfersize = 0;
1586 hctsiz.b.pktcnt = 1;
1587 hctsiz.b.pid = DWC_OTG_HC_PID_DATA1;
1588 dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
1589
1590 /* Set HCCHAR */
1591 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1592 hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL;
1593 hcchar.b.epdir = 0;
1594 hcchar.b.epnum = 0;
1595 hcchar.b.mps = 8;
1596 hcchar.b.chen = 1;
1597 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1598
1599 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1600 //fprintf(stderr, "Waiting for HCINTR intr 3, GINTSTS = %08x\n", gintsts.d32);
1601
1602 /* Wait for host channel interrupt */
1603 do {
1604 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1605 } while (gintsts.b.hcintr == 0);
1606
1607 //fprintf(stderr, "Got HCINTR intr 3, GINTSTS = %08x\n", gintsts.d32);
1608
1609 /* Disable HCINTs */
1610 dwc_write_reg32(&hc_regs->hcintmsk, 0x0000);
1611
1612 /* Disable HAINTs */
1613 dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000);
1614
1615 /* Read HAINT */
1616 haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
1617 //fprintf(stderr, "HAINT: %08x\n", haint.d32);
1618
1619 /* Read HCINT */
1620 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1621 //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
1622
1623 /* Read HCCHAR */
1624 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1625 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
1626
1627 /* Clear HCINT */
1628 dwc_write_reg32(&hc_regs->hcint, hcint.d32);
1629
1630 /* Clear HAINT */
1631 dwc_write_reg32(&hc_global_regs->haint, haint.d32);
1632
1633 /* Clear GINTSTS */
1634 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1635
1636 /* Read GINTSTS */
1637 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1638 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1639 }
1640 #endif /* DWC_HS_ELECT_TST */
1641
1642 /** Handles hub class-specific requests. */
1643 int dwc_otg_hcd_hub_control(struct usb_hcd *hcd,
1644 u16 typeReq,
1645 u16 wValue,
1646 u16 wIndex,
1647 char *buf,
1648 u16 wLength)
1649 {
1650 int retval = 0;
1651
1652 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
1653 dwc_otg_core_if_t *core_if = hcd_to_dwc_otg_hcd(hcd)->core_if;
1654 struct usb_hub_descriptor *desc;
1655 hprt0_data_t hprt0 = {.d32 = 0};
1656
1657 uint32_t port_status;
1658
1659 switch (typeReq) {
1660 case ClearHubFeature:
1661 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1662 "ClearHubFeature 0x%x\n", wValue);
1663 switch (wValue) {
1664 case C_HUB_LOCAL_POWER:
1665 case C_HUB_OVER_CURRENT:
1666 /* Nothing required here */
1667 break;
1668 default:
1669 retval = -EINVAL;
1670 DWC_ERROR("DWC OTG HCD - "
1671 "ClearHubFeature request %xh unknown\n", wValue);
1672 }
1673 break;
1674 case ClearPortFeature:
1675 if (!wIndex || wIndex > 1)
1676 goto error;
1677
1678 switch (wValue) {
1679 case USB_PORT_FEAT_ENABLE:
1680 DWC_DEBUGPL(DBG_ANY, "DWC OTG HCD HUB CONTROL - "
1681 "ClearPortFeature USB_PORT_FEAT_ENABLE\n");
1682 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1683 hprt0.b.prtena = 1;
1684 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1685 break;
1686 case USB_PORT_FEAT_SUSPEND:
1687 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1688 "ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
1689 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1690 hprt0.b.prtres = 1;
1691 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1692 /* Clear Resume bit */
1693 mdelay(100);
1694 hprt0.b.prtres = 0;
1695 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1696 break;
1697 case USB_PORT_FEAT_POWER:
1698 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1699 "ClearPortFeature USB_PORT_FEAT_POWER\n");
1700 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1701 hprt0.b.prtpwr = 0;
1702 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1703 break;
1704 case USB_PORT_FEAT_INDICATOR:
1705 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1706 "ClearPortFeature USB_PORT_FEAT_INDICATOR\n");
1707 			/* Port indicator not supported */
1708 break;
1709 case USB_PORT_FEAT_C_CONNECTION:
1710 			/* Clears the driver's internal connect status change
1711 * flag */
1712 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1713 "ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n");
1714 dwc_otg_hcd->flags.b.port_connect_status_change = 0;
1715 break;
1716 case USB_PORT_FEAT_C_RESET:
1717 /* Clears the driver's internal Port Reset Change
1718 * flag */
1719 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1720 "ClearPortFeature USB_PORT_FEAT_C_RESET\n");
1721 dwc_otg_hcd->flags.b.port_reset_change = 0;
1722 break;
1723 case USB_PORT_FEAT_C_ENABLE:
1724 /* Clears the driver's internal Port
1725 * Enable/Disable Change flag */
1726 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1727 "ClearPortFeature USB_PORT_FEAT_C_ENABLE\n");
1728 dwc_otg_hcd->flags.b.port_enable_change = 0;
1729 break;
1730 case USB_PORT_FEAT_C_SUSPEND:
1731 /* Clears the driver's internal Port Suspend
1732 * Change flag, which is set when resume signaling on
1733 * the host port is complete */
1734 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1735 "ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n");
1736 dwc_otg_hcd->flags.b.port_suspend_change = 0;
1737 break;
1738 case USB_PORT_FEAT_C_OVER_CURRENT:
1739 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1740 "ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n");
1741 dwc_otg_hcd->flags.b.port_over_current_change = 0;
1742 break;
1743 default:
1744 retval = -EINVAL;
1745 DWC_ERROR("DWC OTG HCD - "
1746 "ClearPortFeature request %xh "
1747 "unknown or unsupported\n", wValue);
1748 }
1749 break;
1750 case GetHubDescriptor:
1751 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1752 "GetHubDescriptor\n");
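		/*
		 * Fill in a minimal 9-byte hub descriptor for the single-port
		 * root hub. 0x29 is the standard hub descriptor type and
		 * bPwrOn2PwrGood is expressed in 2 ms units. On a little-endian
		 * host the bytes handed back are roughly:
		 *   09 29 01 08 00 01 00 00 ff
		 */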
1753 desc = (struct usb_hub_descriptor *)buf;
1754 desc->bDescLength = 9;
1755 desc->bDescriptorType = 0x29;
1756 desc->bNbrPorts = 1;
1757 desc->wHubCharacteristics = 0x08;
1758 desc->bPwrOn2PwrGood = 1;
1759 desc->bHubContrCurrent = 0;
1760 desc->u.hs.DeviceRemovable[0] = 0;
1761 desc->u.hs.DeviceRemovable[1] = 0xff;
1762 break;
1763 case GetHubStatus:
1764 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1765 "GetHubStatus\n");
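		/* Root hub status: no local power loss or over-current condition to report. */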
1766 memset(buf, 0, 4);
1767 break;
1768 case GetPortStatus:
1769 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1770 "GetPortStatus\n");
1771
1772 if (!wIndex || wIndex > 1)
1773 goto error;
1774
1775 port_status = 0;
1776
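		/*
		 * The wPortChange bits are tracked in software flags; the
		 * wPortStatus bits are read from HPRT0 further below.
		 */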
1777 if (dwc_otg_hcd->flags.b.port_connect_status_change)
1778 port_status |= (1 << USB_PORT_FEAT_C_CONNECTION);
1779
1780 if (dwc_otg_hcd->flags.b.port_enable_change)
1781 port_status |= (1 << USB_PORT_FEAT_C_ENABLE);
1782
1783 if (dwc_otg_hcd->flags.b.port_suspend_change)
1784 port_status |= (1 << USB_PORT_FEAT_C_SUSPEND);
1785
1786 if (dwc_otg_hcd->flags.b.port_reset_change)
1787 port_status |= (1 << USB_PORT_FEAT_C_RESET);
1788
1789 if (dwc_otg_hcd->flags.b.port_over_current_change) {
1790 			DWC_ERROR("Port over-current change detected\n");
1791 port_status |= (1 << USB_PORT_FEAT_C_OVER_CURRENT);
1792 }
1793
1794 if (!dwc_otg_hcd->flags.b.port_connect_status) {
1795 /*
1796 * The port is disconnected, which means the core is
1797 * either in device mode or it soon will be. Just
1798 * return 0's for the remainder of the port status
1799 * since the port register can't be read if the core
1800 * is in device mode.
1801 */
1802 *((__le32 *) buf) = cpu_to_le32(port_status);
1803 break;
1804 }
1805
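		/*
		 * The port is connected, so HPRT0 is valid to read. Translate
		 * the hardware port status bits into wPortStatus form.
		 */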
1806 hprt0.d32 = dwc_read_reg32(core_if->host_if->hprt0);
1807 DWC_DEBUGPL(DBG_HCDV, " HPRT0: 0x%08x\n", hprt0.d32);
1808
1809 if (hprt0.b.prtconnsts)
1810 port_status |= (1 << USB_PORT_FEAT_CONNECTION);
1811
1812 if (hprt0.b.prtena)
1813 port_status |= (1 << USB_PORT_FEAT_ENABLE);
1814
1815 if (hprt0.b.prtsusp)
1816 port_status |= (1 << USB_PORT_FEAT_SUSPEND);
1817
1818 if (hprt0.b.prtovrcurract)
1819 port_status |= (1 << USB_PORT_FEAT_OVER_CURRENT);
1820
1821 if (hprt0.b.prtrst)
1822 port_status |= (1 << USB_PORT_FEAT_RESET);
1823
1824 if (hprt0.b.prtpwr)
1825 port_status |= (1 << USB_PORT_FEAT_POWER);
1826
1827 if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED)
1828 port_status |= (USB_PORT_STAT_HIGH_SPEED);
1829 else if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED)
1830 port_status |= (USB_PORT_STAT_LOW_SPEED);
1831
1832 if (hprt0.b.prttstctl)
1833 port_status |= (1 << USB_PORT_FEAT_TEST);
1834
1835 		/* USB_PORT_FEAT_INDICATOR is unsupported, always 0 */
1836
1837 *((__le32 *) buf) = cpu_to_le32(port_status);
1838
1839 break;
1840 case SetHubFeature:
1841 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1842 "SetHubFeature\n");
1843 /* No HUB features supported */
1844 break;
1845 case SetPortFeature:
1846 if (wValue != USB_PORT_FEAT_TEST && (!wIndex || wIndex > 1))
1847 goto error;
1848
1849 if (!dwc_otg_hcd->flags.b.port_connect_status) {
1850 /*
1851 * The port is disconnected, which means the core is
1852 * either in device mode or it soon will be. Just
1853 * return without doing anything since the port
1854 * register can't be written if the core is in device
1855 * mode.
1856 */
1857 break;
1858 }
1859
1860 switch (wValue) {
1861 case USB_PORT_FEAT_SUSPEND:
1862 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1863 "SetPortFeature - USB_PORT_FEAT_SUSPEND\n");
1864 if (hcd->self.otg_port == wIndex &&
1865 hcd->self.b_hnp_enable) {
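				/*
				 * HNP is enabled on the OTG port: arm Host Set
				 * HNP Enable so the B-device can take over the
				 * bus once the port is suspended.
				 */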
1866 gotgctl_data_t gotgctl = {.d32=0};
1867 gotgctl.b.hstsethnpen = 1;
1868 dwc_modify_reg32(&core_if->core_global_regs->gotgctl,
1869 0, gotgctl.d32);
1870 core_if->op_state = A_SUSPEND;
1871 }
1872 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1873 hprt0.b.prtsusp = 1;
1874 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1875 //DWC_PRINT("SUSPEND: HPRT0=%0x\n", hprt0.d32);
1876 /* Suspend the Phy Clock */
1877 {
1878 pcgcctl_data_t pcgcctl = {.d32=0};
1879 pcgcctl.b.stoppclk = 1;
1880 dwc_write_reg32(core_if->pcgcctl, pcgcctl.d32);
1881 }
1882
1883 /* For HNP the bus must be suspended for at least 200ms. */
1884 if (hcd->self.b_hnp_enable) {
1885 mdelay(200);
1886 //DWC_PRINT("SUSPEND: wait complete! (%d)\n", _hcd->state);
1887 }
1888 break;
1889 case USB_PORT_FEAT_POWER:
1890 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1891 "SetPortFeature - USB_PORT_FEAT_POWER\n");
1892 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1893 hprt0.b.prtpwr = 1;
1894 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1895 break;
1896 case USB_PORT_FEAT_RESET:
1897 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1898 "SetPortFeature - USB_PORT_FEAT_RESET\n");
1899 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1900 			/* When acting as B-Host, the port reset bit is set in
1901 * the Start HCD Callback function, so that
1902 * the reset is started within 1ms of the HNP
1903 * success interrupt. */
1904 if (!hcd->self.is_b_host) {
1905 hprt0.b.prtrst = 1;
1906 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1907 }
1908 /* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */
1909 MDELAY(60);
1910 hprt0.b.prtrst = 0;
1911 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1912 break;
1913
1914 #ifdef DWC_HS_ELECT_TST
1915 case USB_PORT_FEAT_TEST:
1916 {
1917 uint32_t t;
1918 gintmsk_data_t gintmsk;
1919
1920 			t = (wIndex >> 8); /* test mode selector is in the MSB of wIndex */
1921 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1922 "SetPortFeature - USB_PORT_FEAT_TEST %d\n", t);
1923 warn("USB_PORT_FEAT_TEST %d\n", t);
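			/*
			 * Test selectors 1..5 are the standard USB 2.0 test modes
			 * (Test_J, Test_K, Test_SE0_NAK, Test_Packet, Test_Force_Enable)
			 * and are driven directly through the PrtTstCtl field.
			 */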
1924 if (t < 6) {
1925 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1926 hprt0.b.prttstctl = t;
1927 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1928 } else {
1929 /* Setup global vars with reg addresses (quick and
1930 * dirty hack, should be cleaned up)
1931 */
1932 global_regs = core_if->core_global_regs;
1933 hc_global_regs = core_if->host_if->host_global_regs;
1934 hc_regs = (dwc_otg_hc_regs_t *)((char *)global_regs + 0x500);
1935 data_fifo = (uint32_t *)((char *)global_regs + 0x1000);
1936
1937 if (t == 6) { /* HS_HOST_PORT_SUSPEND_RESUME */
1938 /* Save current interrupt mask */
1939 gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk);
1940
1941 /* Disable all interrupts while we muck with
1942 * the hardware directly
1943 */
1944 dwc_write_reg32(&global_regs->gintmsk, 0);
1945
1946 /* 15 second delay per the test spec */
1947 mdelay(15000);
1948
1949 /* Drive suspend on the root port */
1950 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1951 hprt0.b.prtsusp = 1;
1952 hprt0.b.prtres = 0;
1953 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1954
1955 /* 15 second delay per the test spec */
1956 mdelay(15000);
1957
1958 /* Drive resume on the root port */
1959 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1960 hprt0.b.prtsusp = 0;
1961 hprt0.b.prtres = 1;
1962 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1963 mdelay(100);
1964
1965 /* Clear the resume bit */
1966 hprt0.b.prtres = 0;
1967 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1968
1969 /* Restore interrupts */
1970 dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32);
1971 } else if (t == 7) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR setup */
1972 /* Save current interrupt mask */
1973 gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk);
1974
1975 /* Disable all interrupts while we muck with
1976 * the hardware directly
1977 */
1978 dwc_write_reg32(&global_regs->gintmsk, 0);
1979
1980 /* 15 second delay per the test spec */
1981 mdelay(15000);
1982
1983 /* Send the Setup packet */
1984 do_setup();
1985
1986 				/* 15 second delay so nothing else happens for a while */
1987 mdelay(15000);
1988
1989 /* Restore interrupts */
1990 dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32);
1991 } else if (t == 8) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR execute */
1992 /* Save current interrupt mask */
1993 gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk);
1994
1995 /* Disable all interrupts while we muck with
1996 * the hardware directly
1997 */
1998 dwc_write_reg32(&global_regs->gintmsk, 0);
1999
2000 /* Send the Setup packet */
2001 do_setup();
2002
2003 				/* 15 second delay so nothing else happens for a while */
2004 mdelay(15000);
2005
2006 /* Send the In and Ack packets */
2007 do_in_ack();
2008
2009 				/* 15 second delay so nothing else happens for a while */
2010 mdelay(15000);
2011
2012 /* Restore interrupts */
2013 dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32);
2014 }
2015 }
2016 break;
2017 }
2018 #endif /* DWC_HS_ELECT_TST */
2019
2020 case USB_PORT_FEAT_INDICATOR:
2021 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
2022 "SetPortFeature - USB_PORT_FEAT_INDICATOR\n");
2023 /* Not supported */
2024 break;
2025 default:
2026 retval = -EINVAL;
2027 DWC_ERROR("DWC OTG HCD - "
2028 "SetPortFeature request %xh "
2029 "unknown or unsupported\n", wValue);
2030 break;
2031 }
2032 break;
2033 default:
2034 error:
2035 retval = -EINVAL;
2036 DWC_WARN("DWC OTG HCD - "
2037 "Unknown hub control request type or invalid typeReq: %xh wIndex: %xh wValue: %xh\n",
2038 typeReq, wIndex, wValue);
2039 break;
2040 }
2041
2042 return retval;
2043 }
2044
2045 /**
2046 * Assigns transactions from a QTD to a free host channel and initializes the
2047 * host channel to perform the transactions. The host channel is removed from
2048 * the free list.
2049 *
2050 * @param hcd The HCD state structure.
2051 * @param qh Transactions from the first QTD for this QH are selected and
2052 * assigned to a free host channel.
2053 */
2054 static void assign_and_init_hc(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
2055 {
2056 dwc_hc_t *hc;
2057 dwc_otg_qtd_t *qtd;
2058 struct urb *urb;
2059
2060 DWC_DEBUGPL(DBG_HCD_FLOOD, "%s(%p,%p)\n", __func__, hcd, qh);
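	/* Pick the first free host channel and the first QTD queued on this QH. */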
2061 hc = list_entry(hcd->free_hc_list.next, dwc_hc_t, hc_list_entry);
2062
2063 qtd = list_entry(qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry);
2064 urb = qtd->urb;
2065
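	/* Nothing to set up if the QTD carries no URB. */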
2066 	if (!urb) {
2067 return;
2068 }
2069
2070 /* Remove the host channel from the free list. */
2071 list_del_init(&hc->hc_list_entry);
2072
2073 qh->channel = hc;
2074 qh->qtd_in_process = qtd;
2075
2076 /*
2077 * Use usb_pipedevice to determine device address. This address is
2078 * 0 before the SET_ADDRESS command and the correct address afterward.
2079 */
2080 hc->dev_addr = usb_pipedevice(urb->pipe);
2081 hc->ep_num = usb_pipeendpoint(urb->pipe);
2082
2083 if (urb->dev->speed == USB_SPEED_LOW) {
2084 hc->speed = DWC_OTG_EP_SPEED_LOW;
2085 } else if (urb->dev->speed == USB_SPEED_FULL) {
2086 hc->speed = DWC_OTG_EP_SPEED_FULL;
2087 } else {
2088 hc->speed = DWC_OTG_EP_SPEED_HIGH;
2089 }
2090
2091 hc->max_packet = dwc_max_packet(qh->maxp);
2092
2093 hc->xfer_started = 0;
2094 hc->halt_status = DWC_OTG_HC_XFER_NO_HALT_STATUS;
2095 hc->error_state = (qtd->error_count > 0);
2096 hc->halt_on_queue = 0;
2097 hc->halt_pending = 0;
2098 hc->requests = 0;
2099
2100 /*
2101 * The following values may be modified in the transfer type section
2102 * below. The xfer_len value may be reduced when the transfer is
2103 * started to accommodate the max widths of the XferSize and PktCnt
2104 * fields in the HCTSIZn register.
2105 */
2106 hc->do_ping = qh->ping_state;
2107 hc->ep_is_in = (usb_pipein(urb->pipe) != 0);
2108 hc->data_pid_start = qh->data_toggle;
2109 hc->multi_count = 1;
2110
2111 if (hcd->core_if->dma_enable) {
2112 hc->xfer_buff = (uint8_t *)urb->transfer_dma + urb->actual_length;
2113 } else {
2114 hc->xfer_buff = (uint8_t *)urb->transfer_buffer + urb->actual_length;
2115 }
2116 hc->xfer_len = urb->transfer_buffer_length - urb->actual_length;
2117 hc->xfer_count = 0;
2118
2119 /*
2120 * Set the split attributes
2121 */
2122 hc->do_split = 0;
2123 if (qh->do_split) {
2124 hc->do_split = 1;
2125 hc->xact_pos = qtd->isoc_split_pos;
2126 hc->complete_split = qtd->complete_split;
2127 hc->hub_addr = urb->dev->tt->hub->devnum;
2128 hc->port_addr = urb->dev->ttport;
2129 }
2130
2131 switch (usb_pipetype(urb->pipe)) {
2132 case PIPE_CONTROL:
2133 hc->ep_type = DWC_OTG_EP_TYPE_CONTROL;
2134 switch (qtd->control_phase) {
2135 case DWC_OTG_CONTROL_SETUP:
2136 DWC_DEBUGPL(DBG_HCDV, " Control setup transaction\n");
2137 hc->do_ping = 0;
2138 hc->ep_is_in = 0;
2139 hc->data_pid_start = DWC_OTG_HC_PID_SETUP;
2140 if (hcd->core_if->dma_enable) {
2141 hc->xfer_buff = (uint8_t *)urb->setup_dma;
2142 } else {
2143 hc->xfer_buff = (uint8_t *)urb->setup_packet;
2144 }
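			/* A SETUP packet is always 8 bytes. */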
2145 hc->xfer_len = 8;
2146 break;
2147 case DWC_OTG_CONTROL_DATA:
2148 DWC_DEBUGPL(DBG_HCDV, " Control data transaction\n");
2149 hc->data_pid_start = qtd->data_toggle;
2150 break;
2151 case DWC_OTG_CONTROL_STATUS:
2152 /*
2153 * Direction is opposite of data direction or IN if no
2154 * data.
2155 */
2156 DWC_DEBUGPL(DBG_HCDV, " Control status transaction\n");
2157 if (urb->transfer_buffer_length == 0) {
2158 hc->ep_is_in = 1;
2159 } else {
2160 hc->ep_is_in = (usb_pipein(urb->pipe) != USB_DIR_IN);
2161 }
2162 if (hc->ep_is_in) {
2163 hc->do_ping = 0;
2164 }
2165 hc->data_pid_start = DWC_OTG_HC_PID_DATA1;
2166 hc->xfer_len = 0;
2167 if (hcd->core_if->dma_enable) {
2168 hc->xfer_buff = (uint8_t *)hcd->status_buf_dma;
2169 } else {
2170 hc->xfer_buff = (uint8_t *)hcd->status_buf;
2171 }
2172 break;
2173 }
2174 break;
2175 case PIPE_BULK:
2176 hc->ep_type = DWC_OTG_EP_TYPE_BULK;
2177 break;
2178 case PIPE_INTERRUPT:
2179 hc->ep_type = DWC_OTG_EP_TYPE_INTR;
2180 break;
2181 case PIPE_ISOCHRONOUS:
2182 {
2183 struct usb_iso_packet_descriptor *frame_desc;
2184 frame_desc = &urb->iso_frame_desc[qtd->isoc_frame_index];
2185 hc->ep_type = DWC_OTG_EP_TYPE_ISOC;
2186 if (hcd->core_if->dma_enable) {
2187 hc->xfer_buff = (uint8_t *)urb->transfer_dma;
2188 } else {
2189 hc->xfer_buff = (uint8_t *)urb->transfer_buffer;
2190 }
2191 hc->xfer_buff += frame_desc->offset + qtd->isoc_split_offset;
2192 hc->xfer_len = frame_desc->length - qtd->isoc_split_offset;
2193
2194 if (hc->xact_pos == DWC_HCSPLIT_XACTPOS_ALL) {
2195 if (hc->xfer_len <= 188) {
2196 hc->xact_pos = DWC_HCSPLIT_XACTPOS_ALL;
2197 }
2198 else {
2199 hc->xact_pos = DWC_HCSPLIT_XACTPOS_BEGIN;
2200 }
2201 }
2202 }
2203 break;
2204 }
2205
2206 if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
2207 hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
2208 /*
2209 * This value may be modified when the transfer is started to
2210 * reflect the actual transfer length.
2211 */
2212 hc->multi_count = dwc_hb_mult(qh->maxp);
2213 }
2214
2215 dwc_otg_hc_init(hcd->core_if, hc);
2216 hc->qh = qh;
2217 }
2218
2219 /**
2220 * This function selects transactions from the HCD transfer schedule and
2221 * assigns them to available host channels. It is called from HCD interrupt
2222 * handler functions.
2223 *
2224 * @param hcd The HCD state structure.
2225 *
2226 * @return The types of new transactions that were assigned to host channels.
2227 */
2228 dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *hcd)
2229 {
2230 struct list_head *qh_ptr;
2231 dwc_otg_qh_t *qh = NULL;
2232 int num_channels;
2233 dwc_otg_transaction_type_e ret_val = DWC_OTG_TRANSACTION_NONE;
2234 uint16_t cur_frame = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd));
2235 unsigned long flags;
2236 int include_nakd, channels_full;
2237 /* This condition has once been observed, but the cause was
2238 * never determined. Check for it here, to collect debug data if
2239 * it occurs again. */
2240 WARN_ON_ONCE(hcd->non_periodic_channels < 0);
2241 	check_nakking(hcd, __func__, "start");
2242
2243 #ifdef DEBUG_SOF
2244 DWC_DEBUGPL(DBG_HCD, " Select Transactions\n");
2245 #endif
2246
2247 SPIN_LOCK_IRQSAVE(&hcd->lock, flags);
2248 /* Process entries in the periodic ready list. */
2249 qh_ptr = hcd->periodic_sched_ready.next;
2250 while (qh_ptr != &hcd->periodic_sched_ready &&
2251 !list_empty(&hcd->free_hc_list)) {
2252
2253 qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
2254 assign_and_init_hc(hcd, qh);
2255
2256 /*
2257 * Move the QH from the periodic ready schedule to the
2258 * periodic assigned schedule.
2259 */
2260 qh_ptr = qh_ptr->next;
2261 list_move(&qh->qh_list_entry, &hcd->periodic_sched_assigned);
2262
2263 ret_val = DWC_OTG_TRANSACTION_PERIODIC;
2264 }
2265
2266 /*
2267 * Process entries in the inactive portion of the non-periodic
2268 * schedule. Some free host channels may not be used if they are
2269 * reserved for periodic transfers.
2270 */
2271 num_channels = hcd->core_if->core_params->host_channels;
2272
2273 	/* Go over the queue twice: once excluding NAK'd
2274 	 * entries, once including them. This way, a retransmit of
2275 	 * an entry that has received a NAK is scheduled only after all
2276 	 * new entries.
2277 */
2278 channels_full = 0;
2279 for (include_nakd = 0; include_nakd < 2 && !channels_full; ++include_nakd) {
2280 qh_ptr = hcd->non_periodic_sched_inactive.next;
2281 while (qh_ptr != &hcd->non_periodic_sched_inactive) {
2282 qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
2283 qh_ptr = qh_ptr->next;
2284
2285 /* If a nak'd frame is in the queue for 100ms, forget
2286 * about its nak status, to prevent the situation where
2287 * a nak'd frame never gets resubmitted because there
2288 			 * are continuously non-nakking transfers available.
2289 */
2290 if (qh->nak_frame != 0xffff &&
2291 dwc_frame_num_gt(cur_frame, qh->nak_frame + 800))
2292 qh->nak_frame = 0xffff;
2293
2294 			/* In the first pass, ignore NAK'd retransmits
2295 			 * altogether, to give them lower priority. */
2296 if (!include_nakd && qh->nak_frame != 0xffff)
2297 continue;
2298
2299 			/*
2300 			 * If this is a NAK'd retransmit in the same (full) frame in which it
2301 			 * was NAK'd, skip it for now. Holding off on bulk retransmissions
2302 			 * reduces NAK interrupt overhead for devices that just keep NAKing.
2303 			 */
2304 if (dwc_full_frame_num(qh->nak_frame) == dwc_full_frame_num(dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd))))
2305 continue;
2306
2307 /* Ok, we found a candidate for scheduling. Is there a
2308 * free channel? */
2309 if (hcd->non_periodic_channels >=
2310 num_channels - hcd->periodic_channels ||
2311 list_empty(&hcd->free_hc_list)) {
2312 channels_full = 1;
2313 break;
2314 }
2315
2316 /* When retrying a NAK'd transfer, we give it a fair
2317 * chance of completing again. */
2318 qh->nak_frame = 0xffff;
2319 assign_and_init_hc(hcd, qh);
2320
2321 /*
2322 * Move the QH from the non-periodic inactive schedule to the
2323 * non-periodic active schedule.
2324 */
2325 list_move(&qh->qh_list_entry, &hcd->non_periodic_sched_active);
2326
2327 if (ret_val == DWC_OTG_TRANSACTION_NONE) {
2328 ret_val = DWC_OTG_TRANSACTION_NON_PERIODIC;
2329 } else {
2330 ret_val = DWC_OTG_TRANSACTION_ALL;
2331 }
2332
2333 hcd->non_periodic_channels++;
2334 }
2335 if (hcd->core_if->dma_enable && channels_full &&
2336 hcd->periodic_channels + hcd->nakking_channels >= num_channels) {
2337 /* There are items queued, but all channels are either
2338 * reserved for periodic or have received NAKs. This
2339 * means that it could take an indefinite amount of time
2340 * before a channel is actually freed (since in DMA
2341 * mode, the hardware takes care of retries), so we take
2342 * action here by forcing a nakking channel to halt to
2343 * give other transfers a chance to run. */
2344 dwc_otg_qtd_t *qtd = list_entry(qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry);
2345 struct urb *urb = qtd->urb;
2346 dwc_hc_t *hc = dwc_otg_halt_nakking_channel(hcd);
2347
2348 if (hc)
2349 			DWC_DEBUGPL(DBG_HCD, "Out of Host Channels for non-periodic transfer - Halting channel %d (dev %d ep%d%s) to service qh %p (dev %d ep%d%s)\n", hc->hc_num, hc->dev_addr, hc->ep_num, (hc->ep_is_in ? "in" : "out"), qh, usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe), (usb_pipein(urb->pipe) != 0) ? "in" : "out");
2350
2351 }
2352 }
2353
2354 SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags);
2355
2356 return ret_val;
2357 }
2358
2359 /**
2360 * Halt a bulk channel that is blocking on NAKs to free up space.
2361 *
2362 * This will decrement hcd->nakking_channels immediately, but
2363 * hcd->non_periodic_channels is not decremented until the channel is
2364 * actually halted.
2365 *
2366 * Returns the halted channel.
2367 */
2368 dwc_hc_t *dwc_otg_halt_nakking_channel(dwc_otg_hcd_t *hcd) {
2369 int num_channels, i;
2370 uint16_t cur_frame;
2371
2372 cur_frame = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd));
2373 num_channels = hcd->core_if->core_params->host_channels;
2374
2375 for (i = 0; i < num_channels; i++) {
2376 int channel = (hcd->last_channel_halted + 1 + i) % num_channels;
2377 dwc_hc_t *hc = hcd->hc_ptr_array[channel];
2378 if (hc->xfer_started
2379 && !hc->halt_on_queue
2380 && !hc->halt_pending
2381 && hc->qh->nak_frame != 0xffff) {
2382 dwc_otg_hc_halt(hcd, hc, DWC_OTG_HC_XFER_NAK);
2383 /* Store the last channel halted to
2384 * fairly rotate the channel to halt.
2385 			 * This prevents the scenario where there
2386 * are three blocking endpoints and only
2387 * two free host channels, where the
2388 * blocking endpoint that gets hc 3 will
2389 * never be halted, while the other two
2390 * endpoints will be fighting over the
2391 * other host channel. */
2392 hcd->last_channel_halted = channel;
2393 /* Update nak_frame, so this frame is
2394 * kept at low priority for a period of
2395 * time starting now. */
2396 hc->qh->nak_frame = cur_frame;
2397 return hc;
2398 }
2399 }
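	/* No suitable NAKking channel was found; dump HCD state to aid debugging. */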
2400 dwc_otg_hcd_dump_state(hcd);
2401 return NULL;
2402 }
2403
2404 /**
2405 * Attempts to queue a single transaction request for a host channel
2406 * associated with either a periodic or non-periodic transfer. This function
2407 * assumes that there is space available in the appropriate request queue. For
2408 * an OUT transfer or SETUP transaction in Slave mode, it checks whether space
2409 * is available in the appropriate Tx FIFO.
2410 *
2411 * @param hcd The HCD state structure.
2412 * @param hc Host channel descriptor associated with either a periodic or
2413 * non-periodic transfer.
2414 * @param fifo_dwords_avail Number of DWORDs available in the periodic Tx
2415 * FIFO for periodic transfers or the non-periodic Tx FIFO for non-periodic
2416 * transfers.
2417 *
2418 * @return 1 if a request is queued and more requests may be needed to
2419 * complete the transfer, 0 if no more requests are required for this
2420 * transfer, -1 if there is insufficient space in the Tx FIFO.
2421 */
2422 static int queue_transaction(dwc_otg_hcd_t *hcd,
2423 dwc_hc_t *hc,
2424 uint16_t fifo_dwords_avail)
2425 {
2426 int retval;
2427
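	/*
	 * Decide how to progress this channel: in DMA mode the transfer only
	 * needs to be started once. In Slave mode, respect pending or queued
	 * halts, handle PING, and check Tx FIFO space before pushing OUT or
	 * SETUP data.
	 */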
2428 if (hcd->core_if->dma_enable) {
2429 if (!hc->xfer_started) {
2430 dwc_otg_hc_start_transfer(hcd->core_if, hc);
2431 hc->qh->ping_state = 0;
2432 }
2433 retval = 0;
2434 } else if (hc->halt_pending) {
2435 /* Don't queue a request if the channel has been halted. */
2436 retval = 0;
2437 } else if (hc->halt_on_queue) {
2438 dwc_otg_hc_halt(hcd, hc, hc->halt_status);
2439 retval = 0;
2440 } else if (hc->do_ping) {
2441 if (!hc->xfer_started) {
2442 dwc_otg_hc_start_transfer(hcd->core_if, hc);
2443 }
2444 retval = 0;
2445 } else if (!hc->ep_is_in ||
2446 hc->data_pid_start == DWC_OTG_HC_PID_SETUP) {
2447 if ((fifo_dwords_avail * 4) >= hc->max_packet) {
2448 if (!hc->xfer_started) {
2449 dwc_otg_hc_start_transfer(hcd->core_if, hc);
2450 retval = 1;
2451 } else {
2452 retval = dwc_otg_hc_continue_transfer(hcd->core_if, hc);
2453 }
2454 } else {
2455 retval = -1;
2456 }
2457 } else {
2458 if (!hc->xfer_started) {
2459 dwc_otg_hc_start_transfer(hcd->core_if, hc);
2460 retval = 1;
2461 } else {
2462 retval = dwc_otg_hc_continue_transfer(hcd->core_if, hc);
2463 }
2464 }
2465
2466 return retval;
2467 }
2468
2469 /**
2470 * Processes active non-periodic channels and queues transactions for these
2471 * channels to the DWC_otg controller. After queueing transactions, the NP Tx
2472 * FIFO Empty interrupt is enabled if there are more transactions to queue as
2473 * NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx
2474 * FIFO Empty interrupt is disabled.
2475 */
2476 static void process_non_periodic_channels(dwc_otg_hcd_t *hcd)
2477 {
2478 gnptxsts_data_t tx_status;
2479 struct list_head *orig_qh_ptr;
2480 dwc_otg_qh_t *qh;
2481 int status;
2482 int no_queue_space = 0;
2483 int no_fifo_space = 0;
2484 int more_to_do = 0;
2485
2486 dwc_otg_core_global_regs_t *global_regs = hcd->core_if->core_global_regs;
2487
2488 DWC_DEBUGPL(DBG_HCDV, "Queue non-periodic transactions\n");
2489 #ifdef DEBUG
2490 tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts);
2491 DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail (before queue): %d\n",
2492 tx_status.b.nptxqspcavail);
2493 DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Avail (before queue): %d\n",
2494 tx_status.b.nptxfspcavail);
2495 #endif
2496 /*
2497 * Keep track of the starting point. Skip over the start-of-list
2498 * entry.
2499 */
2500 if (hcd->non_periodic_qh_ptr == &hcd->non_periodic_sched_active) {
2501 hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next;
2502 }
2503 orig_qh_ptr = hcd->non_periodic_qh_ptr;
2504
2505 /*
2506 * Process once through the active list or until no more space is
2507 * available in the request queue or the Tx FIFO.
2508 */
2509 do {
2510 tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts);
2511 if (!hcd->core_if->dma_enable && tx_status.b.nptxqspcavail == 0) {
2512 no_queue_space = 1;
2513 break;
2514 }
2515
2516 qh = list_entry(hcd->non_periodic_qh_ptr, dwc_otg_qh_t, qh_list_entry);
2517 status = queue_transaction(hcd, qh->channel, tx_status.b.nptxfspcavail);
2518
2519 if (status > 0) {
2520 more_to_do = 1;
2521 } else if (status < 0) {
2522 no_fifo_space = 1;
2523 break;
2524 }
2525
2526 /* Advance to next QH, skipping start-of-list entry. */
2527 hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next;
2528 if (hcd->non_periodic_qh_ptr == &hcd->non_periodic_sched_active) {
2529 hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next;
2530 }
2531
2532 } while (hcd->non_periodic_qh_ptr != orig_qh_ptr);
2533
2534 if (!hcd->core_if->dma_enable) {
2535 gintmsk_data_t intr_mask = {.d32 = 0};
2536 intr_mask.b.nptxfempty = 1;
2537
2538 #ifdef DEBUG
2539 tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts);
2540 DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail (after queue): %d\n",
2541 tx_status.b.nptxqspcavail);
2542 DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Avail (after queue): %d\n",
2543 tx_status.b.nptxfspcavail);
2544 #endif
2545 if (more_to_do || no_queue_space || no_fifo_space) {
2546 /*
2547 * May need to queue more transactions as the request
2548 * queue or Tx FIFO empties. Enable the non-periodic
2549 * Tx FIFO empty interrupt. (Always use the half-empty
2550 * level to ensure that new requests are loaded as
2551 * soon as possible.)
2552 */
2553 dwc_modify_reg32(&global_regs->gintmsk, 0, intr_mask.d32);
2554 } else {
2555 /*
2556 * Disable the Tx FIFO empty interrupt since there are
2557 * no more transactions that need to be queued right
2558 * now. This function is called from interrupt
2559 * handlers to queue more transactions as transfer
2560 * states change.
2561 */
2562 dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0);
2563 }
2564 }
2565 }
2566
2567 /**
2568 * Processes periodic channels for the next frame and queues transactions for
2569 * these channels to the DWC_otg controller. After queueing transactions, the
2570 * Periodic Tx FIFO Empty interrupt is enabled if there are more transactions
2571 * to queue as Periodic Tx FIFO or request queue space becomes available.
2572 * Otherwise, the Periodic Tx FIFO Empty interrupt is disabled.
2573 */
2574 static void process_periodic_channels(dwc_otg_hcd_t *hcd)
2575 {
2576 hptxsts_data_t tx_status;
2577 struct list_head *qh_ptr;
2578 dwc_otg_qh_t *qh;
2579 int status;
2580 int no_queue_space = 0;
2581 int no_fifo_space = 0;
2582
2583 dwc_otg_host_global_regs_t *host_regs;
2584 host_regs = hcd->core_if->host_if->host_global_regs;
2585
2586 DWC_DEBUGPL(DBG_HCD_FLOOD, "Queue periodic transactions\n");
2587 #ifdef DEBUG
2588 tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts);
2589 DWC_DEBUGPL(DBG_HCD_FLOOD, " P Tx Req Queue Space Avail (before queue): %d\n",
2590 tx_status.b.ptxqspcavail);
2591 DWC_DEBUGPL(DBG_HCD_FLOOD, " P Tx FIFO Space Avail (before queue): %d\n",
2592 tx_status.b.ptxfspcavail);
2593 #endif
2594
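	/*
	 * Walk the periodic assigned schedule, queuing requests while request
	 * queue entries and periodic Tx FIFO space remain available.
	 */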
2595 qh_ptr = hcd->periodic_sched_assigned.next;
2596 while (qh_ptr != &hcd->periodic_sched_assigned) {
2597 tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts);
2598 if (tx_status.b.ptxqspcavail == 0) {
2599 no_queue_space = 1;
2600 break;
2601 }
2602
2603 qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
2604
2605 /*
2606 * Set a flag if we're queuing high-bandwidth in slave mode.
2607 		 * The flag prevents any halts from getting into the request queue in
2608 * the middle of multiple high-bandwidth packets getting queued.
2609 */
2610 if (!hcd->core_if->dma_enable &&
2611 qh->channel->multi_count > 1)
2612 {
2613 hcd->core_if->queuing_high_bandwidth = 1;
2614 }
2615
2616 status = queue_transaction(hcd, qh->channel, tx_status.b.ptxfspcavail);
2617 if (status < 0) {
2618 no_fifo_space = 1;
2619 break;
2620 }
2621
2622 /*
2623 * In Slave mode, stay on the current transfer until there is
2624 * nothing more to do or the high-bandwidth request count is
2625 * reached. In DMA mode, only need to queue one request. The
2626 * controller automatically handles multiple packets for
2627 * high-bandwidth transfers.
2628 */
2629 if (hcd->core_if->dma_enable || status == 0 ||
2630 qh->channel->requests == qh->channel->multi_count) {
2631 qh_ptr = qh_ptr->next;
2632 /*
2633 * Move the QH from the periodic assigned schedule to
2634 * the periodic queued schedule.
2635 */
2636 list_move(&qh->qh_list_entry, &hcd->periodic_sched_queued);
2637
2638 /* done queuing high bandwidth */
2639 hcd->core_if->queuing_high_bandwidth = 0;
2640 }
2641 }
2642
2643 if (!hcd->core_if->dma_enable) {
2644 dwc_otg_core_global_regs_t *global_regs;
2645 gintmsk_data_t intr_mask = {.d32 = 0};
2646
2647 global_regs = hcd->core_if->core_global_regs;
2648 intr_mask.b.ptxfempty = 1;
2649 #ifdef DEBUG
2650 tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts);
2651 DWC_DEBUGPL(DBG_HCDV, " P Tx Req Queue Space Avail (after queue): %d\n",
2652 tx_status.b.ptxqspcavail);
2653 DWC_DEBUGPL(DBG_HCDV, " P Tx FIFO Space Avail (after queue): %d\n",
2654 tx_status.b.ptxfspcavail);
2655 #endif
2656 if (!list_empty(&hcd->periodic_sched_assigned) ||
2657 no_queue_space || no_fifo_space) {
2658 /*
2659 * May need to queue more transactions as the request
2660 * queue or Tx FIFO empties. Enable the periodic Tx
2661 * FIFO empty interrupt. (Always use the half-empty
2662 * level to ensure that new requests are loaded as
2663 * soon as possible.)
2664 */
2665 dwc_modify_reg32(&global_regs->gintmsk, 0, intr_mask.d32);
2666 } else {
2667 /*
2668 * Disable the Tx FIFO empty interrupt since there are
2669 * no more transactions that need to be queued right
2670 * now. This function is called from interrupt
2671 * handlers to queue more transactions as transfer
2672 * states change.
2673 */
2674 dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0);
2675 }
2676 }
2677 }
2678
2679 /**
2680 * This function processes the currently active host channels and queues
2681 * transactions for these channels to the DWC_otg controller. It is called
2682 * from HCD interrupt handler functions.
2683 *
2684 * @param hcd The HCD state structure.
2685 * @param tr_type The type(s) of transactions to queue (non-periodic,
2686 * periodic, or both).
2687 */
2688 void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t *hcd,
2689 dwc_otg_transaction_type_e tr_type)
2690 {
2691 #ifdef DEBUG_SOF
2692 DWC_DEBUGPL(DBG_HCD, "Queue Transactions\n");
2693 #endif
2694 /* Process host channels associated with periodic transfers. */
2695 if ((tr_type == DWC_OTG_TRANSACTION_PERIODIC ||
2696 tr_type == DWC_OTG_TRANSACTION_ALL) &&
2697 !list_empty(&hcd->periodic_sched_assigned)) {
2698
2699 process_periodic_channels(hcd);
2700 }
2701
2702 /* Process host channels associated with non-periodic transfers. */
2703 if (tr_type == DWC_OTG_TRANSACTION_NON_PERIODIC ||
2704 tr_type == DWC_OTG_TRANSACTION_ALL) {
2705 if (!list_empty(&hcd->non_periodic_sched_active)) {
2706 process_non_periodic_channels(hcd);
2707 } else {
2708 /*
2709 * Ensure NP Tx FIFO empty interrupt is disabled when
2710 * there are no non-periodic transfers to process.
2711 */
2712 gintmsk_data_t gintmsk = {.d32 = 0};
2713 gintmsk.b.nptxfempty = 1;
2714 dwc_modify_reg32(&hcd->core_if->core_global_regs->gintmsk,
2715 gintmsk.d32, 0);
2716 }
2717 }
2718 }
2719
2720 /**
2721 * Sets the final status of an URB and returns it to the device driver. Any
2722 * required cleanup of the URB is performed.
2723 */
2724 void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t *hcd, struct urb *urb, int status)
2725 {
2726 unsigned long flags;
2727
2728 SPIN_LOCK_IRQSAVE(&hcd->lock, flags);
2729
2730 #ifdef DEBUG
2731
2732 if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
2733 DWC_PRINT("%s: urb %p, device %d, ep %d %s, status=%d\n",
2734 __func__, urb, usb_pipedevice(urb->pipe),
2735 usb_pipeendpoint(urb->pipe),
2736 usb_pipein(urb->pipe) ? "IN" : "OUT", status);
2737 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
2738 int i;
2739 for (i = 0; i < urb->number_of_packets; i++) {
2740 DWC_PRINT(" ISO Desc %d status: %d\n",
2741 i, urb->iso_frame_desc[i].status);
2742 }
2743 }
2744 }
2745 #endif
2746
2747 	/* If the aligned bounce buffer was used instead of the original unaligned
2748 	 * buffer, sync it for CPU access and copy IN data back to the caller's buffer. */
2749 	if ((urb->transfer_dma == urb->aligned_transfer_dma) && ((urb->transfer_flags & URB_DIR_MASK) == URB_DIR_IN)) {
2750 		dma_sync_single_for_cpu(NULL, urb->transfer_dma, urb->actual_length, DMA_FROM_DEVICE);
2751 		memcpy(urb->transfer_buffer, urb->aligned_transfer_buffer, urb->actual_length);
2752 	}
2753
2754 usb_hcd_unlink_urb_from_ep(dwc_otg_hcd_to_hcd(hcd), urb);
2755 urb->status = status;
2756 urb->hcpriv = NULL;
2757 SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags);
2758 usb_hcd_giveback_urb(dwc_otg_hcd_to_hcd(hcd), urb, status);
2759
2760 }
2761
2762 /*
2763 * Returns the Queue Head for an URB.
2764 */
2765 dwc_otg_qh_t *dwc_urb_to_qh(struct urb *urb)
2766 {
2767 struct usb_host_endpoint *ep = dwc_urb_to_endpoint(urb);
2768 return (dwc_otg_qh_t *)ep->hcpriv;
2769 }
2770
2771 #ifdef DEBUG
2772 void dwc_print_setup_data(uint8_t *setup)
2773 {
2774 int i;
2775 if (CHK_DEBUG_LEVEL(DBG_HCD)){
2776 DWC_PRINT("Setup Data = MSB ");
2777 for (i = 7; i >= 0; i--) DWC_PRINT("%02x ", setup[i]);
2778 DWC_PRINT("\n");
2779 		DWC_PRINT("  bmRequestType Transfer = %s\n", (setup[0] & 0x80) ? "Device-to-Host" : "Host-to-Device");
2780 DWC_PRINT(" bmRequestType Type = ");
2781 switch ((setup[0] & 0x60) >> 5) {
2782 case 0: DWC_PRINT("Standard\n"); break;
2783 case 1: DWC_PRINT("Class\n"); break;
2784 case 2: DWC_PRINT("Vendor\n"); break;
2785 case 3: DWC_PRINT("Reserved\n"); break;
2786 }
2787 DWC_PRINT(" bmRequestType Recipient = ");
2788 switch (setup[0] & 0x1f) {
2789 case 0: DWC_PRINT("Device\n"); break;
2790 case 1: DWC_PRINT("Interface\n"); break;
2791 case 2: DWC_PRINT("Endpoint\n"); break;
2792 case 3: DWC_PRINT("Other\n"); break;
2793 default: DWC_PRINT("Reserved\n"); break;
2794 }
2795 DWC_PRINT(" bRequest = 0x%0x\n", setup[1]);
2796 DWC_PRINT(" wValue = 0x%0x\n", *((uint16_t *)&setup[2]));
2797 DWC_PRINT(" wIndex = 0x%0x\n", *((uint16_t *)&setup[4]));
2798 DWC_PRINT(" wLength = 0x%0x\n\n", *((uint16_t *)&setup[6]));
2799 }
2800 }
2801 #endif
2802
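/** Stub: frame remaining (frrem) statistics are not collected by this driver. */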
2803 void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t *hcd) {
2804 }
2805
2806 void dwc_otg_hcd_dump_state(dwc_otg_hcd_t *hcd)
2807 {
2808 #ifdef DEBUG
2809 int num_channels;
2810 int i;
2811 gnptxsts_data_t np_tx_status;
2812 hptxsts_data_t p_tx_status;
2813
2814 num_channels = hcd->core_if->core_params->host_channels;
2815 DWC_PRINT("\n");
2816 DWC_PRINT("************************************************************\n");
2817 DWC_PRINT("HCD State:\n");
2818 DWC_PRINT(" Num channels: %d\n", num_channels);
2819 for (i = 0; i < num_channels; i++) {
2820 dwc_hc_t *hc = hcd->hc_ptr_array[i];
2821 DWC_PRINT(" Channel %d: %p\n", i, hc);
2822 DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
2823 hc->dev_addr, hc->ep_num, hc->ep_is_in);
2824 DWC_PRINT(" speed: %d\n", hc->speed);
2825 DWC_PRINT(" ep_type: %d\n", hc->ep_type);
2826 DWC_PRINT(" max_packet: %d\n", hc->max_packet);
2827 DWC_PRINT(" data_pid_start: %d\n", hc->data_pid_start);
2828 DWC_PRINT(" multi_count: %d\n", hc->multi_count);
2829 DWC_PRINT(" xfer_started: %d\n", hc->xfer_started);
2830 DWC_PRINT(" xfer_buff: %p\n", hc->xfer_buff);
2831 DWC_PRINT(" xfer_len: %d\n", hc->xfer_len);
2832 DWC_PRINT(" xfer_count: %d\n", hc->xfer_count);
2833 DWC_PRINT(" halt_on_queue: %d\n", hc->halt_on_queue);
2834 DWC_PRINT(" halt_pending: %d\n", hc->halt_pending);
2835 DWC_PRINT(" halt_status: %d\n", hc->halt_status);
2836 DWC_PRINT(" do_split: %d\n", hc->do_split);
2837 DWC_PRINT(" complete_split: %d\n", hc->complete_split);
2838 DWC_PRINT(" hub_addr: %d\n", hc->hub_addr);
2839 DWC_PRINT(" port_addr: %d\n", hc->port_addr);
2840 DWC_PRINT(" xact_pos: %d\n", hc->xact_pos);
2841 DWC_PRINT(" requests: %d\n", hc->requests);
2842 DWC_PRINT(" qh: %p\n", hc->qh);
2843 if (hc->qh)
2844 DWC_PRINT(" nak_frame: %x\n", hc->qh->nak_frame);
2845 if (hc->xfer_started) {
2846 hfnum_data_t hfnum;
2847 hcchar_data_t hcchar;
2848 hctsiz_data_t hctsiz;
2849 hcint_data_t hcint;
2850 hcintmsk_data_t hcintmsk;
2851 hfnum.d32 = dwc_read_reg32(&hcd->core_if->host_if->host_global_regs->hfnum);
2852 hcchar.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hcchar);
2853 hctsiz.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hctsiz);
2854 hcint.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hcint);
2855 hcintmsk.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hcintmsk);
2856 DWC_PRINT(" hfnum: 0x%08x\n", hfnum.d32);
2857 DWC_PRINT(" hcchar: 0x%08x\n", hcchar.d32);
2858 DWC_PRINT(" hctsiz: 0x%08x\n", hctsiz.d32);
2859 DWC_PRINT(" hcint: 0x%08x\n", hcint.d32);
2860 DWC_PRINT(" hcintmsk: 0x%08x\n", hcintmsk.d32);
2861 }
2862 if (hc->xfer_started && hc->qh && hc->qh->qtd_in_process) {
2863 dwc_otg_qtd_t *qtd;
2864 struct urb *urb;
2865 qtd = hc->qh->qtd_in_process;
2866 urb = qtd->urb;
2867 DWC_PRINT(" URB Info:\n");
2868 DWC_PRINT(" qtd: %p, urb: %p\n", qtd, urb);
2869 if (urb) {
2870 DWC_PRINT(" Dev: %d, EP: %d %s\n",
2871 usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe),
2872 usb_pipein(urb->pipe) ? "IN" : "OUT");
2873 DWC_PRINT(" Max packet size: %d\n",
2874 usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
2875 DWC_PRINT(" transfer_buffer: %p\n", urb->transfer_buffer);
2876 DWC_PRINT(" transfer_dma: %p\n", (void *)urb->transfer_dma);
2877 DWC_PRINT(" transfer_buffer_length: %d\n", urb->transfer_buffer_length);
2878 DWC_PRINT(" actual_length: %d\n", urb->actual_length);
2879 }
2880 }
2881 }
2882 DWC_PRINT(" non_periodic_channels: %d\n", hcd->non_periodic_channels);
2883 DWC_PRINT(" periodic_channels: %d\n", hcd->periodic_channels);
2884 DWC_PRINT(" nakking_channels: %d\n", hcd->nakking_channels);
2885 DWC_PRINT(" last_channel_halted: %d\n", hcd->last_channel_halted);
2886 DWC_PRINT(" periodic_usecs: %d\n", hcd->periodic_usecs);
2887 np_tx_status.d32 = dwc_read_reg32(&hcd->core_if->core_global_regs->gnptxsts);
2888 DWC_PRINT(" NP Tx Req Queue Space Avail: %d\n", np_tx_status.b.nptxqspcavail);
2889 DWC_PRINT(" NP Tx FIFO Space Avail: %d\n", np_tx_status.b.nptxfspcavail);
2890 p_tx_status.d32 = dwc_read_reg32(&hcd->core_if->host_if->host_global_regs->hptxsts);
2891 DWC_PRINT(" P Tx Req Queue Space Avail: %d\n", p_tx_status.b.ptxqspcavail);
2892 DWC_PRINT(" P Tx FIFO Space Avail: %d\n", p_tx_status.b.ptxfspcavail);
2893 dwc_otg_hcd_dump_frrem(hcd);
2894 dwc_otg_dump_global_registers(hcd->core_if);
2895 dwc_otg_dump_host_registers(hcd->core_if);
2896 DWC_PRINT("************************************************************\n");
2897 DWC_PRINT("\n");
2898 #endif
2899 }
2900 #endif /* DWC_DEVICE_ONLY */