cns3xxx: update to linux 3.10
[openwrt/svn-archive/archive.git] / target/linux/cns3xxx/files/drivers/usb/dwc/otg_hcd.c
1 /* ==========================================================================
2 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd.c $
3 * $Revision: #75 $
4 * $Date: 2008/07/15 $
5 * $Change: 1064940 $
6 *
7 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
8 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
9 * otherwise expressly agreed to in writing between Synopsys and you.
10 *
11 * The Software IS NOT an item of Licensed Software or Licensed Product under
12 * any End User Software License Agreement or Agreement for Licensed Product
13 * with Synopsys or any supplement thereto. You are permitted to use and
14 * redistribute this Software in source and binary forms, with or without
15 * modification, provided that redistributions of source code must retain this
16 * notice. You may not view, use, disclose, copy or distribute this file or
17 * any information contained herein except pursuant to this license grant from
18 * Synopsys. If you do not agree with this notice, including the disclaimer
19 * below, then you are not authorized to use the Software.
20 *
21 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
25 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31 * DAMAGE.
32 * ========================================================================== */
33 #ifndef DWC_DEVICE_ONLY
34
35 /**
36 * @file
37 *
38 * This file contains the implementation of the HCD. In Linux, the HCD
39 * implements the hc_driver API.
40 */
41 #include <linux/kernel.h>
42 #include <linux/module.h>
43 #include <linux/moduleparam.h>
44 #include <linux/init.h>
45 #include <linux/device.h>
46 #include <linux/platform_device.h>
47 #include <linux/errno.h>
48 #include <linux/list.h>
49 #include <linux/interrupt.h>
50 #include <linux/string.h>
51 #include <linux/dma-mapping.h>
52 #include <linux/version.h>
53
54 #include "otg_driver.h"
55 #include "otg_hcd.h"
56 #include "otg_regs.h"
57
58 static const char dwc_otg_hcd_name[] = "dwc_otg_hcd";
59
60 static const struct hc_driver dwc_otg_hc_driver = {
61
62 .description = dwc_otg_hcd_name,
63 .product_desc = "DWC OTG Controller",
64 .hcd_priv_size = sizeof(dwc_otg_hcd_t),
65 .irq = dwc_otg_hcd_irq,
66 .flags = HCD_MEMORY | HCD_USB2,
67 .start = dwc_otg_hcd_start,
68 .stop = dwc_otg_hcd_stop,
69 .urb_enqueue = dwc_otg_hcd_urb_enqueue,
70 .urb_dequeue = dwc_otg_hcd_urb_dequeue,
71 .endpoint_disable = dwc_otg_hcd_endpoint_disable,
72 .get_frame_number = dwc_otg_hcd_get_frame_number,
73 .hub_status_data = dwc_otg_hcd_hub_status_data,
74 .hub_control = dwc_otg_hcd_hub_control,
75 };
76
77 /**
78 * Work queue function for starting the HCD when A-Cable is connected.
79  * dwc_otg_hcd_start() must be called from process context.
80 */
81 static void hcd_start_func(struct work_struct *_work)
82 {
83 struct delayed_work *dw = container_of(_work, struct delayed_work, work);
84 struct dwc_otg_hcd *otg_hcd = container_of(dw, struct dwc_otg_hcd, start_work);
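	/* The dwc_otg_hcd is allocated inside usb_hcd::hcd_priv, so container_of() recovers the enclosing usb_hcd. */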
85 struct usb_hcd *usb_hcd = container_of((void *)otg_hcd, struct usb_hcd, hcd_priv);
86 DWC_DEBUGPL(DBG_HCDV, "%s() %p\n", __func__, usb_hcd);
87 if (usb_hcd) {
88 dwc_otg_hcd_start(usb_hcd);
89 }
90 }
91
92 /**
93 * HCD Callback function for starting the HCD when A-Cable is
94 * connected.
95 *
96 * @param p void pointer to the <code>struct usb_hcd</code>
97 */
98 static int32_t dwc_otg_hcd_start_cb(void *p)
99 {
100 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(p);
101 dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if;
102 hprt0_data_t hprt0;
103
104 if (core_if->op_state == B_HOST) {
105 /*
106 * Reset the port. During a HNP mode switch the reset
107 * needs to occur within 1ms and have a duration of at
108 * least 50ms.
109 */
110 hprt0.d32 = dwc_otg_read_hprt0(core_if);
111 hprt0.b.prtrst = 1;
112 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
113 ((struct usb_hcd *)p)->self.is_b_host = 1;
114 } else {
115 ((struct usb_hcd *)p)->self.is_b_host = 0;
116 }
117
118 /* Need to start the HCD in a non-interrupt context. */
119 // INIT_WORK(&dwc_otg_hcd->start_work, hcd_start_func);
120 INIT_DELAYED_WORK(&dwc_otg_hcd->start_work, hcd_start_func);
121 // schedule_work(&dwc_otg_hcd->start_work);
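	/* 50 * HZ / 1000 jiffies is roughly 50 ms; the delay presumably leaves time for the port reset programmed above to complete. */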
122 queue_delayed_work(core_if->wq_otg, &dwc_otg_hcd->start_work, 50 * HZ / 1000);
123
124 return 1;
125 }
126
127 /**
128 * HCD Callback function for stopping the HCD.
129 *
130 * @param p void pointer to the <code>struct usb_hcd</code>
131 */
132 static int32_t dwc_otg_hcd_stop_cb(void *p)
133 {
134 struct usb_hcd *usb_hcd = (struct usb_hcd *)p;
135 DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p);
136 dwc_otg_hcd_stop(usb_hcd);
137 return 1;
138 }
139
140 static void del_xfer_timers(dwc_otg_hcd_t *hcd)
141 {
142 #ifdef DEBUG
143 int i;
144 int num_channels = hcd->core_if->core_params->host_channels;
145 for (i = 0; i < num_channels; i++) {
146 del_timer(&hcd->core_if->hc_xfer_timer[i]);
147 }
148 #endif
149 }
150
151 static void del_timers(dwc_otg_hcd_t *hcd)
152 {
153 del_xfer_timers(hcd);
154 del_timer(&hcd->conn_timer);
155 }
156
157 /**
158 * Processes all the URBs in a single list of QHs. Completes them with
159 * -ETIMEDOUT and frees the QTD.
160 */
161 static void kill_urbs_in_qh_list(dwc_otg_hcd_t *hcd, struct list_head *qh_list)
162 {
163 struct list_head *qh_item;
164 dwc_otg_qh_t *qh;
165 struct list_head *qtd_item;
166 dwc_otg_qtd_t *qtd;
167 unsigned long flags;
168
169 SPIN_LOCK_IRQSAVE(&hcd->lock, flags);
170 list_for_each(qh_item, qh_list) {
171 qh = list_entry(qh_item, dwc_otg_qh_t, qh_list_entry);
172 for (qtd_item = qh->qtd_list.next;
173 qtd_item != &qh->qtd_list;
174 qtd_item = qh->qtd_list.next) {
175 qtd = list_entry(qtd_item, dwc_otg_qtd_t, qtd_list_entry);
176 if (qtd->urb != NULL) {
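				/* Drop the lock around the completion callback; it may re-enter the HCD (e.g. to resubmit an URB). */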
177 SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags);
178 dwc_otg_hcd_complete_urb(hcd, qtd->urb,
179 -ETIMEDOUT);
180 SPIN_LOCK_IRQSAVE(&hcd->lock, flags);
181 }
182 dwc_otg_hcd_qtd_remove_and_free(hcd, qtd);
183 }
184 }
185 SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags);
186 }
187
188 /**
189 * Responds with an error status of ETIMEDOUT to all URBs in the non-periodic
190 * and periodic schedules. The QTD associated with each URB is removed from
191 * the schedule and freed. This function may be called when a disconnect is
192 * detected or when the HCD is being stopped.
193 */
194 static void kill_all_urbs(dwc_otg_hcd_t *hcd)
195 {
196 kill_urbs_in_qh_list(hcd, &hcd->non_periodic_sched_inactive);
197 kill_urbs_in_qh_list(hcd, &hcd->non_periodic_sched_active);
198 kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_inactive);
199 kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_ready);
200 kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_assigned);
201 kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_queued);
202 }
203
204 /**
205 * HCD Callback function for disconnect of the HCD.
206 *
207 * @param p void pointer to the <code>struct usb_hcd</code>
208 */
209 static int32_t dwc_otg_hcd_disconnect_cb(void *p)
210 {
211 gintsts_data_t intr;
212 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(p);
213
214 //DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p);
215
216 /*
217 * Set status flags for the hub driver.
218 */
219 dwc_otg_hcd->flags.b.port_connect_status_change = 1;
220 dwc_otg_hcd->flags.b.port_connect_status = 0;
221
222 /*
223 * Shutdown any transfers in process by clearing the Tx FIFO Empty
224 * interrupt mask and status bits and disabling subsequent host
225 * channel interrupts.
226 */
227 intr.d32 = 0;
228 intr.b.nptxfempty = 1;
229 intr.b.ptxfempty = 1;
230 intr.b.hcintr = 1;
231 dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintmsk, intr.d32, 0);
232 dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintsts, intr.d32, 0);
233
234 del_timers(dwc_otg_hcd);
235
236 /*
237 * Turn off the vbus power only if the core has transitioned to device
238 * mode. If still in host mode, need to keep power on to detect a
239 * reconnection.
240 */
241 if (dwc_otg_is_device_mode(dwc_otg_hcd->core_if)) {
242 if (dwc_otg_hcd->core_if->op_state != A_SUSPEND) {
243 hprt0_data_t hprt0 = { .d32=0 };
244 DWC_PRINT("Disconnect: PortPower off\n");
245 hprt0.b.prtpwr = 0;
246 dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0.d32);
247 }
248
249 dwc_otg_disable_host_interrupts(dwc_otg_hcd->core_if);
250 }
251
252 /* Respond with an error status to all URBs in the schedule. */
253 kill_all_urbs(dwc_otg_hcd);
254
255 if (dwc_otg_is_host_mode(dwc_otg_hcd->core_if)) {
256 /* Clean up any host channels that were in use. */
257 int num_channels;
258 int i;
259 dwc_hc_t *channel;
260 dwc_otg_hc_regs_t *hc_regs;
261 hcchar_data_t hcchar;
262
263 num_channels = dwc_otg_hcd->core_if->core_params->host_channels;
264
265 if (!dwc_otg_hcd->core_if->dma_enable) {
266 /* Flush out any channel requests in slave mode. */
267 for (i = 0; i < num_channels; i++) {
268 channel = dwc_otg_hcd->hc_ptr_array[i];
269 if (list_empty(&channel->hc_list_entry)) {
270 hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[i];
271 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
272 if (hcchar.b.chen) {
273 hcchar.b.chen = 0;
274 hcchar.b.chdis = 1;
275 hcchar.b.epdir = 0;
276 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
277 }
278 }
279 }
280 }
281
282 for (i = 0; i < num_channels; i++) {
283 channel = dwc_otg_hcd->hc_ptr_array[i];
284 if (list_empty(&channel->hc_list_entry)) {
285 hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[i];
286 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
287 if (hcchar.b.chen) {
288 /* Halt the channel. */
289 hcchar.b.chdis = 1;
290 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
291 }
292
293 dwc_otg_hc_cleanup(dwc_otg_hcd->core_if, channel);
294 list_add_tail(&channel->hc_list_entry,
295 &dwc_otg_hcd->free_hc_list);
296 }
297 }
298 }
299
300 /* A disconnect will end the session so the B-Device is no
301 * longer a B-host. */
302 ((struct usb_hcd *)p)->self.is_b_host = 0;
303 return 1;
304 }
305
306 /**
307 * Connection timeout function. An OTG host is required to display a
308 * message if the device does not connect within 10 seconds.
309 */
310 void dwc_otg_hcd_connect_timeout(unsigned long ptr)
311 {
312 DWC_DEBUGPL(DBG_HCDV, "%s(%x)\n", __func__, (int)ptr);
313 DWC_PRINT("Connect Timeout\n");
314 DWC_ERROR("Device Not Connected/Responding\n");
315 }
316
317 /**
318 * Start the connection timer. An OTG host is required to display a
319 * message if the device does not connect within 10 seconds. The
320 * timer is deleted if a port connect interrupt occurs before the
321 * timer expires.
322 */
323 static void dwc_otg_hcd_start_connect_timer(dwc_otg_hcd_t *hcd)
324 {
325 init_timer(&hcd->conn_timer);
326 hcd->conn_timer.function = dwc_otg_hcd_connect_timeout;
327 hcd->conn_timer.data = 0;
328 hcd->conn_timer.expires = jiffies + (HZ * 10);
329 add_timer(&hcd->conn_timer);
330 }
331
332 /**
333  * HCD Callback function for session start of the HCD.
334 *
335 * @param p void pointer to the <code>struct usb_hcd</code>
336 */
337 static int32_t dwc_otg_hcd_session_start_cb(void *p)
338 {
339 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(p);
340 DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p);
341 dwc_otg_hcd_start_connect_timer(dwc_otg_hcd);
342 return 1;
343 }
344
345 /**
346 * HCD Callback structure for handling mode switching.
347 */
348 static dwc_otg_cil_callbacks_t hcd_cil_callbacks = {
349 .start = dwc_otg_hcd_start_cb,
350 .stop = dwc_otg_hcd_stop_cb,
351 .disconnect = dwc_otg_hcd_disconnect_cb,
352 .session_start = dwc_otg_hcd_session_start_cb,
353 .p = 0,
354 };
355
356 /**
357 * Reset tasklet function
358 */
359 static void reset_tasklet_func(unsigned long data)
360 {
361 dwc_otg_hcd_t *dwc_otg_hcd = (dwc_otg_hcd_t *)data;
362 dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if;
363 hprt0_data_t hprt0;
364
365 DWC_DEBUGPL(DBG_HCDV, "USB RESET tasklet called\n");
366
367 hprt0.d32 = dwc_otg_read_hprt0(core_if);
368 hprt0.b.prtrst = 1;
369 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
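	/* Hold the reset for 60 ms; root ports are normally given at least 50 ms of reset signaling. */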
370 mdelay(60);
371
372 hprt0.b.prtrst = 0;
373 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
374 dwc_otg_hcd->flags.b.port_reset_change = 1;
375 }
376
377 static struct tasklet_struct reset_tasklet = {
378 .next = NULL,
379 .state = 0,
380 .count = ATOMIC_INIT(0),
381 .func = reset_tasklet_func,
382 .data = 0,
383 };
384
385 /**
386 * Initializes the HCD. This function allocates memory for and initializes the
387 * static parts of the usb_hcd and dwc_otg_hcd structures. It also registers the
388 * USB bus with the core and calls the hc_driver->start() function. It returns
389 * a negative error on failure.
390 */
391 int dwc_otg_hcd_init(struct platform_device *pdev)
392 {
393 struct usb_hcd *hcd = NULL;
394 dwc_otg_hcd_t *dwc_otg_hcd = NULL;
395 dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev);
396
397 int num_channels;
398 int i;
399 dwc_hc_t *channel;
400
401 int retval = 0;
402
403 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD INIT\n");
404
405 /* Set device flags indicating whether the HCD supports DMA. */
406 if (otg_dev->core_if->dma_enable) {
407 DWC_PRINT("Using DMA mode\n");
408
409 if (otg_dev->core_if->dma_desc_enable) {
410 DWC_PRINT("Device using Descriptor DMA mode\n");
411 } else {
412 DWC_PRINT("Device using Buffer DMA mode\n");
413 }
414 }
415 /*
416 * Allocate memory for the base HCD plus the DWC OTG HCD.
417 * Initialize the base HCD.
418 */
419
420 hcd = usb_create_hcd(&dwc_otg_hc_driver, &pdev->dev, "gadget");
421 if (!hcd) {
422 retval = -ENOMEM;
423 goto error1;
424 }
425
426 hcd->regs = otg_dev->base;
427 hcd->self.otg_port = 1;
428
429 	/* Integrate the TT into the root hub; by default this is disabled. */
430 hcd->has_tt = 1;
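	/* has_tt tells the USB core that the controller has an integrated Transaction Translator, so full- and low-speed devices are handled through the root hub rather than a companion controller. */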
431
432 /* Initialize the DWC OTG HCD. */
433 dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
434 dwc_otg_hcd->core_if = otg_dev->core_if;
435 otg_dev->hcd = dwc_otg_hcd;
436 init_hcd_usecs(dwc_otg_hcd);
437
438 	/* Initialize the spinlock that protects the HCD state and schedules. */
439 spin_lock_init(&dwc_otg_hcd->lock);
440
441 /* Register the HCD CIL Callbacks */
442 dwc_otg_cil_register_hcd_callbacks(otg_dev->core_if,
443 &hcd_cil_callbacks, hcd);
444
445 /* Initialize the non-periodic schedule. */
446 INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_inactive);
447 INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_active);
448
449 /* Initialize the periodic schedule. */
450 INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_inactive);
451 INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_ready);
452 INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_assigned);
453 INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_queued);
454
455 /*
456 * Create a host channel descriptor for each host channel implemented
457 * in the controller. Initialize the channel descriptor array.
458 */
459 INIT_LIST_HEAD(&dwc_otg_hcd->free_hc_list);
460 num_channels = dwc_otg_hcd->core_if->core_params->host_channels;
461 memset(dwc_otg_hcd->hc_ptr_array, 0, sizeof(dwc_otg_hcd->hc_ptr_array));
462 for (i = 0; i < num_channels; i++) {
463 channel = kmalloc(sizeof(dwc_hc_t), GFP_KERNEL);
464 if (channel == NULL) {
465 retval = -ENOMEM;
466 DWC_ERROR("%s: host channel allocation failed\n", __func__);
467 goto error2;
468 }
469 memset(channel, 0, sizeof(dwc_hc_t));
470 channel->hc_num = i;
471 dwc_otg_hcd->hc_ptr_array[i] = channel;
472 #ifdef DEBUG
473 init_timer(&dwc_otg_hcd->core_if->hc_xfer_timer[i]);
474 #endif
475 DWC_DEBUGPL(DBG_HCDV, "HCD Added channel #%d, hc=%p\n", i, channel);
476 }
477
478 /* Initialize the Connection timeout timer. */
479 init_timer(&dwc_otg_hcd->conn_timer);
480
481 /* Initialize reset tasklet. */
482 reset_tasklet.data = (unsigned long) dwc_otg_hcd;
483 dwc_otg_hcd->reset_tasklet = &reset_tasklet;
484
485 /*
486 * Finish generic HCD initialization and start the HCD. This function
487 * allocates the DMA buffer pool, registers the USB bus, requests the
488 * IRQ line, and calls dwc_otg_hcd_start method.
489 */
490 retval = usb_add_hcd(hcd, otg_dev->irq, IRQF_SHARED);
491 if (retval < 0) {
492 goto error2;
493 }
494
495 /*
496 * Allocate space for storing data on status transactions. Normally no
497 * data is sent, but this space acts as a bit bucket. This must be
498 * done after usb_add_hcd since that function allocates the DMA buffer
499 * pool.
500 */
501 if (otg_dev->core_if->dma_enable) {
502 dwc_otg_hcd->status_buf =
503 dma_alloc_coherent(&pdev->dev,
504 DWC_OTG_HCD_STATUS_BUF_SIZE,
505 &dwc_otg_hcd->status_buf_dma,
506 GFP_KERNEL | GFP_DMA);
507 } else {
508 dwc_otg_hcd->status_buf = kmalloc(DWC_OTG_HCD_STATUS_BUF_SIZE,
509 GFP_KERNEL);
510 }
511 if (!dwc_otg_hcd->status_buf) {
512 retval = -ENOMEM;
513 DWC_ERROR("%s: status_buf allocation failed\n", __func__);
514 goto error3;
515 }
516
517 dwc_otg_hcd->otg_dev = otg_dev;
518
519 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Initialized HCD, usbbus=%d\n",
520 hcd->self.busnum);
521 return 0;
522
523 /* Error conditions */
524 error3:
525 usb_remove_hcd(hcd);
526 error2:
527 dwc_otg_hcd_free(hcd);
528 usb_put_hcd(hcd);
529 error1:
530 return retval;
531 }
532
533 /**
534 * Removes the HCD.
535 * Frees memory and resources associated with the HCD and deregisters the bus.
536 */
537 void dwc_otg_hcd_remove(struct platform_device *pdev)
538 {
539 dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev);
540 dwc_otg_hcd_t *dwc_otg_hcd;
541 struct usb_hcd *hcd;
542
543 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD REMOVE\n");
544
545 if (!otg_dev) {
546 DWC_DEBUGPL(DBG_ANY, "%s: otg_dev NULL!\n", __func__);
547 return;
548 }
549
550 dwc_otg_hcd = otg_dev->hcd;
551
552 if (!dwc_otg_hcd) {
553 DWC_DEBUGPL(DBG_ANY, "%s: otg_dev->hcd NULL!\n", __func__);
554 return;
555 }
556
557 hcd = dwc_otg_hcd_to_hcd(dwc_otg_hcd);
558
559 if (!hcd) {
560 DWC_DEBUGPL(DBG_ANY, "%s: dwc_otg_hcd_to_hcd(dwc_otg_hcd) NULL!\n", __func__);
561 return;
562 }
563
564 /* Turn off all interrupts */
565 dwc_write_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintmsk, 0);
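	/* Bit 0 of GAHBCFG is the global interrupt enable; clearing it gates all core interrupts at the AHB level. */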
566 dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gahbcfg, 1, 0);
567
568 usb_remove_hcd(hcd);
569 dwc_otg_hcd_free(hcd);
570 usb_put_hcd(hcd);
571 }
572
573 /* =========================================================================
574 * Linux HC Driver Functions
575 * ========================================================================= */
576
577 /**
578 * Initializes dynamic portions of the DWC_otg HCD state.
579 */
580 static void hcd_reinit(dwc_otg_hcd_t *hcd)
581 {
582 struct list_head *item;
583 int num_channels;
584 int i;
585 dwc_hc_t *channel;
586
587 hcd->flags.d32 = 0;
588
589 hcd->non_periodic_qh_ptr = &hcd->non_periodic_sched_active;
590 hcd->non_periodic_channels = 0;
591 hcd->periodic_channels = 0;
592 hcd->nakking_channels = 0;
593
594 /*
595 * Put all channels in the free channel list and clean up channel
596 * states.
597 */
598 item = hcd->free_hc_list.next;
599 while (item != &hcd->free_hc_list) {
600 list_del(item);
601 item = hcd->free_hc_list.next;
602 }
603 num_channels = hcd->core_if->core_params->host_channels;
604 for (i = 0; i < num_channels; i++) {
605 channel = hcd->hc_ptr_array[i];
606 list_add_tail(&channel->hc_list_entry, &hcd->free_hc_list);
607 dwc_otg_hc_cleanup(hcd->core_if, channel);
608 }
609
610 /* Initialize the DWC core for host mode operation. */
611 dwc_otg_core_host_init(hcd->core_if);
612 }
613
614 /** Initializes the DWC_otg controller and its root hub and prepares it for host
615 * mode operation. Activates the root port. Returns 0 on success and a negative
616 * error code on failure. */
617 int dwc_otg_hcd_start(struct usb_hcd *hcd)
618 {
619 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
620 dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if;
621 struct usb_bus *bus;
622
623
624 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD START\n");
625
626 bus = hcd_to_bus(hcd);
627
628 	/* Initialize the bus state. If the core is in device mode,
629 	 * just mark the HCD as running and return. */
630 if (dwc_otg_is_device_mode(core_if)) {
631 hcd->state = HC_STATE_RUNNING;
632 return 0;
633 }
634 hcd->state = HC_STATE_RUNNING;
635
636 /* Initialize and connect root hub if one is not already attached */
637 if (bus->root_hub) {
638 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Has Root Hub\n");
639 /* Inform the HUB driver to resume. */
640 usb_hcd_resume_root_hub(hcd);
641 }
642 else {
643 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Does Not Have Root Hub\n");
644 }
645
646 hcd_reinit(dwc_otg_hcd);
647
648 return 0;
649 }
650
651 static void qh_list_free(dwc_otg_hcd_t *hcd, struct list_head *qh_list)
652 {
653 struct list_head *item;
654 dwc_otg_qh_t *qh;
655 unsigned long flags;
656
657 if (!qh_list->next) {
658 /* The list hasn't been initialized yet. */
659 return;
660 }
661
662 /* Ensure there are no QTDs or URBs left. */
663 kill_urbs_in_qh_list(hcd, qh_list);
664
665 SPIN_LOCK_IRQSAVE(&hcd->lock, flags);
666 for (item = qh_list->next; item != qh_list; item = qh_list->next) {
667 qh = list_entry(item, dwc_otg_qh_t, qh_list_entry);
668 dwc_otg_hcd_qh_remove_and_free(hcd, qh);
669 }
670 SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags);
671 }
672
673 /**
674 * Halts the DWC_otg host mode operations in a clean manner. USB transfers are
675 * stopped.
676 */
677 void dwc_otg_hcd_stop(struct usb_hcd *hcd)
678 {
679 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
680 hprt0_data_t hprt0 = { .d32=0 };
681
682 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD STOP\n");
683
684 /* Turn off all host-specific interrupts. */
685 dwc_otg_disable_host_interrupts(dwc_otg_hcd->core_if);
686
687 /*
688 * The root hub should be disconnected before this function is called.
689 * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue)
690 * and the QH lists (via ..._hcd_endpoint_disable).
691 */
692
693 /* Turn off the vbus power */
694 DWC_PRINT("PortPower off\n");
695 hprt0.b.prtpwr = 0;
696 dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0.d32);
697 }
698
699 /** Returns the current frame number. */
700 int dwc_otg_hcd_get_frame_number(struct usb_hcd *hcd)
701 {
702 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
703 hfnum_data_t hfnum;
704
705 hfnum.d32 = dwc_read_reg32(&dwc_otg_hcd->core_if->
706 host_if->host_global_regs->hfnum);
707
708 #ifdef DEBUG_SOF
709 DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD GET FRAME NUMBER %d\n", hfnum.b.frnum);
710 #endif
711 return hfnum.b.frnum;
712 }
713
714 /**
715 * Frees secondary storage associated with the dwc_otg_hcd structure contained
716 * in the struct usb_hcd field.
717 */
718 void dwc_otg_hcd_free(struct usb_hcd *hcd)
719 {
720 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
721 int i;
722
723 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD FREE\n");
724
725 del_timers(dwc_otg_hcd);
726
727 /* Free memory for QH/QTD lists */
728 qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_inactive);
729 qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_active);
730 qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_inactive);
731 qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_ready);
732 qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_assigned);
733 qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_queued);
734
735 /* Free memory for the host channels. */
736 for (i = 0; i < MAX_EPS_CHANNELS; i++) {
737 dwc_hc_t *hc = dwc_otg_hcd->hc_ptr_array[i];
738 if (hc != NULL) {
739 DWC_DEBUGPL(DBG_HCDV, "HCD Free channel #%i, hc=%p\n", i, hc);
740 kfree(hc);
741 }
742 }
743
744 if (dwc_otg_hcd->core_if->dma_enable) {
745 if (dwc_otg_hcd->status_buf_dma) {
746 dma_free_coherent(hcd->self.controller,
747 DWC_OTG_HCD_STATUS_BUF_SIZE,
748 dwc_otg_hcd->status_buf,
749 dwc_otg_hcd->status_buf_dma);
750 }
751 } else if (dwc_otg_hcd->status_buf != NULL) {
752 kfree(dwc_otg_hcd->status_buf);
753 }
754 }
755
756 #ifdef DEBUG
757 static void dump_urb_info(struct urb *urb, char* fn_name)
758 {
759 DWC_PRINT("%s, urb %p\n", fn_name, urb);
760 DWC_PRINT(" Device address: %d\n", usb_pipedevice(urb->pipe));
761 DWC_PRINT(" Endpoint: %d, %s\n", usb_pipeendpoint(urb->pipe),
762 (usb_pipein(urb->pipe) ? "IN" : "OUT"));
763 DWC_PRINT(" Endpoint type: %s\n",
764 ({char *pipetype;
765 switch (usb_pipetype(urb->pipe)) {
766 case PIPE_CONTROL: pipetype = "CONTROL"; break;
767 case PIPE_BULK: pipetype = "BULK"; break;
768 case PIPE_INTERRUPT: pipetype = "INTERRUPT"; break;
769 case PIPE_ISOCHRONOUS: pipetype = "ISOCHRONOUS"; break;
770 default: pipetype = "UNKNOWN"; break;
771 }; pipetype;}));
772 DWC_PRINT(" Speed: %s\n",
773 ({char *speed;
774 switch (urb->dev->speed) {
775 case USB_SPEED_HIGH: speed = "HIGH"; break;
776 case USB_SPEED_FULL: speed = "FULL"; break;
777 case USB_SPEED_LOW: speed = "LOW"; break;
778 default: speed = "UNKNOWN"; break;
779 }; speed;}));
780 DWC_PRINT(" Max packet size: %d\n",
781 usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
782 DWC_PRINT(" Data buffer length: %d\n", urb->transfer_buffer_length);
783 DWC_PRINT(" Transfer buffer: %p, Transfer DMA: %p\n",
784 urb->transfer_buffer, (void *)urb->transfer_dma);
785 DWC_PRINT(" Setup buffer: %p, Setup DMA: %p\n",
786 urb->setup_packet, (void *)urb->setup_dma);
787 DWC_PRINT(" Interval: %d\n", urb->interval);
788 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
789 int i;
790 for (i = 0; i < urb->number_of_packets; i++) {
791 DWC_PRINT(" ISO Desc %d:\n", i);
792 DWC_PRINT(" offset: %d, length %d\n",
793 urb->iso_frame_desc[i].offset,
794 urb->iso_frame_desc[i].length);
795 }
796 }
797 }
798
799 static void dump_channel_info(dwc_otg_hcd_t *hcd,
800 dwc_otg_qh_t *qh)
801 {
802 if (qh->channel != NULL) {
803 dwc_hc_t *hc = qh->channel;
804 struct list_head *item;
805 dwc_otg_qh_t *qh_item;
806 int num_channels = hcd->core_if->core_params->host_channels;
807 int i;
808
809 dwc_otg_hc_regs_t *hc_regs;
810 hcchar_data_t hcchar;
811 hcsplt_data_t hcsplt;
812 hctsiz_data_t hctsiz;
813 uint32_t hcdma;
814
815 hc_regs = hcd->core_if->host_if->hc_regs[hc->hc_num];
816 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
817 hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt);
818 hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);
819 hcdma = dwc_read_reg32(&hc_regs->hcdma);
820
821 DWC_PRINT(" Assigned to channel %p:\n", hc);
822 DWC_PRINT(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32);
823 DWC_PRINT(" hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma);
824 DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
825 hc->dev_addr, hc->ep_num, hc->ep_is_in);
826 DWC_PRINT(" ep_type: %d\n", hc->ep_type);
827 DWC_PRINT(" max_packet: %d\n", hc->max_packet);
828 DWC_PRINT(" data_pid_start: %d\n", hc->data_pid_start);
829 DWC_PRINT(" xfer_started: %d\n", hc->xfer_started);
830 DWC_PRINT(" halt_status: %d\n", hc->halt_status);
831 DWC_PRINT(" xfer_buff: %p\n", hc->xfer_buff);
832 DWC_PRINT(" xfer_len: %d\n", hc->xfer_len);
833 DWC_PRINT(" qh: %p\n", hc->qh);
834 DWC_PRINT(" NP inactive sched:\n");
835 list_for_each(item, &hcd->non_periodic_sched_inactive) {
836 qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry);
837 DWC_PRINT(" %p\n", qh_item);
838 }
839 DWC_PRINT(" NP active sched:\n");
840 list_for_each(item, &hcd->non_periodic_sched_active) {
841 qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry);
842 DWC_PRINT(" %p\n", qh_item);
843 }
844 DWC_PRINT(" Channels: \n");
845 for (i = 0; i < num_channels; i++) {
846 dwc_hc_t *hc = hcd->hc_ptr_array[i];
847 DWC_PRINT(" %2d: %p\n", i, hc);
848 }
849 }
850 }
851 #endif
852
853
854 /* The OTG host requires DMA addresses to be DWORD-aligned; if an URB's
855  * transfer buffer is misaligned, substitute a DWORD-aligned bounce buffer. */
856 static inline
857 int hcd_check_and_patch_dma_addr(struct urb *urb)
858 {
859 	if (!urb->transfer_buffer || !urb->transfer_dma || urb->transfer_dma == 0xffffffff)
860 		return 0;
861 
862 	if (((u32)urb->transfer_buffer) & 0x3) {
863 /*
864 printk("%s: "
865 "urb(%.8x) "
866 "transfer_buffer=%.8x, "
867 "transfer_dma=%.8x, "
868 "transfer_buffer_length=%d, "
869 "actual_length=%d(%x), "
870 "\n",
871 ((urb->transfer_flags & URB_DIR_MASK)==URB_DIR_OUT)?"OUT":"IN",
872 urb,
873 urb->transfer_buffer,
874 urb->transfer_dma,
875 urb->transfer_buffer_length,
876 urb->actual_length,urb->actual_length
877 );
878 */
879 		if (!urb->aligned_transfer_buffer || urb->aligned_transfer_buffer_length < urb->transfer_buffer_length) {
880 			urb->aligned_transfer_buffer_length = urb->transfer_buffer_length;
881 			if (urb->aligned_transfer_buffer) {
882 				kfree(urb->aligned_transfer_buffer);
883 			}
884 			urb->aligned_transfer_buffer = kmalloc(urb->aligned_transfer_buffer_length, GFP_ATOMIC | GFP_DMA);
885 			if (!urb->aligned_transfer_buffer) {
886 				DWC_ERROR("Cannot alloc required buffer!!\n");
887 				//BUG();
888 				return -1;
889 			}
890 			urb->aligned_transfer_dma = dma_map_single(NULL, (void *)urb->aligned_transfer_buffer, urb->aligned_transfer_buffer_length, DMA_FROM_DEVICE);
891 			//printk(" new allocated aligned_buf=%.8x aligned_buf_len=%d\n", (u32)urb->aligned_transfer_buffer, urb->aligned_transfer_buffer_length);
892 		}
893 		urb->transfer_dma = urb->aligned_transfer_dma;
894 		if ((urb->transfer_flags & URB_DIR_MASK) == URB_DIR_OUT) {
895 			memcpy(urb->aligned_transfer_buffer, urb->transfer_buffer, urb->transfer_buffer_length);
896 			dma_sync_single_for_device(NULL, urb->transfer_dma, urb->transfer_buffer_length, DMA_TO_DEVICE);
897 		}
898 }
899 return 0;
900 }
901
902
903
904 /** Starts processing a USB transfer request specified by a USB Request Block
905 * (URB). mem_flags indicates the type of memory allocation to use while
906 * processing this URB. */
907 int dwc_otg_hcd_urb_enqueue(struct usb_hcd *hcd,
908 // struct usb_host_endpoint *ep,
909 struct urb *urb,
910 gfp_t mem_flags
911 )
912 {
913 int retval = 0;
914 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
915 dwc_otg_qtd_t *qtd;
916 unsigned long flags;
917
918 SPIN_LOCK_IRQSAVE(&dwc_otg_hcd->lock, flags);
919
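	/* A non-NULL hcpriv means a QTD is already attached, i.e. the URB is already queued (see urb_dequeue below). */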
920 if (urb->hcpriv != NULL) {
921 SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
922 return -ENOMEM;
923
924 }
925 #ifdef DEBUG
926 if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
927 dump_urb_info(urb, "dwc_otg_hcd_urb_enqueue");
928 }
929 #endif
930 if (!dwc_otg_hcd->flags.b.port_connect_status) {
931 /* No longer connected. */
932 SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
933 return -ENODEV;
934 }
935
936 if (hcd_check_and_patch_dma_addr(urb)) {
937 DWC_ERROR("Unable to check and patch dma addr\n");
938 SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
939 return -ENOMEM;
940 }
941 qtd = dwc_otg_hcd_qtd_create(urb);
942 if (qtd == NULL) {
943 DWC_ERROR("DWC OTG HCD URB Enqueue failed creating QTD\n");
944 SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
945 return -ENOMEM;
946 }
947
948 retval = dwc_otg_hcd_qtd_add(qtd, dwc_otg_hcd);
949 if (retval < 0) {
950 DWC_ERROR("DWC OTG HCD URB Enqueue failed adding QTD. "
951 "Error status %d\n", retval);
952 dwc_otg_hcd_qtd_free(qtd);
953 }
954 SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
955 return retval;
956 }
957
958 /** Aborts/cancels a USB transfer request. Always returns 0 to indicate
959 * success. */
960 int dwc_otg_hcd_urb_dequeue(struct usb_hcd *hcd,
961 struct urb *urb, int status)
962 {
963 unsigned long flags;
964 dwc_otg_hcd_t *dwc_otg_hcd;
965 dwc_otg_qtd_t *urb_qtd;
966 dwc_otg_qh_t *qh;
967 struct usb_host_endpoint *ep = dwc_urb_to_endpoint(urb);
968 int rc;
969
970 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Dequeue\n");
971
972 dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
973
974 SPIN_LOCK_IRQSAVE(&dwc_otg_hcd->lock, flags);
975
976 urb_qtd = (dwc_otg_qtd_t *)urb->hcpriv;
977 qh = (dwc_otg_qh_t *)ep->hcpriv;
978
979 #ifdef DEBUG
980 if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
981 dump_urb_info(urb, "dwc_otg_hcd_urb_dequeue");
982 if (urb_qtd == qh->qtd_in_process) {
983 dump_channel_info(dwc_otg_hcd, qh);
984 }
985 }
986 #endif
987
988 if (qh && urb_qtd == qh->qtd_in_process) {
989 /* The QTD is in process (it has been assigned to a channel). */
990
991 if (dwc_otg_hcd->flags.b.port_connect_status) {
992 /*
993 * If still connected (i.e. in host mode), halt the
994 * channel so it can be used for other transfers. If
995 * no longer connected, the host registers can't be
996 * written to halt the channel since the core is in
997 * device mode.
998 */
999 dwc_otg_hc_halt(dwc_otg_hcd, qh->channel,
1000 DWC_OTG_HC_XFER_URB_DEQUEUE);
1001 }
1002 }
1003
1004 /*
1005 * Free the QTD and clean up the associated QH. Leave the QH in the
1006 * schedule if it has any remaining QTDs.
1007 */
1008 dwc_otg_hcd_qtd_remove_and_free(dwc_otg_hcd, urb_qtd);
1009 if (qh && urb_qtd == qh->qtd_in_process) {
1010 dwc_otg_hcd_qh_deactivate(dwc_otg_hcd, qh, 0);
1011 qh->channel = NULL;
1012 qh->qtd_in_process = NULL;
1013 } else {
1014 if (qh && list_empty(&qh->qtd_list)) {
1015 dwc_otg_hcd_qh_remove(dwc_otg_hcd, qh);
1016 }
1017 }
1018
1019
1020 rc = usb_hcd_check_unlink_urb(hcd, urb, status);
1021
1022 if (!rc) {
1023 usb_hcd_unlink_urb_from_ep(hcd, urb);
1024 }
1025 urb->hcpriv = NULL;
1026 SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
1027
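	/* Give the URB back only after dropping the HCD lock, so the completion handler can run (and possibly resubmit) without deadlocking. */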
1028 if (!rc) {
1029 usb_hcd_giveback_urb(hcd, urb, status);
1030 }
1031 if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
1032 DWC_PRINT("Called usb_hcd_giveback_urb()\n");
1033 DWC_PRINT(" urb->status = %d\n", urb->status);
1034 }
1035
1036 return 0;
1037 }
1038
1039 /** Frees resources in the DWC_otg controller related to a given endpoint. Also
1040 * clears state in the HCD related to the endpoint. Any URBs for the endpoint
1041 * must already be dequeued. */
1042 void dwc_otg_hcd_endpoint_disable(struct usb_hcd *hcd,
1043 struct usb_host_endpoint *ep)
1044 {
1045 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
1046 dwc_otg_qh_t *qh;
1047
1048 unsigned long flags;
1049 int retry = 0;
1050
1051 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD EP DISABLE: _bEndpointAddress=0x%02x, "
1052 "endpoint=%d\n", ep->desc.bEndpointAddress,
1053 dwc_ep_addr_to_endpoint(ep->desc.bEndpointAddress));
1054
1055 rescan:
1056 SPIN_LOCK_IRQSAVE(&dwc_otg_hcd->lock, flags);
1057 qh = (dwc_otg_qh_t *)(ep->hcpriv);
1058 if (!qh)
1059 goto done;
1060
1061 /** Check that the QTD list is really empty */
1062 if (!list_empty(&qh->qtd_list)) {
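		/* Poll roughly once per jiffy, up to 250 times, waiting for the QTD list to drain. */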
1063 if (retry++ < 250) {
1064 SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
1065 schedule_timeout_uninterruptible(1);
1066 goto rescan;
1067 }
1068
1069 DWC_WARN("DWC OTG HCD EP DISABLE:"
1070 " QTD List for this endpoint is not empty\n");
1071 }
1072
1073 dwc_otg_hcd_qh_remove_and_free(dwc_otg_hcd, qh);
1074 ep->hcpriv = NULL;
1075 done:
1076 SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags);
1077 }
1078
1079 /** Handles host mode interrupts for the DWC_otg controller. Returns IRQ_NONE if
1080 * there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid
1081 * interrupt.
1082 *
1083 * This function is called by the USB core when an interrupt occurs */
1084 irqreturn_t dwc_otg_hcd_irq(struct usb_hcd *hcd)
1085 {
1086 int retVal = 0;
1087 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
1088 retVal = dwc_otg_hcd_handle_intr(dwc_otg_hcd);
1089 if (dwc_otg_hcd->flags.b.port_connect_status_change == 1)
1090 usb_hcd_poll_rh_status(hcd);
1091 return IRQ_RETVAL(retVal);
1092 }
1093
1094 /** Creates Status Change bitmap for the root hub and root port. The bitmap is
1095 * returned in buf. Bit 0 is the status change indicator for the root hub. Bit 1
1096 * is the status change indicator for the single root port. Returns 1 if either
1097 * change indicator is 1, otherwise returns 0. */
1098 int dwc_otg_hcd_hub_status_data(struct usb_hcd *hcd, char *buf)
1099 {
1100 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
1101
1102 buf[0] = 0;
1103 buf[0] |= (dwc_otg_hcd->flags.b.port_connect_status_change ||
1104 dwc_otg_hcd->flags.b.port_reset_change ||
1105 dwc_otg_hcd->flags.b.port_enable_change ||
1106 dwc_otg_hcd->flags.b.port_suspend_change ||
1107 dwc_otg_hcd->flags.b.port_over_current_change) << 1;
1108
1109 #ifdef DEBUG
1110 if (buf[0]) {
1111 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB STATUS DATA:"
1112 " Root port status changed\n");
1113 DWC_DEBUGPL(DBG_HCDV, " port_connect_status_change: %d\n",
1114 dwc_otg_hcd->flags.b.port_connect_status_change);
1115 DWC_DEBUGPL(DBG_HCDV, " port_reset_change: %d\n",
1116 dwc_otg_hcd->flags.b.port_reset_change);
1117 DWC_DEBUGPL(DBG_HCDV, " port_enable_change: %d\n",
1118 dwc_otg_hcd->flags.b.port_enable_change);
1119 DWC_DEBUGPL(DBG_HCDV, " port_suspend_change: %d\n",
1120 dwc_otg_hcd->flags.b.port_suspend_change);
1121 DWC_DEBUGPL(DBG_HCDV, " port_over_current_change: %d\n",
1122 dwc_otg_hcd->flags.b.port_over_current_change);
1123 }
1124 #endif
1125 return (buf[0] != 0);
1126 }
1127
1128 #ifdef DWC_HS_ELECT_TST
1129 /*
1130 * Quick and dirty hack to implement the HS Electrical Test
1131 * SINGLE_STEP_GET_DEVICE_DESCRIPTOR feature.
1132 *
1133 * This code was copied from our userspace app "hset". It sends a
1134 * Get Device Descriptor control sequence in two parts, first the
1135 * Setup packet by itself, followed some time later by the In and
1136 * Ack packets. Rather than trying to figure out how to add this
1137 * functionality to the normal driver code, we just hijack the
1138  * hardware, using these two functions to drive the hardware
1139 * directly.
1140 */
1141
1142 dwc_otg_core_global_regs_t *global_regs;
1143 dwc_otg_host_global_regs_t *hc_global_regs;
1144 dwc_otg_hc_regs_t *hc_regs;
1145 uint32_t *data_fifo;
1146
1147 static void do_setup(void)
1148 {
1149 gintsts_data_t gintsts;
1150 hctsiz_data_t hctsiz;
1151 hcchar_data_t hcchar;
1152 haint_data_t haint;
1153 hcint_data_t hcint;
1154
1155 /* Enable HAINTs */
1156 dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001);
1157
1158 /* Enable HCINTs */
1159 dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3);
1160
1161 /* Read GINTSTS */
1162 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1163 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1164
1165 /* Read HAINT */
1166 haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
1167 //fprintf(stderr, "HAINT: %08x\n", haint.d32);
1168
1169 /* Read HCINT */
1170 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1171 //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
1172
1173 /* Read HCCHAR */
1174 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1175 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
1176
1177 /* Clear HCINT */
1178 dwc_write_reg32(&hc_regs->hcint, hcint.d32);
1179
1180 /* Clear HAINT */
1181 dwc_write_reg32(&hc_global_regs->haint, haint.d32);
1182
1183 /* Clear GINTSTS */
1184 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1185
1186 /* Read GINTSTS */
1187 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1188 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1189
1190 /*
1191 * Send Setup packet (Get Device Descriptor)
1192 */
1193
1194 /* Make sure channel is disabled */
1195 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1196 if (hcchar.b.chen) {
1197 //fprintf(stderr, "Channel already enabled 1, HCCHAR = %08x\n", hcchar.d32);
1198 hcchar.b.chdis = 1;
1199 // hcchar.b.chen = 1;
1200 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1201 //sleep(1);
1202 mdelay(1000);
1203
1204 /* Read GINTSTS */
1205 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1206 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1207
1208 /* Read HAINT */
1209 haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
1210 //fprintf(stderr, "HAINT: %08x\n", haint.d32);
1211
1212 /* Read HCINT */
1213 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1214 //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
1215
1216 /* Read HCCHAR */
1217 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1218 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
1219
1220 /* Clear HCINT */
1221 dwc_write_reg32(&hc_regs->hcint, hcint.d32);
1222
1223 /* Clear HAINT */
1224 dwc_write_reg32(&hc_global_regs->haint, haint.d32);
1225
1226 /* Clear GINTSTS */
1227 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1228
1229 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1230 //if (hcchar.b.chen) {
1231 // fprintf(stderr, "** Channel _still_ enabled 1, HCCHAR = %08x **\n", hcchar.d32);
1232 //}
1233 }
1234
1235 /* Set HCTSIZ */
1236 hctsiz.d32 = 0;
1237 hctsiz.b.xfersize = 8;
1238 hctsiz.b.pktcnt = 1;
1239 hctsiz.b.pid = DWC_OTG_HC_PID_SETUP;
1240 dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
1241
1242 /* Set HCCHAR */
1243 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1244 hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL;
1245 hcchar.b.epdir = 0;
1246 hcchar.b.epnum = 0;
1247 hcchar.b.mps = 8;
1248 hcchar.b.chen = 1;
1249 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1250
1251 /* Fill FIFO with Setup data for Get Device Descriptor */
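	/* The channel data FIFOs start 0x1000 bytes above the core's global registers; this is channel 0's FIFO. */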
1252 data_fifo = (uint32_t *)((char *)global_regs + 0x1000);
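	/* Little-endian packed Setup packet: bmRequestType=0x80, bRequest=0x06 (GET_DESCRIPTOR), wValue=0x0100 (Device), wIndex=0, wLength=8. */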
1253 dwc_write_reg32(data_fifo++, 0x01000680);
1254 dwc_write_reg32(data_fifo++, 0x00080000);
1255
1256 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1257 //fprintf(stderr, "Waiting for HCINTR intr 1, GINTSTS = %08x\n", gintsts.d32);
1258
1259 /* Wait for host channel interrupt */
1260 do {
1261 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1262 } while (gintsts.b.hcintr == 0);
1263
1264 //fprintf(stderr, "Got HCINTR intr 1, GINTSTS = %08x\n", gintsts.d32);
1265
1266 /* Disable HCINTs */
1267 dwc_write_reg32(&hc_regs->hcintmsk, 0x0000);
1268
1269 /* Disable HAINTs */
1270 dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000);
1271
1272 /* Read HAINT */
1273 haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
1274 //fprintf(stderr, "HAINT: %08x\n", haint.d32);
1275
1276 /* Read HCINT */
1277 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1278 //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
1279
1280 /* Read HCCHAR */
1281 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1282 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
1283
1284 /* Clear HCINT */
1285 dwc_write_reg32(&hc_regs->hcint, hcint.d32);
1286
1287 /* Clear HAINT */
1288 dwc_write_reg32(&hc_global_regs->haint, haint.d32);
1289
1290 /* Clear GINTSTS */
1291 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1292
1293 /* Read GINTSTS */
1294 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1295 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1296 }
1297
1298 static void do_in_ack(void)
1299 {
1300 gintsts_data_t gintsts;
1301 hctsiz_data_t hctsiz;
1302 hcchar_data_t hcchar;
1303 haint_data_t haint;
1304 hcint_data_t hcint;
1305 host_grxsts_data_t grxsts;
1306
1307 /* Enable HAINTs */
1308 dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001);
1309
1310 /* Enable HCINTs */
1311 dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3);
1312
1313 /* Read GINTSTS */
1314 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1315 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1316
1317 /* Read HAINT */
1318 haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
1319 //fprintf(stderr, "HAINT: %08x\n", haint.d32);
1320
1321 /* Read HCINT */
1322 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1323 //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
1324
1325 /* Read HCCHAR */
1326 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1327 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
1328
1329 /* Clear HCINT */
1330 dwc_write_reg32(&hc_regs->hcint, hcint.d32);
1331
1332 /* Clear HAINT */
1333 dwc_write_reg32(&hc_global_regs->haint, haint.d32);
1334
1335 /* Clear GINTSTS */
1336 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1337
1338 /* Read GINTSTS */
1339 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1340 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1341
1342 /*
1343 * Receive Control In packet
1344 */
1345
1346 /* Make sure channel is disabled */
1347 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1348 if (hcchar.b.chen) {
1349 //fprintf(stderr, "Channel already enabled 2, HCCHAR = %08x\n", hcchar.d32);
1350 hcchar.b.chdis = 1;
1351 hcchar.b.chen = 1;
1352 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1353 //sleep(1);
1354 mdelay(1000);
1355
1356 /* Read GINTSTS */
1357 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1358 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1359
1360 /* Read HAINT */
1361 haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
1362 //fprintf(stderr, "HAINT: %08x\n", haint.d32);
1363
1364 /* Read HCINT */
1365 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1366 //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
1367
1368 /* Read HCCHAR */
1369 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1370 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
1371
1372 /* Clear HCINT */
1373 dwc_write_reg32(&hc_regs->hcint, hcint.d32);
1374
1375 /* Clear HAINT */
1376 dwc_write_reg32(&hc_global_regs->haint, haint.d32);
1377
1378 /* Clear GINTSTS */
1379 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1380
1381 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1382 //if (hcchar.b.chen) {
1383 // fprintf(stderr, "** Channel _still_ enabled 2, HCCHAR = %08x **\n", hcchar.d32);
1384 //}
1385 }
1386
1387 /* Set HCTSIZ */
1388 hctsiz.d32 = 0;
1389 hctsiz.b.xfersize = 8;
1390 hctsiz.b.pktcnt = 1;
1391 hctsiz.b.pid = DWC_OTG_HC_PID_DATA1;
1392 dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
1393
1394 /* Set HCCHAR */
1395 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1396 hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL;
1397 hcchar.b.epdir = 1;
1398 hcchar.b.epnum = 0;
1399 hcchar.b.mps = 8;
1400 hcchar.b.chen = 1;
1401 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1402
1403 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1404 //fprintf(stderr, "Waiting for RXSTSQLVL intr 1, GINTSTS = %08x\n", gintsts.d32);
1405
1406 /* Wait for receive status queue interrupt */
1407 do {
1408 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1409 } while (gintsts.b.rxstsqlvl == 0);
1410
1411 //fprintf(stderr, "Got RXSTSQLVL intr 1, GINTSTS = %08x\n", gintsts.d32);
1412
1413 /* Read RXSTS */
1414 grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp);
1415 //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32);
1416
1417 /* Clear RXSTSQLVL in GINTSTS */
1418 gintsts.d32 = 0;
1419 gintsts.b.rxstsqlvl = 1;
1420 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1421
1422 switch (grxsts.b.pktsts) {
1423 case DWC_GRXSTS_PKTSTS_IN:
1424 /* Read the data into the host buffer */
1425 if (grxsts.b.bcnt > 0) {
1426 int i;
1427 int word_count = (grxsts.b.bcnt + 3) / 4;
1428
1429 data_fifo = (uint32_t *)((char *)global_regs + 0x1000);
1430
1431 for (i = 0; i < word_count; i++) {
1432 (void)dwc_read_reg32(data_fifo++);
1433 }
1434 }
1435
1436 //fprintf(stderr, "Received %u bytes\n", (unsigned)grxsts.b.bcnt);
1437 break;
1438
1439 default:
1440 //fprintf(stderr, "** Unexpected GRXSTS packet status 1 **\n");
1441 break;
1442 }
1443
1444 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1445 //fprintf(stderr, "Waiting for RXSTSQLVL intr 2, GINTSTS = %08x\n", gintsts.d32);
1446
1447 /* Wait for receive status queue interrupt */
1448 do {
1449 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1450 } while (gintsts.b.rxstsqlvl == 0);
1451
1452 //fprintf(stderr, "Got RXSTSQLVL intr 2, GINTSTS = %08x\n", gintsts.d32);
1453
1454 /* Read RXSTS */
1455 grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp);
1456 //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32);
1457
1458 /* Clear RXSTSQLVL in GINTSTS */
1459 gintsts.d32 = 0;
1460 gintsts.b.rxstsqlvl = 1;
1461 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1462
1463 switch (grxsts.b.pktsts) {
1464 case DWC_GRXSTS_PKTSTS_IN_XFER_COMP:
1465 break;
1466
1467 default:
1468 //fprintf(stderr, "** Unexpected GRXSTS packet status 2 **\n");
1469 break;
1470 }
1471
1472 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1473 //fprintf(stderr, "Waiting for HCINTR intr 2, GINTSTS = %08x\n", gintsts.d32);
1474
1475 /* Wait for host channel interrupt */
1476 do {
1477 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1478 } while (gintsts.b.hcintr == 0);
1479
1480 //fprintf(stderr, "Got HCINTR intr 2, GINTSTS = %08x\n", gintsts.d32);
1481
1482 /* Read HAINT */
1483 haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
1484 //fprintf(stderr, "HAINT: %08x\n", haint.d32);
1485
1486 /* Read HCINT */
1487 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1488 //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
1489
1490 /* Read HCCHAR */
1491 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1492 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
1493
1494 /* Clear HCINT */
1495 dwc_write_reg32(&hc_regs->hcint, hcint.d32);
1496
1497 /* Clear HAINT */
1498 dwc_write_reg32(&hc_global_regs->haint, haint.d32);
1499
1500 /* Clear GINTSTS */
1501 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1502
1503 /* Read GINTSTS */
1504 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1505 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1506
1507 // usleep(100000);
1508 // mdelay(100);
1509 mdelay(1);
1510
1511 /*
1512 * Send handshake packet
1513 */
1514
1515 /* Read HAINT */
1516 haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
1517 //fprintf(stderr, "HAINT: %08x\n", haint.d32);
1518
1519 /* Read HCINT */
1520 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1521 //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
1522
1523 /* Read HCCHAR */
1524 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1525 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
1526
1527 /* Clear HCINT */
1528 dwc_write_reg32(&hc_regs->hcint, hcint.d32);
1529
1530 /* Clear HAINT */
1531 dwc_write_reg32(&hc_global_regs->haint, haint.d32);
1532
1533 /* Clear GINTSTS */
1534 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1535
1536 /* Read GINTSTS */
1537 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1538 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1539
1540 /* Make sure channel is disabled */
1541 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1542 if (hcchar.b.chen) {
1543 //fprintf(stderr, "Channel already enabled 3, HCCHAR = %08x\n", hcchar.d32);
1544 hcchar.b.chdis = 1;
1545 hcchar.b.chen = 1;
1546 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1547 //sleep(1);
1548 mdelay(1000);
1549
1550 /* Read GINTSTS */
1551 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1552 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1553
1554 /* Read HAINT */
1555 haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
1556 //fprintf(stderr, "HAINT: %08x\n", haint.d32);
1557
1558 /* Read HCINT */
1559 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1560 //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
1561
1562 /* Read HCCHAR */
1563 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1564 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
1565
1566 /* Clear HCINT */
1567 dwc_write_reg32(&hc_regs->hcint, hcint.d32);
1568
1569 /* Clear HAINT */
1570 dwc_write_reg32(&hc_global_regs->haint, haint.d32);
1571
1572 /* Clear GINTSTS */
1573 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1574
1575 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1576 //if (hcchar.b.chen) {
1577 // fprintf(stderr, "** Channel _still_ enabled 3, HCCHAR = %08x **\n", hcchar.d32);
1578 //}
1579 }
1580
1581 /* Set HCTSIZ */
1582 hctsiz.d32 = 0;
1583 hctsiz.b.xfersize = 0;
1584 hctsiz.b.pktcnt = 1;
1585 hctsiz.b.pid = DWC_OTG_HC_PID_DATA1;
1586 dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
1587
1588 /* Set HCCHAR */
1589 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1590 hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL;
1591 hcchar.b.epdir = 0;
1592 hcchar.b.epnum = 0;
1593 hcchar.b.mps = 8;
1594 hcchar.b.chen = 1;
1595 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1596
1597 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1598 //fprintf(stderr, "Waiting for HCINTR intr 3, GINTSTS = %08x\n", gintsts.d32);
1599
1600 /* Wait for host channel interrupt */
1601 do {
1602 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1603 } while (gintsts.b.hcintr == 0);
1604
1605 //fprintf(stderr, "Got HCINTR intr 3, GINTSTS = %08x\n", gintsts.d32);
1606
1607 /* Disable HCINTs */
1608 dwc_write_reg32(&hc_regs->hcintmsk, 0x0000);
1609
1610 /* Disable HAINTs */
1611 dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000);
1612
1613 /* Read HAINT */
1614 haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
1615 //fprintf(stderr, "HAINT: %08x\n", haint.d32);
1616
1617 /* Read HCINT */
1618 hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
1619 //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
1620
1621 /* Read HCCHAR */
1622 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1623 //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
1624
1625 /* Clear HCINT */
1626 dwc_write_reg32(&hc_regs->hcint, hcint.d32);
1627
1628 /* Clear HAINT */
1629 dwc_write_reg32(&hc_global_regs->haint, haint.d32);
1630
1631 /* Clear GINTSTS */
1632 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
1633
1634 /* Read GINTSTS */
1635 gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
1636 //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
1637 }
1638 #endif /* DWC_HS_ELECT_TST */
1639
1640 /** Handles hub class-specific requests. */
1641 int dwc_otg_hcd_hub_control(struct usb_hcd *hcd,
1642 u16 typeReq,
1643 u16 wValue,
1644 u16 wIndex,
1645 char *buf,
1646 u16 wLength)
1647 {
1648 int retval = 0;
1649
1650 dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
1651 dwc_otg_core_if_t *core_if = hcd_to_dwc_otg_hcd(hcd)->core_if;
1652 struct usb_hub_descriptor *desc;
1653 hprt0_data_t hprt0 = {.d32 = 0};
1654
1655 uint32_t port_status;
1656
1657 switch (typeReq) {
1658 case ClearHubFeature:
1659 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1660 "ClearHubFeature 0x%x\n", wValue);
1661 switch (wValue) {
1662 case C_HUB_LOCAL_POWER:
1663 case C_HUB_OVER_CURRENT:
1664 /* Nothing required here */
1665 break;
1666 default:
1667 retval = -EINVAL;
1668 DWC_ERROR("DWC OTG HCD - "
1669 "ClearHubFeature request %xh unknown\n", wValue);
1670 }
1671 break;
1672 case ClearPortFeature:
1673 if (!wIndex || wIndex > 1)
1674 goto error;
1675
1676 switch (wValue) {
1677 case USB_PORT_FEAT_ENABLE:
1678 DWC_DEBUGPL(DBG_ANY, "DWC OTG HCD HUB CONTROL - "
1679 "ClearPortFeature USB_PORT_FEAT_ENABLE\n");
1680 hprt0.d32 = dwc_otg_read_hprt0(core_if);
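			/* PrtEna is a write-1-to-clear bit on this core, so setting it disables the port. */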
1681 hprt0.b.prtena = 1;
1682 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1683 break;
1684 case USB_PORT_FEAT_SUSPEND:
1685 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1686 "ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
1687 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1688 hprt0.b.prtres = 1;
1689 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1690 			/* Drive resume signaling for 100 ms, then clear the Resume bit */
1691 			mdelay(100);
1692 hprt0.b.prtres = 0;
1693 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1694 break;
1695 case USB_PORT_FEAT_POWER:
1696 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1697 "ClearPortFeature USB_PORT_FEAT_POWER\n");
1698 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1699 hprt0.b.prtpwr = 0;
1700 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1701 break;
1702 case USB_PORT_FEAT_INDICATOR:
1703 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1704 "ClearPortFeature USB_PORT_FEAT_INDICATOR\n");
1705 			/* Port indicator not supported */
1706 break;
1707 case USB_PORT_FEAT_C_CONNECTION:
1708 			/* Clears the driver's internal connect status change
1709 * flag */
1710 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1711 "ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n");
1712 dwc_otg_hcd->flags.b.port_connect_status_change = 0;
1713 break;
1714 case USB_PORT_FEAT_C_RESET:
1715 /* Clears the driver's internal Port Reset Change
1716 * flag */
1717 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1718 "ClearPortFeature USB_PORT_FEAT_C_RESET\n");
1719 dwc_otg_hcd->flags.b.port_reset_change = 0;
1720 break;
1721 case USB_PORT_FEAT_C_ENABLE:
1722 /* Clears the driver's internal Port
1723 * Enable/Disable Change flag */
1724 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1725 "ClearPortFeature USB_PORT_FEAT_C_ENABLE\n");
1726 dwc_otg_hcd->flags.b.port_enable_change = 0;
1727 break;
1728 case USB_PORT_FEAT_C_SUSPEND:
1729 /* Clears the driver's internal Port Suspend
1730 * Change flag, which is set when resume signaling on
1731 * the host port is complete */
1732 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1733 "ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n");
1734 dwc_otg_hcd->flags.b.port_suspend_change = 0;
1735 break;
1736 case USB_PORT_FEAT_C_OVER_CURRENT:
1737 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1738 "ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n");
1739 dwc_otg_hcd->flags.b.port_over_current_change = 0;
1740 break;
1741 default:
1742 retval = -EINVAL;
1743 DWC_ERROR("DWC OTG HCD - "
1744 "ClearPortFeature request %xh "
1745 "unknown or unsupported\n", wValue);
1746 }
1747 break;
1748 case GetHubDescriptor:
1749 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1750 "GetHubDescriptor\n");
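                /* A minimal descriptor for the single-port root hub: 0x29 is the
                 * hub descriptor type and wHubCharacteristics 0x08 advertises
                 * per-port over-current reporting (per the USB 2.0 hub descriptor
                 * bit layout). The 0xff written to DeviceRemovable[1] appears to
                 * serve as the legacy PortPwrCtrlMask byte that follows the
                 * removable-device bitmap. */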
1751 desc = (struct usb_hub_descriptor *)buf;
1752 desc->bDescLength = 9;
1753 desc->bDescriptorType = 0x29;
1754 desc->bNbrPorts = 1;
1755 desc->wHubCharacteristics = 0x08;
1756 desc->bPwrOn2PwrGood = 1;
1757 desc->bHubContrCurrent = 0;
1758 desc->u.hs.DeviceRemovable[0] = 0;
1759 desc->u.hs.DeviceRemovable[1] = 0xff;
1760 break;
1761 case GetHubStatus:
1762 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1763 "GetHubStatus\n");
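                /* The root hub reports no local power loss and no hub-level
                 * over-current condition, so both the status and change words
                 * are returned as zero. */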
1764 memset(buf, 0, 4);
1765 break;
1766 case GetPortStatus:
1767 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1768 "GetPortStatus\n");
1769
1770 if (!wIndex || wIndex > 1)
1771 goto error;
1772
1773 port_status = 0;
1774
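                /* Accumulate the change bits from the driver's cached flags; the
                 * live status bits are read from HPRT0 below, but only if the
                 * port is still connected. */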
1775 if (dwc_otg_hcd->flags.b.port_connect_status_change)
1776 port_status |= (1 << USB_PORT_FEAT_C_CONNECTION);
1777
1778 if (dwc_otg_hcd->flags.b.port_enable_change)
1779 port_status |= (1 << USB_PORT_FEAT_C_ENABLE);
1780
1781 if (dwc_otg_hcd->flags.b.port_suspend_change)
1782 port_status |= (1 << USB_PORT_FEAT_C_SUSPEND);
1783
1784 if (dwc_otg_hcd->flags.b.port_reset_change)
1785 port_status |= (1 << USB_PORT_FEAT_C_RESET);
1786
1787 if (dwc_otg_hcd->flags.b.port_over_current_change) {
1788 DWC_ERROR("Device Not Supported\n");
1789 port_status |= (1 << USB_PORT_FEAT_C_OVER_CURRENT);
1790 }
1791
1792 if (!dwc_otg_hcd->flags.b.port_connect_status) {
1793 /*
1794 * The port is disconnected, which means the core is
1795 * either in device mode or it soon will be. Just
1796 * return 0's for the remainder of the port status
1797 * since the port register can't be read if the core
1798 * is in device mode.
1799 */
1800 *((__le32 *) buf) = cpu_to_le32(port_status);
1801 break;
1802 }
1803
1804 hprt0.d32 = dwc_read_reg32(core_if->host_if->hprt0);
1805 DWC_DEBUGPL(DBG_HCDV, " HPRT0: 0x%08x\n", hprt0.d32);
1806
1807 if (hprt0.b.prtconnsts)
1808 port_status |= (1 << USB_PORT_FEAT_CONNECTION);
1809
1810 if (hprt0.b.prtena)
1811 port_status |= (1 << USB_PORT_FEAT_ENABLE);
1812
1813 if (hprt0.b.prtsusp)
1814 port_status |= (1 << USB_PORT_FEAT_SUSPEND);
1815
1816 if (hprt0.b.prtovrcurract)
1817 port_status |= (1 << USB_PORT_FEAT_OVER_CURRENT);
1818
1819 if (hprt0.b.prtrst)
1820 port_status |= (1 << USB_PORT_FEAT_RESET);
1821
1822 if (hprt0.b.prtpwr)
1823 port_status |= (1 << USB_PORT_FEAT_POWER);
1824
1825 if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED)
1826 port_status |= (USB_PORT_STAT_HIGH_SPEED);
1827 else if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED)
1828 port_status |= (USB_PORT_STAT_LOW_SPEED);
1829
1830 if (hprt0.b.prttstctl)
1831 port_status |= (1 << USB_PORT_FEAT_TEST);
1832
1833                 /* USB_PORT_FEAT_INDICATOR unsupported, always 0 */
1834
1835 *((__le32 *) buf) = cpu_to_le32(port_status);
1836
1837 break;
1838 case SetHubFeature:
1839 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1840 "SetHubFeature\n");
1841 /* No HUB features supported */
1842 break;
1843 case SetPortFeature:
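                /* For PORT_TEST the test selector is carried in the upper byte
                 * of wIndex, so the single-port range check below is skipped. */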
1844 if (wValue != USB_PORT_FEAT_TEST && (!wIndex || wIndex > 1))
1845 goto error;
1846
1847 if (!dwc_otg_hcd->flags.b.port_connect_status) {
1848 /*
1849 * The port is disconnected, which means the core is
1850 * either in device mode or it soon will be. Just
1851 * return without doing anything since the port
1852 * register can't be written if the core is in device
1853 * mode.
1854 */
1855 break;
1856 }
1857
1858 switch (wValue) {
1859 case USB_PORT_FEAT_SUSPEND:
1860 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1861 "SetPortFeature - USB_PORT_FEAT_SUSPEND\n");
1862 if (hcd->self.otg_port == wIndex &&
1863 hcd->self.b_hnp_enable) {
1864 gotgctl_data_t gotgctl = {.d32=0};
1865 gotgctl.b.hstsethnpen = 1;
1866 dwc_modify_reg32(&core_if->core_global_regs->gotgctl,
1867 0, gotgctl.d32);
1868 core_if->op_state = A_SUSPEND;
1869 }
1870 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1871 hprt0.b.prtsusp = 1;
1872 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1873 //DWC_PRINT("SUSPEND: HPRT0=%0x\n", hprt0.d32);
1874 /* Suspend the Phy Clock */
1875 {
1876 pcgcctl_data_t pcgcctl = {.d32=0};
1877 pcgcctl.b.stoppclk = 1;
1878 dwc_write_reg32(core_if->pcgcctl, pcgcctl.d32);
1879 }
1880
1881 /* For HNP the bus must be suspended for at least 200ms. */
1882 if (hcd->self.b_hnp_enable) {
1883 mdelay(200);
1884 //DWC_PRINT("SUSPEND: wait complete! (%d)\n", _hcd->state);
1885 }
1886 break;
1887 case USB_PORT_FEAT_POWER:
1888 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1889 "SetPortFeature - USB_PORT_FEAT_POWER\n");
1890 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1891 hprt0.b.prtpwr = 1;
1892 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1893 break;
1894 case USB_PORT_FEAT_RESET:
1895 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1896 "SetPortFeature - USB_PORT_FEAT_RESET\n");
1897 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1898                         /* When acting as the B-Host, the port reset bit is
1899                          * set in the start HCD callback function so that
1900                          * the reset is started within 1ms of the HNP
1901                          * success interrupt. */
1902 if (!hcd->self.is_b_host) {
1903 hprt0.b.prtrst = 1;
1904 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1905 }
1906                         /* Hold reset for at least 10ms (FS/LS) or 50ms (HS); 60ms covers both */
1907 MDELAY(60);
1908 hprt0.b.prtrst = 0;
1909 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1910 break;
1911
1912 #ifdef DWC_HS_ELECT_TST
1913 case USB_PORT_FEAT_TEST:
1914 {
1915 uint32_t t;
1916 gintmsk_data_t gintmsk;
1917
1918                                 t = (wIndex >> 8); /* test selector is in the MSB of wIndex */
1919 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
1920 "SetPortFeature - USB_PORT_FEAT_TEST %d\n", t);
1921 warn("USB_PORT_FEAT_TEST %d\n", t);
1922 if (t < 6) {
1923 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1924 hprt0.b.prttstctl = t;
1925 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1926 } else {
1927 /* Setup global vars with reg addresses (quick and
1928 * dirty hack, should be cleaned up)
1929 */
1930 global_regs = core_if->core_global_regs;
1931 hc_global_regs = core_if->host_if->host_global_regs;
1932 hc_regs = (dwc_otg_hc_regs_t *)((char *)global_regs + 0x500);
1933 data_fifo = (uint32_t *)((char *)global_regs + 0x1000);
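                                /* These offsets follow the DWC_otg memory map:
                                 * the per-channel host registers start at 0x500
                                 * and the data FIFO window at 0x1000 from the
                                 * core's base address. */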
1934
1935 if (t == 6) { /* HS_HOST_PORT_SUSPEND_RESUME */
1936 /* Save current interrupt mask */
1937 gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk);
1938
1939 /* Disable all interrupts while we muck with
1940 * the hardware directly
1941 */
1942 dwc_write_reg32(&global_regs->gintmsk, 0);
1943
1944 /* 15 second delay per the test spec */
1945 mdelay(15000);
1946
1947 /* Drive suspend on the root port */
1948 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1949 hprt0.b.prtsusp = 1;
1950 hprt0.b.prtres = 0;
1951 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1952
1953 /* 15 second delay per the test spec */
1954 mdelay(15000);
1955
1956 /* Drive resume on the root port */
1957 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1958 hprt0.b.prtsusp = 0;
1959 hprt0.b.prtres = 1;
1960 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1961 mdelay(100);
1962
1963 /* Clear the resume bit */
1964 hprt0.b.prtres = 0;
1965 dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
1966
1967 /* Restore interrupts */
1968 dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32);
1969 } else if (t == 7) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR setup */
1970 /* Save current interrupt mask */
1971 gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk);
1972
1973 /* Disable all interrupts while we muck with
1974 * the hardware directly
1975 */
1976 dwc_write_reg32(&global_regs->gintmsk, 0);
1977
1978 /* 15 second delay per the test spec */
1979 mdelay(15000);
1980
1981 /* Send the Setup packet */
1982 do_setup();
1983
1984 /* 15 second delay so nothing else happens for awhile */
1985 mdelay(15000);
1986
1987 /* Restore interrupts */
1988 dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32);
1989 } else if (t == 8) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR execute */
1990 /* Save current interrupt mask */
1991 gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk);
1992
1993 /* Disable all interrupts while we muck with
1994 * the hardware directly
1995 */
1996 dwc_write_reg32(&global_regs->gintmsk, 0);
1997
1998 /* Send the Setup packet */
1999 do_setup();
2000
2001 /* 15 second delay so nothing else happens for awhile */
2002 mdelay(15000);
2003
2004 /* Send the In and Ack packets */
2005 do_in_ack();
2006
2007 /* 15 second delay so nothing else happens for awhile */
2008 mdelay(15000);
2009
2010 /* Restore interrupts */
2011 dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32);
2012 }
2013 }
2014 break;
2015 }
2016 #endif /* DWC_HS_ELECT_TST */
2017
2018 case USB_PORT_FEAT_INDICATOR:
2019 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
2020 "SetPortFeature - USB_PORT_FEAT_INDICATOR\n");
2021 /* Not supported */
2022 break;
2023 default:
2024 retval = -EINVAL;
2025 DWC_ERROR("DWC OTG HCD - "
2026 "SetPortFeature request %xh "
2027 "unknown or unsupported\n", wValue);
2028 break;
2029 }
2030 break;
2031 default:
2032 error:
2033 retval = -EINVAL;
2034 DWC_WARN("DWC OTG HCD - "
2035 "Unknown hub control request type or invalid typeReq: %xh wIndex: %xh wValue: %xh\n",
2036 typeReq, wIndex, wValue);
2037 break;
2038 }
2039
2040 return retval;
2041 }
2042
2043 /**
2044 * Assigns transactions from a QTD to a free host channel and initializes the
2045 * host channel to perform the transactions. The host channel is removed from
2046 * the free list.
2047 *
2048 * @param hcd The HCD state structure.
2049 * @param qh Transactions from the first QTD for this QH are selected and
2050 * assigned to a free host channel.
2051 */
2052 static void assign_and_init_hc(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
2053 {
2054 dwc_hc_t *hc;
2055 dwc_otg_qtd_t *qtd;
2056 struct urb *urb;
2057
2058 DWC_DEBUGPL(DBG_HCD_FLOOD, "%s(%p,%p)\n", __func__, hcd, qh);
2059 hc = list_entry(hcd->free_hc_list.next, dwc_hc_t, hc_list_entry);
2060
2061 qtd = list_entry(qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry);
2062 urb = qtd->urb;
2063
2064 if (!urb){
2065 return;
2066 }
2067
2068 /* Remove the host channel from the free list. */
2069 list_del_init(&hc->hc_list_entry);
2070
2071 qh->channel = hc;
2072 qh->qtd_in_process = qtd;
2073
2074 /*
2075 * Use usb_pipedevice to determine device address. This address is
2076 * 0 before the SET_ADDRESS command and the correct address afterward.
2077 */
2078 hc->dev_addr = usb_pipedevice(urb->pipe);
2079 hc->ep_num = usb_pipeendpoint(urb->pipe);
2080
2081 if (urb->dev->speed == USB_SPEED_LOW) {
2082 hc->speed = DWC_OTG_EP_SPEED_LOW;
2083 } else if (urb->dev->speed == USB_SPEED_FULL) {
2084 hc->speed = DWC_OTG_EP_SPEED_FULL;
2085 } else {
2086 hc->speed = DWC_OTG_EP_SPEED_HIGH;
2087 }
2088
2089 hc->max_packet = dwc_max_packet(qh->maxp);
2090
2091 hc->xfer_started = 0;
2092 hc->halt_status = DWC_OTG_HC_XFER_NO_HALT_STATUS;
2093 hc->error_state = (qtd->error_count > 0);
2094 hc->halt_on_queue = 0;
2095 hc->halt_pending = 0;
2096 hc->requests = 0;
2097
2098 /*
2099 * The following values may be modified in the transfer type section
2100 * below. The xfer_len value may be reduced when the transfer is
2101 * started to accommodate the max widths of the XferSize and PktCnt
2102 * fields in the HCTSIZn register.
2103 */
2104 hc->do_ping = qh->ping_state;
2105 hc->ep_is_in = (usb_pipein(urb->pipe) != 0);
2106 hc->data_pid_start = qh->data_toggle;
2107 hc->multi_count = 1;
2108
2109 if (hcd->core_if->dma_enable) {
2110 hc->xfer_buff = (uint8_t *)urb->transfer_dma + urb->actual_length;
2111 } else {
2112 hc->xfer_buff = (uint8_t *)urb->transfer_buffer + urb->actual_length;
2113 }
2114 hc->xfer_len = urb->transfer_buffer_length - urb->actual_length;
2115 hc->xfer_count = 0;
2116
2117 /*
2118 * Set the split attributes
2119 */
2120 hc->do_split = 0;
2121 if (qh->do_split) {
2122 hc->do_split = 1;
2123 hc->xact_pos = qtd->isoc_split_pos;
2124 hc->complete_split = qtd->complete_split;
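                /* Split transactions are routed through the transaction
                 * translator (TT) in the parent high-speed hub, so record the
                 * hub's device address and the downstream port number. */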
2125 hc->hub_addr = urb->dev->tt->hub->devnum;
2126 hc->port_addr = urb->dev->ttport;
2127 }
2128
2129 switch (usb_pipetype(urb->pipe)) {
2130 case PIPE_CONTROL:
2131 hc->ep_type = DWC_OTG_EP_TYPE_CONTROL;
2132 switch (qtd->control_phase) {
2133 case DWC_OTG_CONTROL_SETUP:
2134 DWC_DEBUGPL(DBG_HCDV, " Control setup transaction\n");
2135 hc->do_ping = 0;
2136 hc->ep_is_in = 0;
2137 hc->data_pid_start = DWC_OTG_HC_PID_SETUP;
2138 if (hcd->core_if->dma_enable) {
2139 hc->xfer_buff = (uint8_t *)urb->setup_dma;
2140 } else {
2141 hc->xfer_buff = (uint8_t *)urb->setup_packet;
2142 }
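                        /* A SETUP packet is always 8 bytes long. */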
2143 hc->xfer_len = 8;
2144 break;
2145 case DWC_OTG_CONTROL_DATA:
2146 DWC_DEBUGPL(DBG_HCDV, " Control data transaction\n");
2147 hc->data_pid_start = qtd->data_toggle;
2148 break;
2149 case DWC_OTG_CONTROL_STATUS:
2150 /*
2151 * Direction is opposite of data direction or IN if no
2152 * data.
2153 */
2154 DWC_DEBUGPL(DBG_HCDV, " Control status transaction\n");
2155 if (urb->transfer_buffer_length == 0) {
2156 hc->ep_is_in = 1;
2157 } else {
2158 hc->ep_is_in = (usb_pipein(urb->pipe) != USB_DIR_IN);
2159 }
2160 if (hc->ep_is_in) {
2161 hc->do_ping = 0;
2162 }
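                        /* The status stage always uses a DATA1 PID and carries a
                         * zero-length packet. */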
2163 hc->data_pid_start = DWC_OTG_HC_PID_DATA1;
2164 hc->xfer_len = 0;
2165 if (hcd->core_if->dma_enable) {
2166 hc->xfer_buff = (uint8_t *)hcd->status_buf_dma;
2167 } else {
2168 hc->xfer_buff = (uint8_t *)hcd->status_buf;
2169 }
2170 break;
2171 }
2172 break;
2173 case PIPE_BULK:
2174 hc->ep_type = DWC_OTG_EP_TYPE_BULK;
2175 break;
2176 case PIPE_INTERRUPT:
2177 hc->ep_type = DWC_OTG_EP_TYPE_INTR;
2178 break;
2179 case PIPE_ISOCHRONOUS:
2180 {
2181 struct usb_iso_packet_descriptor *frame_desc;
2182 frame_desc = &urb->iso_frame_desc[qtd->isoc_frame_index];
2183 hc->ep_type = DWC_OTG_EP_TYPE_ISOC;
2184 if (hcd->core_if->dma_enable) {
2185 hc->xfer_buff = (uint8_t *)urb->transfer_dma;
2186 } else {
2187 hc->xfer_buff = (uint8_t *)urb->transfer_buffer;
2188 }
2189 hc->xfer_buff += frame_desc->offset + qtd->isoc_split_offset;
2190 hc->xfer_len = frame_desc->length - qtd->isoc_split_offset;
2191
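                        /* 188 bytes is the most isochronous OUT data a single
                         * start-split can carry in one microframe, so a transfer
                         * that fits is sent as an "all" transaction; larger
                         * transfers begin with a "begin" transaction. */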
2192 if (hc->xact_pos == DWC_HCSPLIT_XACTPOS_ALL) {
2193 if (hc->xfer_len <= 188) {
2194 hc->xact_pos = DWC_HCSPLIT_XACTPOS_ALL;
2195 }
2196 else {
2197 hc->xact_pos = DWC_HCSPLIT_XACTPOS_BEGIN;
2198 }
2199 }
2200 }
2201 break;
2202 }
2203
2204 if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
2205 hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
2206 /*
2207 * This value may be modified when the transfer is started to
2208 * reflect the actual transfer length.
2209 */
2210 hc->multi_count = dwc_hb_mult(qh->maxp);
2211 }
2212
2213 dwc_otg_hc_init(hcd->core_if, hc);
2214 hc->qh = qh;
2215 }
2216
2217 /**
2218 * This function selects transactions from the HCD transfer schedule and
2219 * assigns them to available host channels. It is called from HCD interrupt
2220 * handler functions.
2221 *
2222 * @param hcd The HCD state structure.
2223 *
2224 * @return The types of new transactions that were assigned to host channels.
2225 */
2226 dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *hcd)
2227 {
2228 struct list_head *qh_ptr;
2229 dwc_otg_qh_t *qh = NULL;
2230 int num_channels;
2231 dwc_otg_transaction_type_e ret_val = DWC_OTG_TRANSACTION_NONE;
2232 uint16_t cur_frame = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd));
2233 unsigned long flags;
2234 int include_nakd, channels_full;
2235         /* This condition has been observed once, but the cause was never
2236          * determined. Check for it here to collect debug data if it
2237          * occurs again. */
2238 WARN_ON_ONCE(hcd->non_periodic_channels < 0);
2239 check_nakking(hcd, __FUNCTION__, "start");
2240
2241 #ifdef DEBUG_SOF
2242 DWC_DEBUGPL(DBG_HCD, " Select Transactions\n");
2243 #endif
2244
2245 SPIN_LOCK_IRQSAVE(&hcd->lock, flags);
2246 /* Process entries in the periodic ready list. */
2247 qh_ptr = hcd->periodic_sched_ready.next;
2248 while (qh_ptr != &hcd->periodic_sched_ready &&
2249 !list_empty(&hcd->free_hc_list)) {
2250
2251 qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
2252 assign_and_init_hc(hcd, qh);
2253
2254 /*
2255 * Move the QH from the periodic ready schedule to the
2256 * periodic assigned schedule.
2257 */
2258 qh_ptr = qh_ptr->next;
2259 list_move(&qh->qh_list_entry, &hcd->periodic_sched_assigned);
2260
2261 ret_val = DWC_OTG_TRANSACTION_PERIODIC;
2262 }
2263
2264 /*
2265 * Process entries in the inactive portion of the non-periodic
2266 * schedule. Some free host channels may not be used if they are
2267 * reserved for periodic transfers.
2268 */
2269 num_channels = hcd->core_if->core_params->host_channels;
2270
2271         /* Go over the queue twice: once while excluding NAK'd entries,
2272          * once while including them. This ensures that a retransmit of an
2273          * entry that has received a NAK is scheduled only after all new
2274          * entries.
2275          */
2276 channels_full = 0;
2277 for (include_nakd = 0; include_nakd < 2 && !channels_full; ++include_nakd) {
2278 qh_ptr = hcd->non_periodic_sched_inactive.next;
2279 while (qh_ptr != &hcd->non_periodic_sched_inactive) {
2280 qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
2281 qh_ptr = qh_ptr->next;
2282
2283                         /* If a NAK'd QH has been in the queue for 100ms (800
2284                          * frame counts at the 125us microframe rate), forget
2285                          * its NAK status, to prevent a NAK'd transfer from
2286                          * never being resubmitted because there are
2287                          * continuously non-NAKing transfers available. */
2288 if (qh->nak_frame != 0xffff &&
2289 dwc_frame_num_gt(cur_frame, qh->nak_frame + 800))
2290 qh->nak_frame = 0xffff;
2291
2292                         /* In the first pass, ignore NAK'd retransmits
2293                          * altogether, to give them lower priority. */
2294 if (!include_nakd && qh->nak_frame != 0xffff)
2295 continue;
2296
2297                         /*
2298                          * If this is a NAK'd retransmit that was already tried in the
2299                          * current full frame, hold off on the retry to reduce NAK
2300                          * interrupt overhead for devices that simply keep NAKing.
2301                          */
2302 if (dwc_full_frame_num(qh->nak_frame) == dwc_full_frame_num(dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd))))
2303 continue;
2304
2305 /* Ok, we found a candidate for scheduling. Is there a
2306 * free channel? */
2307 if (hcd->non_periodic_channels >=
2308 num_channels - hcd->periodic_channels ||
2309 list_empty(&hcd->free_hc_list)) {
2310 channels_full = 1;
2311 break;
2312 }
2313
2314 /* When retrying a NAK'd transfer, we give it a fair
2315 * chance of completing again. */
2316 qh->nak_frame = 0xffff;
2317 assign_and_init_hc(hcd, qh);
2318
2319 /*
2320 * Move the QH from the non-periodic inactive schedule to the
2321 * non-periodic active schedule.
2322 */
2323 list_move(&qh->qh_list_entry, &hcd->non_periodic_sched_active);
2324
2325 if (ret_val == DWC_OTG_TRANSACTION_NONE) {
2326 ret_val = DWC_OTG_TRANSACTION_NON_PERIODIC;
2327 } else {
2328 ret_val = DWC_OTG_TRANSACTION_ALL;
2329 }
2330
2331 hcd->non_periodic_channels++;
2332 }
2333 if (hcd->core_if->dma_enable && channels_full &&
2334 hcd->periodic_channels + hcd->nakking_channels >= num_channels) {
2335 /* There are items queued, but all channels are either
2336 * reserved for periodic or have received NAKs. This
2337 * means that it could take an indefinite amount of time
2338 * before a channel is actually freed (since in DMA
2339 * mode, the hardware takes care of retries), so we take
2340 * action here by forcing a nakking channel to halt to
2341 * give other transfers a chance to run. */
2342 dwc_otg_qtd_t *qtd = list_entry(qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry);
2343 struct urb *urb = qtd->urb;
2344 dwc_hc_t *hc = dwc_otg_halt_nakking_channel(hcd);
2345
2346 if (hc)
2347                         DWC_DEBUGPL(DBG_HCD, "Out of Host Channels for non-periodic transfer - Halting channel %d (dev %d ep%d%s) to service qh %p (dev %d ep%d%s)\n", hc->hc_num, hc->dev_addr, hc->ep_num, (hc->ep_is_in ? "in" : "out"), qh, usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe), (usb_pipein(urb->pipe) != 0) ? "in" : "out");
2348
2349 }
2350 }
2351
2352 SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags);
2353
2354 return ret_val;
2355 }
2356
2357 /**
2358 * Halt a bulk channel that is blocking on NAKs to free up space.
2359 *
2360 * This will decrement hcd->nakking_channels immediately, but
2361 * hcd->non_periodic_channels is not decremented until the channel is
2362 * actually halted.
2363 *
2364 * Returns the halted channel.
2365 */
2366 dwc_hc_t *dwc_otg_halt_nakking_channel(dwc_otg_hcd_t *hcd) {
2367 int num_channels, i;
2368 uint16_t cur_frame;
2369
2370 cur_frame = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd));
2371 num_channels = hcd->core_if->core_params->host_channels;
2372
2373 for (i = 0; i < num_channels; i++) {
2374 int channel = (hcd->last_channel_halted + 1 + i) % num_channels;
2375 dwc_hc_t *hc = hcd->hc_ptr_array[channel];
2376 if (hc->xfer_started
2377 && !hc->halt_on_queue
2378 && !hc->halt_pending
2379 && hc->qh->nak_frame != 0xffff) {
2380 dwc_otg_hc_halt(hcd, hc, DWC_OTG_HC_XFER_NAK);
2381 /* Store the last channel halted to
2382 * fairly rotate the channel to halt.
2383                          * This prevents the scenario where there
2384 * are three blocking endpoints and only
2385 * two free host channels, where the
2386 * blocking endpoint that gets hc 3 will
2387 * never be halted, while the other two
2388 * endpoints will be fighting over the
2389 * other host channel. */
2390 hcd->last_channel_halted = channel;
2391 /* Update nak_frame, so this frame is
2392 * kept at low priority for a period of
2393 * time starting now. */
2394 hc->qh->nak_frame = cur_frame;
2395 return hc;
2396 }
2397 }
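        /* No NAKing channel was eligible to be halted; dump the HCD state to
         * help diagnose the situation and return NULL. */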
2398 dwc_otg_hcd_dump_state(hcd);
2399 return NULL;
2400 }
2401
2402 /**
2403 * Attempts to queue a single transaction request for a host channel
2404 * associated with either a periodic or non-periodic transfer. This function
2405 * assumes that there is space available in the appropriate request queue. For
2406 * an OUT transfer or SETUP transaction in Slave mode, it checks whether space
2407 * is available in the appropriate Tx FIFO.
2408 *
2409 * @param hcd The HCD state structure.
2410 * @param hc Host channel descriptor associated with either a periodic or
2411 * non-periodic transfer.
2412 * @param fifo_dwords_avail Number of DWORDs available in the periodic Tx
2413 * FIFO for periodic transfers or the non-periodic Tx FIFO for non-periodic
2414 * transfers.
2415 *
2416 * @return 1 if a request is queued and more requests may be needed to
2417 * complete the transfer, 0 if no more requests are required for this
2418 * transfer, -1 if there is insufficient space in the Tx FIFO.
2419 */
2420 static int queue_transaction(dwc_otg_hcd_t *hcd,
2421 dwc_hc_t *hc,
2422 uint16_t fifo_dwords_avail)
2423 {
2424 int retval;
2425
2426 if (hcd->core_if->dma_enable) {
2427 if (!hc->xfer_started) {
2428 dwc_otg_hc_start_transfer(hcd->core_if, hc);
2429 hc->qh->ping_state = 0;
2430 }
2431 retval = 0;
2432 } else if (hc->halt_pending) {
2433 /* Don't queue a request if the channel has been halted. */
2434 retval = 0;
2435 } else if (hc->halt_on_queue) {
2436 dwc_otg_hc_halt(hcd, hc, hc->halt_status);
2437 retval = 0;
2438 } else if (hc->do_ping) {
2439 if (!hc->xfer_started) {
2440 dwc_otg_hc_start_transfer(hcd->core_if, hc);
2441 }
2442 retval = 0;
2443 } else if (!hc->ep_is_in ||
2444 hc->data_pid_start == DWC_OTG_HC_PID_SETUP) {
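                /* OUT/SETUP in Slave mode: only start or continue the transfer
                 * if the Tx FIFO can hold at least one maximum-size packet (the
                 * available space is reported in 32-bit words, hence the * 4). */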
2445 if ((fifo_dwords_avail * 4) >= hc->max_packet) {
2446 if (!hc->xfer_started) {
2447 dwc_otg_hc_start_transfer(hcd->core_if, hc);
2448 retval = 1;
2449 } else {
2450 retval = dwc_otg_hc_continue_transfer(hcd->core_if, hc);
2451 }
2452 } else {
2453 retval = -1;
2454 }
2455 } else {
2456 if (!hc->xfer_started) {
2457 dwc_otg_hc_start_transfer(hcd->core_if, hc);
2458 retval = 1;
2459 } else {
2460 retval = dwc_otg_hc_continue_transfer(hcd->core_if, hc);
2461 }
2462 }
2463
2464 return retval;
2465 }
2466
2467 /**
2468 * Processes active non-periodic channels and queues transactions for these
2469 * channels to the DWC_otg controller. After queueing transactions, the NP Tx
2470 * FIFO Empty interrupt is enabled if there are more transactions to queue as
2471 * NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx
2472 * FIFO Empty interrupt is disabled.
2473 */
2474 static void process_non_periodic_channels(dwc_otg_hcd_t *hcd)
2475 {
2476 gnptxsts_data_t tx_status;
2477 struct list_head *orig_qh_ptr;
2478 dwc_otg_qh_t *qh;
2479 int status;
2480 int no_queue_space = 0;
2481 int no_fifo_space = 0;
2482 int more_to_do = 0;
2483
2484 dwc_otg_core_global_regs_t *global_regs = hcd->core_if->core_global_regs;
2485
2486 DWC_DEBUGPL(DBG_HCDV, "Queue non-periodic transactions\n");
2487 #ifdef DEBUG
2488 tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts);
2489 DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail (before queue): %d\n",
2490 tx_status.b.nptxqspcavail);
2491 DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Avail (before queue): %d\n",
2492 tx_status.b.nptxfspcavail);
2493 #endif
2494 /*
2495 * Keep track of the starting point. Skip over the start-of-list
2496 * entry.
2497 */
2498 if (hcd->non_periodic_qh_ptr == &hcd->non_periodic_sched_active) {
2499 hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next;
2500 }
2501 orig_qh_ptr = hcd->non_periodic_qh_ptr;
2502
2503 /*
2504 * Process once through the active list or until no more space is
2505 * available in the request queue or the Tx FIFO.
2506 */
2507 do {
2508 tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts);
2509 if (!hcd->core_if->dma_enable && tx_status.b.nptxqspcavail == 0) {
2510 no_queue_space = 1;
2511 break;
2512 }
2513
2514 qh = list_entry(hcd->non_periodic_qh_ptr, dwc_otg_qh_t, qh_list_entry);
2515 status = queue_transaction(hcd, qh->channel, tx_status.b.nptxfspcavail);
2516
2517 if (status > 0) {
2518 more_to_do = 1;
2519 } else if (status < 0) {
2520 no_fifo_space = 1;
2521 break;
2522 }
2523
2524 /* Advance to next QH, skipping start-of-list entry. */
2525 hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next;
2526 if (hcd->non_periodic_qh_ptr == &hcd->non_periodic_sched_active) {
2527 hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next;
2528 }
2529
2530 } while (hcd->non_periodic_qh_ptr != orig_qh_ptr);
2531
2532 if (!hcd->core_if->dma_enable) {
2533 gintmsk_data_t intr_mask = {.d32 = 0};
2534 intr_mask.b.nptxfempty = 1;
2535
2536 #ifdef DEBUG
2537 tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts);
2538 DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail (after queue): %d\n",
2539 tx_status.b.nptxqspcavail);
2540 DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Avail (after queue): %d\n",
2541 tx_status.b.nptxfspcavail);
2542 #endif
2543 if (more_to_do || no_queue_space || no_fifo_space) {
2544 /*
2545 * May need to queue more transactions as the request
2546 * queue or Tx FIFO empties. Enable the non-periodic
2547 * Tx FIFO empty interrupt. (Always use the half-empty
2548 * level to ensure that new requests are loaded as
2549 * soon as possible.)
2550 */
2551 dwc_modify_reg32(&global_regs->gintmsk, 0, intr_mask.d32);
2552 } else {
2553 /*
2554 * Disable the Tx FIFO empty interrupt since there are
2555 * no more transactions that need to be queued right
2556 * now. This function is called from interrupt
2557 * handlers to queue more transactions as transfer
2558 * states change.
2559 */
2560 dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0);
2561 }
2562 }
2563 }
2564
2565 /**
2566 * Processes periodic channels for the next frame and queues transactions for
2567 * these channels to the DWC_otg controller. After queueing transactions, the
2568 * Periodic Tx FIFO Empty interrupt is enabled if there are more transactions
2569 * to queue as Periodic Tx FIFO or request queue space becomes available.
2570 * Otherwise, the Periodic Tx FIFO Empty interrupt is disabled.
2571 */
2572 static void process_periodic_channels(dwc_otg_hcd_t *hcd)
2573 {
2574 hptxsts_data_t tx_status;
2575 struct list_head *qh_ptr;
2576 dwc_otg_qh_t *qh;
2577 int status;
2578 int no_queue_space = 0;
2579 int no_fifo_space = 0;
2580
2581 dwc_otg_host_global_regs_t *host_regs;
2582 host_regs = hcd->core_if->host_if->host_global_regs;
2583
2584 DWC_DEBUGPL(DBG_HCD_FLOOD, "Queue periodic transactions\n");
2585 #ifdef DEBUG
2586 tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts);
2587 DWC_DEBUGPL(DBG_HCD_FLOOD, " P Tx Req Queue Space Avail (before queue): %d\n",
2588 tx_status.b.ptxqspcavail);
2589 DWC_DEBUGPL(DBG_HCD_FLOOD, " P Tx FIFO Space Avail (before queue): %d\n",
2590 tx_status.b.ptxfspcavail);
2591 #endif
2592
2593 qh_ptr = hcd->periodic_sched_assigned.next;
2594 while (qh_ptr != &hcd->periodic_sched_assigned) {
2595 tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts);
2596 if (tx_status.b.ptxqspcavail == 0) {
2597 no_queue_space = 1;
2598 break;
2599 }
2600
2601 qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
2602
2603                 /*
2604                  * Set a flag if we're queuing high-bandwidth in slave mode.
2605                  * The flag prevents any halts from getting into the request
2606                  * queue in the middle of multiple high-bandwidth packets being queued.
2607                  */
2608 if (!hcd->core_if->dma_enable &&
2609 qh->channel->multi_count > 1)
2610 {
2611 hcd->core_if->queuing_high_bandwidth = 1;
2612 }
2613
2614 status = queue_transaction(hcd, qh->channel, tx_status.b.ptxfspcavail);
2615 if (status < 0) {
2616 no_fifo_space = 1;
2617 break;
2618 }
2619
2620 /*
2621 * In Slave mode, stay on the current transfer until there is
2622 * nothing more to do or the high-bandwidth request count is
2623 * reached. In DMA mode, only need to queue one request. The
2624 * controller automatically handles multiple packets for
2625 * high-bandwidth transfers.
2626 */
2627 if (hcd->core_if->dma_enable || status == 0 ||
2628 qh->channel->requests == qh->channel->multi_count) {
2629 qh_ptr = qh_ptr->next;
2630 /*
2631 * Move the QH from the periodic assigned schedule to
2632 * the periodic queued schedule.
2633 */
2634 list_move(&qh->qh_list_entry, &hcd->periodic_sched_queued);
2635
2636 /* done queuing high bandwidth */
2637 hcd->core_if->queuing_high_bandwidth = 0;
2638 }
2639 }
2640
2641 if (!hcd->core_if->dma_enable) {
2642 dwc_otg_core_global_regs_t *global_regs;
2643 gintmsk_data_t intr_mask = {.d32 = 0};
2644
2645 global_regs = hcd->core_if->core_global_regs;
2646 intr_mask.b.ptxfempty = 1;
2647 #ifdef DEBUG
2648 tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts);
2649 DWC_DEBUGPL(DBG_HCDV, " P Tx Req Queue Space Avail (after queue): %d\n",
2650 tx_status.b.ptxqspcavail);
2651 DWC_DEBUGPL(DBG_HCDV, " P Tx FIFO Space Avail (after queue): %d\n",
2652 tx_status.b.ptxfspcavail);
2653 #endif
2654 if (!list_empty(&hcd->periodic_sched_assigned) ||
2655 no_queue_space || no_fifo_space) {
2656 /*
2657 * May need to queue more transactions as the request
2658 * queue or Tx FIFO empties. Enable the periodic Tx
2659 * FIFO empty interrupt. (Always use the half-empty
2660 * level to ensure that new requests are loaded as
2661 * soon as possible.)
2662 */
2663 dwc_modify_reg32(&global_regs->gintmsk, 0, intr_mask.d32);
2664 } else {
2665 /*
2666 * Disable the Tx FIFO empty interrupt since there are
2667 * no more transactions that need to be queued right
2668 * now. This function is called from interrupt
2669 * handlers to queue more transactions as transfer
2670 * states change.
2671 */
2672 dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0);
2673 }
2674 }
2675 }
2676
2677 /**
2678 * This function processes the currently active host channels and queues
2679 * transactions for these channels to the DWC_otg controller. It is called
2680 * from HCD interrupt handler functions.
2681 *
2682 * @param hcd The HCD state structure.
2683 * @param tr_type The type(s) of transactions to queue (non-periodic,
2684 * periodic, or both).
2685 */
2686 void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t *hcd,
2687 dwc_otg_transaction_type_e tr_type)
2688 {
2689 #ifdef DEBUG_SOF
2690 DWC_DEBUGPL(DBG_HCD, "Queue Transactions\n");
2691 #endif
2692 /* Process host channels associated with periodic transfers. */
2693 if ((tr_type == DWC_OTG_TRANSACTION_PERIODIC ||
2694 tr_type == DWC_OTG_TRANSACTION_ALL) &&
2695 !list_empty(&hcd->periodic_sched_assigned)) {
2696
2697 process_periodic_channels(hcd);
2698 }
2699
2700 /* Process host channels associated with non-periodic transfers. */
2701 if (tr_type == DWC_OTG_TRANSACTION_NON_PERIODIC ||
2702 tr_type == DWC_OTG_TRANSACTION_ALL) {
2703 if (!list_empty(&hcd->non_periodic_sched_active)) {
2704 process_non_periodic_channels(hcd);
2705 } else {
2706 /*
2707 * Ensure NP Tx FIFO empty interrupt is disabled when
2708 * there are no non-periodic transfers to process.
2709 */
2710 gintmsk_data_t gintmsk = {.d32 = 0};
2711 gintmsk.b.nptxfempty = 1;
2712 dwc_modify_reg32(&hcd->core_if->core_global_regs->gintmsk,
2713 gintmsk.d32, 0);
2714 }
2715 }
2716 }
2717
2718 /**
2719 * Sets the final status of an URB and returns it to the device driver. Any
2720 * required cleanup of the URB is performed.
2721 */
2722 void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t *hcd, struct urb *urb, int status)
2723 {
2724 unsigned long flags;
2725
2726 SPIN_LOCK_IRQSAVE(&hcd->lock, flags);
2727
2728 #ifdef DEBUG
2729
2730 if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
2731 DWC_PRINT("%s: urb %p, device %d, ep %d %s, status=%d\n",
2732 __func__, urb, usb_pipedevice(urb->pipe),
2733 usb_pipeendpoint(urb->pipe),
2734 usb_pipein(urb->pipe) ? "IN" : "OUT", status);
2735 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
2736 int i;
2737 for (i = 0; i < urb->number_of_packets; i++) {
2738 DWC_PRINT(" ISO Desc %d status: %d\n",
2739 i, urb->iso_frame_desc[i].status);
2740 }
2741 }
2742 }
2743 #endif
2744
2745         /* If the aligned bounce buffer was used instead of the original,
2746          * unaligned buffer, IN data must be copied back to the original buffer. */
2747         if ((urb->transfer_dma == urb->aligned_transfer_dma) && ((urb->transfer_flags & URB_DIR_MASK) == URB_DIR_IN)) {
2748                 dma_sync_single_for_device(NULL, urb->transfer_dma, urb->actual_length, DMA_FROM_DEVICE);
2749                 memcpy(urb->transfer_buffer, urb->aligned_transfer_buffer, urb->actual_length);
2750         }
2751
2752 usb_hcd_unlink_urb_from_ep(dwc_otg_hcd_to_hcd(hcd), urb);
2753 urb->status = status;
2754 urb->hcpriv = NULL;
2755 SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags);
2756 usb_hcd_giveback_urb(dwc_otg_hcd_to_hcd(hcd), urb, status);
2757
2758 }
2759
2760 /*
2761 * Returns the Queue Head for an URB.
2762 */
2763 dwc_otg_qh_t *dwc_urb_to_qh(struct urb *urb)
2764 {
2765 struct usb_host_endpoint *ep = dwc_urb_to_endpoint(urb);
2766 return (dwc_otg_qh_t *)ep->hcpriv;
2767 }
2768
2769 #ifdef DEBUG
2770 void dwc_print_setup_data(uint8_t *setup)
2771 {
2772 int i;
2773 if (CHK_DEBUG_LEVEL(DBG_HCD)){
2774 DWC_PRINT("Setup Data = MSB ");
2775 for (i = 7; i >= 0; i--) DWC_PRINT("%02x ", setup[i]);
2776 DWC_PRINT("\n");
2777                 DWC_PRINT("  bmRequestType Transfer = %s\n", (setup[0] & 0x80) ? "Device-to-Host" : "Host-to-Device");
2778 DWC_PRINT(" bmRequestType Type = ");
2779 switch ((setup[0] & 0x60) >> 5) {
2780 case 0: DWC_PRINT("Standard\n"); break;
2781 case 1: DWC_PRINT("Class\n"); break;
2782 case 2: DWC_PRINT("Vendor\n"); break;
2783 case 3: DWC_PRINT("Reserved\n"); break;
2784 }
2785 DWC_PRINT(" bmRequestType Recipient = ");
2786 switch (setup[0] & 0x1f) {
2787 case 0: DWC_PRINT("Device\n"); break;
2788 case 1: DWC_PRINT("Interface\n"); break;
2789 case 2: DWC_PRINT("Endpoint\n"); break;
2790 case 3: DWC_PRINT("Other\n"); break;
2791 default: DWC_PRINT("Reserved\n"); break;
2792 }
2793 DWC_PRINT(" bRequest = 0x%0x\n", setup[1]);
2794 DWC_PRINT(" wValue = 0x%0x\n", *((uint16_t *)&setup[2]));
2795 DWC_PRINT(" wIndex = 0x%0x\n", *((uint16_t *)&setup[4]));
2796 DWC_PRINT(" wLength = 0x%0x\n\n", *((uint16_t *)&setup[6]));
2797 }
2798 }
2799 #endif
2800
2801 void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t *hcd) {
2802 }
2803
2804 void dwc_otg_hcd_dump_state(dwc_otg_hcd_t *hcd)
2805 {
2806 #ifdef DEBUG
2807 int num_channels;
2808 int i;
2809 gnptxsts_data_t np_tx_status;
2810 hptxsts_data_t p_tx_status;
2811
2812 num_channels = hcd->core_if->core_params->host_channels;
2813 DWC_PRINT("\n");
2814 DWC_PRINT("************************************************************\n");
2815 DWC_PRINT("HCD State:\n");
2816 DWC_PRINT(" Num channels: %d\n", num_channels);
2817 for (i = 0; i < num_channels; i++) {
2818 dwc_hc_t *hc = hcd->hc_ptr_array[i];
2819 DWC_PRINT(" Channel %d: %p\n", i, hc);
2820 DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
2821 hc->dev_addr, hc->ep_num, hc->ep_is_in);
2822 DWC_PRINT(" speed: %d\n", hc->speed);
2823 DWC_PRINT(" ep_type: %d\n", hc->ep_type);
2824 DWC_PRINT(" max_packet: %d\n", hc->max_packet);
2825 DWC_PRINT(" data_pid_start: %d\n", hc->data_pid_start);
2826 DWC_PRINT(" multi_count: %d\n", hc->multi_count);
2827 DWC_PRINT(" xfer_started: %d\n", hc->xfer_started);
2828 DWC_PRINT(" xfer_buff: %p\n", hc->xfer_buff);
2829 DWC_PRINT(" xfer_len: %d\n", hc->xfer_len);
2830 DWC_PRINT(" xfer_count: %d\n", hc->xfer_count);
2831 DWC_PRINT(" halt_on_queue: %d\n", hc->halt_on_queue);
2832 DWC_PRINT(" halt_pending: %d\n", hc->halt_pending);
2833 DWC_PRINT(" halt_status: %d\n", hc->halt_status);
2834 DWC_PRINT(" do_split: %d\n", hc->do_split);
2835 DWC_PRINT(" complete_split: %d\n", hc->complete_split);
2836 DWC_PRINT(" hub_addr: %d\n", hc->hub_addr);
2837 DWC_PRINT(" port_addr: %d\n", hc->port_addr);
2838 DWC_PRINT(" xact_pos: %d\n", hc->xact_pos);
2839 DWC_PRINT(" requests: %d\n", hc->requests);
2840 DWC_PRINT(" qh: %p\n", hc->qh);
2841 if (hc->qh)
2842 DWC_PRINT(" nak_frame: %x\n", hc->qh->nak_frame);
2843 if (hc->xfer_started) {
2844 hfnum_data_t hfnum;
2845 hcchar_data_t hcchar;
2846 hctsiz_data_t hctsiz;
2847 hcint_data_t hcint;
2848 hcintmsk_data_t hcintmsk;
2849 hfnum.d32 = dwc_read_reg32(&hcd->core_if->host_if->host_global_regs->hfnum);
2850 hcchar.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hcchar);
2851 hctsiz.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hctsiz);
2852 hcint.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hcint);
2853 hcintmsk.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hcintmsk);
2854 DWC_PRINT(" hfnum: 0x%08x\n", hfnum.d32);
2855 DWC_PRINT(" hcchar: 0x%08x\n", hcchar.d32);
2856 DWC_PRINT(" hctsiz: 0x%08x\n", hctsiz.d32);
2857 DWC_PRINT(" hcint: 0x%08x\n", hcint.d32);
2858 DWC_PRINT(" hcintmsk: 0x%08x\n", hcintmsk.d32);
2859 }
2860 if (hc->xfer_started && hc->qh && hc->qh->qtd_in_process) {
2861 dwc_otg_qtd_t *qtd;
2862 struct urb *urb;
2863 qtd = hc->qh->qtd_in_process;
2864 urb = qtd->urb;
2865 DWC_PRINT(" URB Info:\n");
2866 DWC_PRINT(" qtd: %p, urb: %p\n", qtd, urb);
2867 if (urb) {
2868 DWC_PRINT(" Dev: %d, EP: %d %s\n",
2869 usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe),
2870 usb_pipein(urb->pipe) ? "IN" : "OUT");
2871 DWC_PRINT(" Max packet size: %d\n",
2872 usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
2873 DWC_PRINT(" transfer_buffer: %p\n", urb->transfer_buffer);
2874 DWC_PRINT(" transfer_dma: %p\n", (void *)urb->transfer_dma);
2875 DWC_PRINT(" transfer_buffer_length: %d\n", urb->transfer_buffer_length);
2876 DWC_PRINT(" actual_length: %d\n", urb->actual_length);
2877 }
2878 }
2879 }
2880 DWC_PRINT(" non_periodic_channels: %d\n", hcd->non_periodic_channels);
2881 DWC_PRINT(" periodic_channels: %d\n", hcd->periodic_channels);
2882 DWC_PRINT(" nakking_channels: %d\n", hcd->nakking_channels);
2883 DWC_PRINT(" last_channel_halted: %d\n", hcd->last_channel_halted);
2884 DWC_PRINT(" periodic_usecs: %d\n", hcd->periodic_usecs);
2885 np_tx_status.d32 = dwc_read_reg32(&hcd->core_if->core_global_regs->gnptxsts);
2886 DWC_PRINT(" NP Tx Req Queue Space Avail: %d\n", np_tx_status.b.nptxqspcavail);
2887 DWC_PRINT(" NP Tx FIFO Space Avail: %d\n", np_tx_status.b.nptxfspcavail);
2888 p_tx_status.d32 = dwc_read_reg32(&hcd->core_if->host_if->host_global_regs->hptxsts);
2889 DWC_PRINT(" P Tx Req Queue Space Avail: %d\n", p_tx_status.b.ptxqspcavail);
2890 DWC_PRINT(" P Tx FIFO Space Avail: %d\n", p_tx_status.b.ptxfspcavail);
2891 dwc_otg_hcd_dump_frrem(hcd);
2892 dwc_otg_dump_global_registers(hcd->core_if);
2893 dwc_otg_dump_host_registers(hcd->core_if);
2894 DWC_PRINT("************************************************************\n");
2895 DWC_PRINT("\n");
2896 #endif
2897 }
2898 #endif /* DWC_DEVICE_ONLY */