more USB driver changes
[openwrt/openwrt.git] / target / linux / adm5120 / files / drivers / usb / host / adm5120-q.c
1 /*
2 * OHCI HCD (Host Controller Driver) for USB.
3 *
4 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
5 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
6 *
7 * This file is licenced under the GPL.
8 */
9
10 #include <linux/irq.h>
11
12 /*-------------------------------------------------------------------------*/
13
/*
 * URB goes back to driver, and isn't reissued.
 * It's completely gone from HC data structures.
 * PRECONDITION: ahcd lock held, irqs blocked.
 *
 * NOTE: ahcd->lock is dropped and re-taken around the giveback call
 * (see sparse annotations below), because urb->complete() may legally
 * re-enter this HCD.
 */
static void
finish_urb(struct admhcd *ahcd, struct urb *urb)
__releases(ahcd->lock)
__acquires(ahcd->lock)
{
	/* per-URB TD bookkeeping is no longer needed */
	urb_priv_free(ahcd, urb->hcpriv);
	urb->hcpriv = NULL;

	spin_lock(&urb->lock);
	/* still "in progress" here means the transfer succeeded */
	if (likely(urb->status == -EINPROGRESS))
		urb->status = 0;

	/* report short control reads right even though the data TD always
	 * has TD_R set. (much simpler, but creates the 1-td limit.)
	 */
	if (unlikely(urb->transfer_flags & URB_SHORT_NOT_OK)
			&& unlikely(usb_pipecontrol(urb->pipe))
			&& urb->actual_length < urb->transfer_buffer_length
			&& usb_pipein(urb->pipe)
			&& urb->status == 0) {
		urb->status = -EREMOTEIO;
#ifdef ADMHC_VERBOSE_DEBUG
		urb_print(ahcd, urb, "SHORT", usb_pipeout (urb->pipe));
#endif
	}
	spin_unlock(&urb->lock);

	/* periodic transfers stop consuming scheduled bandwidth */
	switch (usb_pipetype(urb->pipe)) {
	case PIPE_ISOCHRONOUS:
		admhcd_to_hcd(ahcd)->self.bandwidth_isoc_reqs--;
		break;
	case PIPE_INTERRUPT:
		admhcd_to_hcd(ahcd)->self.bandwidth_int_reqs--;
		break;
	}

#ifdef ADMHC_VERBOSE_DEBUG
	urb_print(ahcd, urb, "FINISH", 0);
#endif

	/* urb->complete() can reenter this HCD */
	spin_unlock(&ahcd->lock);
	usb_hcd_giveback_urb(admhcd_to_hcd(ahcd), urb);
	spin_lock(&ahcd->lock);
}
64
65
66 /*-------------------------------------------------------------------------*
67 * ED handling functions
68 *-------------------------------------------------------------------------*/
69
70 static struct ed *ed_create(struct admhcd *ahcd, unsigned int type, u32 info)
71 {
72 struct ed *ed;
73 struct td *td;
74
75 ed = ed_alloc(ahcd, GFP_ATOMIC);
76 if (!ed)
77 goto err;
78
79 /* dummy td; end of td list for this ed */
80 td = td_alloc(ahcd, GFP_ATOMIC);
81 if (!td)
82 goto err_free_ed;
83
84 switch (type) {
85 case PIPE_INTERRUPT:
86 info |= ED_INT;
87 break;
88 case PIPE_ISOCHRONOUS:
89 info |= ED_ISO;
90 break;
91 }
92
93 info |= ED_SKIP;
94
95 ed->dummy = td;
96 ed->state = ED_NEW;
97 ed->type = type;
98
99 ed->hwINFO = cpu_to_hc32(ahcd, info);
100 ed->hwTailP = cpu_to_hc32(ahcd, td->td_dma);
101 ed->hwHeadP = cpu_to_hc32(ahcd, td->td_dma);
102
103 return ed;
104
105 err_free_ed:
106 ed_free(ahcd, ed);
107 err:
108 return NULL;
109 }
110
111 /* get and maybe (re)init an endpoint. init _should_ be done only as part
112 * of enumeration, usb_set_configuration() or usb_set_interface().
113 */
114 static struct ed *ed_get(struct admhcd *ahcd, struct usb_host_endpoint *ep,
115 struct usb_device *udev, unsigned int pipe, int interval)
116 {
117 struct ed *ed;
118
119 ed = ep->hcpriv;
120 if (!ed) {
121 u32 info;
122
123 /* FIXME: usbcore changes dev->devnum before SET_ADDRESS
124 * suceeds ... otherwise we wouldn't need "pipe".
125 */
126 info = usb_pipedevice(pipe);
127 info |= (ep->desc.bEndpointAddress & ~USB_DIR_IN) << ED_EN_SHIFT;
128 info |= le16_to_cpu(ep->desc.wMaxPacketSize) << ED_MPS_SHIFT;
129 if (udev->speed == USB_SPEED_FULL)
130 info |= ED_SPEED_FULL;
131
132 ed = ed_create(ahcd, usb_pipetype(pipe), info);
133 if (ed)
134 ep->hcpriv = ed;
135 }
136
137 return ed;
138 }
139
140 static void ed_next_urb(struct admhcd *ahcd, struct ed *ed)
141 {
142 struct urb_priv *up;
143 u32 carry;
144
145 up = list_entry(ed->urb_pending.next, struct urb_priv, pending);
146 list_del(&up->pending);
147
148 ed->urb_active = up;
149 ed->state = ED_OPER;
150
151 #ifdef ADMHC_VERBOSE_DEBUG
152 urb_print(ahcd, up->urb, "NEXT", 0);
153 admhc_dump_ed(ahcd, " ", ed, 0);
154 #endif
155
156 up->td[up->td_cnt-1]->hwNextTD = cpu_to_hc32(ahcd, ed->dummy->td_dma);
157
158 carry = hc32_to_cpup(ahcd, &ed->hwHeadP) & ED_C;
159 ed->hwHeadP = cpu_to_hc32(ahcd, up->td[0]->td_dma | carry);
160 ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP);
161 }
162
163 /* link an ed into the HC chain */
164 static int ed_schedule(struct admhcd *ahcd, struct ed *ed)
165 {
166 struct ed *old_tail;
167
168 if (admhcd_to_hcd(ahcd)->state == HC_STATE_QUIESCING)
169 return -EAGAIN;
170
171 if (ed->state == ED_NEW) {
172 ed->state = ED_IDLE;
173
174 old_tail = ahcd->ed_tails[ed->type];
175
176 ed->ed_next = old_tail->ed_next;
177 if (ed->ed_next) {
178 ed->ed_next->ed_prev = ed;
179 ed->hwNextED = cpu_to_hc32(ahcd, ed->ed_next->dma);
180 }
181 ed->ed_prev = old_tail;
182
183 old_tail->ed_next = ed;
184 old_tail->hwNextED = cpu_to_hc32(ahcd, ed->dma);
185
186 ahcd->ed_tails[ed->type] = ed;
187 ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP);
188 }
189
190 #ifdef ADMHC_VERBOSE_DEBUG
191 admhc_dump_ed(ahcd, "ED-SCHED", ed, 0);
192 #endif
193
194 if (!ed->urb_active) {
195 ed_next_urb(ahcd, ed);
196 admhc_dma_enable(ahcd);
197 }
198
199 return 0;
200 }
201
/*
 * Unlink an ED from both the hardware ED chain and the driver's shadow
 * list.  Must only run once the HC is known to have stopped looking at
 * this ED (see ed_start_deschedule()/admhc_finish_unlinks()).
 */
static void ed_deschedule(struct admhcd *ahcd, struct ed *ed)
{

#ifdef ADMHC_VERBOSE_DEBUG
	admhc_dump_ed(ahcd, "ED-DESCHED", ed, 0);
#endif

	/* remove this ED from the HC list */
	ed->ed_prev->hwNextED = ed->hwNextED;

	/* and remove it from our list */
	ed->ed_prev->ed_next = ed->ed_next;

	if (ed->ed_next) {
		ed->ed_next->ed_prev = ed->ed_prev;
		ed->ed_next = NULL;
	}

	/* if this was the tail of its type, the predecessor becomes it */
	if (ahcd->ed_tails[ed->type] == ed)
		ahcd->ed_tails[ed->type] = ed->ed_prev;

	/* back to "never scheduled"; ed_schedule() can relink it later */
	ed->state = ED_NEW;
}
225
/*
 * Begin unlinking an ED: tell the HC to skip it, and arrange for
 * admhc_finish_unlinks() (driven from the SOF interrupt) to complete
 * the unlink once the HC can no longer be touching this ED.
 */
static void ed_start_deschedule(struct admhcd *ahcd, struct ed *ed)
{

#ifdef ADMHC_VERBOSE_DEBUG
	admhc_dump_ed(ahcd, "ED-UNLINK", ed, 0);
#endif

	ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
	ed->state = ED_UNLINK;

	/* SOF interrupt might get delayed; record the frame counter value that
	 * indicates when the HC isn't looking at it, so concurrent unlinks
	 * behave. frame_no wraps every 2^16 msec, and changes right before
	 * SOF is triggered.
	 */
	ed->tick = admhc_frame_no(ahcd) + 1;

	/* the SOF handler finishes the unlink */
	admhc_intr_enable(ahcd, ADMHC_INTR_SOFI);
}
245
246 /*-------------------------------------------------------------------------*
247 * TD handling functions
248 *-------------------------------------------------------------------------*/
249
250 static void td_fill(struct admhcd *ahcd, u32 info, dma_addr_t data, int len,
251 struct urb_priv *up)
252 {
253 struct td *td;
254 u32 cbl = 0;
255
256 if (up->td_idx >= up->td_cnt) {
257 admhc_err(ahcd, "td_fill error, idx=%d, cnt=%d\n", up->td_idx,
258 up->td_cnt);
259 BUG();
260 }
261
262 td = up->td[up->td_idx];
263 td->data_dma = data;
264 if (!len)
265 data = 0;
266
267 if (up->td_idx == up->td_cnt-1)
268 cbl |= TD_IE;
269
270 if (data)
271 cbl |= (len & TD_BL_MASK);
272
273 info |= TD_OWN;
274
275 /* setup hardware specific fields */
276 td->hwINFO = cpu_to_hc32(ahcd, info);
277 td->hwDBP = cpu_to_hc32(ahcd, data);
278 td->hwCBL = cpu_to_hc32(ahcd, cbl);
279
280 if (up->td_idx > 0)
281 up->td[up->td_idx-1]->hwNextTD = cpu_to_hc32(ahcd, td->td_dma);
282
283 up->td_idx++;
284 }
285
286 /*-------------------------------------------------------------------------*/
287
/* Prepare all TDs of a transfer, and queue them onto the ED.
 * Caller guarantees HC is active.
 * Usually the ED is already on the schedule, so TDs might be
 * processed as soon as they're queued.
 */
static void td_submit_urb(struct admhcd *ahcd, struct urb *urb)
{
	struct urb_priv *urb_priv = urb->hcpriv;
	dma_addr_t data;
	int data_len = urb->transfer_buffer_length;
	int cnt = 0;			/* TDs filled so far */
	u32 info = 0;
	int is_out = usb_pipeout(urb->pipe);
	u32 toggle = 0;

	/* OHCI handles the bulk/interrupt data toggles itself. We just
	 * use the device toggle bits for resetting, and rely on the fact
	 * that resetting toggle is meaningless if the endpoint is active.
	 */

	if (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), is_out)) {
		/* continue the toggle sequence from the ED's carry bit */
		toggle = TD_T_CARRY;
	} else {
		/* toggle was reset: start at DATA0 and record that the
		 * sequence is now running */
		toggle = TD_T_DATA0;
		usb_settoggle(urb->dev, usb_pipeendpoint (urb->pipe),
			is_out, 1);
	}

	urb_priv->td_idx = 0;

	if (data_len)
		data = urb->transfer_dma;
	else
		data = 0;

	/* NOTE: TD_CC is set so we can tell which TDs the HC processed by
	 * using TD_CC_GET, as well as by seeing them on the done list.
	 * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
	 */
	switch (urb_priv->ed->type) {
	case PIPE_INTERRUPT:
		info = is_out
			? TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_OUT
			: TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_IN;

		/* setup service interval and starting frame number */
		info |= (urb->start_frame & TD_FN_MASK);
		info |= (urb->interval & TD_ISI_MASK) << TD_ISI_SHIFT;

		/* interrupt transfers use a single TD */
		td_fill(ahcd, info, data, data_len, urb_priv);
		cnt++;

		admhcd_to_hcd(ahcd)->self.bandwidth_int_reqs++;
		break;

	case PIPE_BULK:
		info = is_out
			? TD_SCC_NOTACCESSED | TD_DP_OUT
			: TD_SCC_NOTACCESSED | TD_DP_IN;

		/* TDs _could_ transfer up to 8K each */
		while (data_len > TD_DATALEN_MAX) {
			/* only the first TD carries the explicit toggle;
			 * the rest inherit it via TD_T_CARRY */
			td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle),
				data, TD_DATALEN_MAX, urb_priv);
			data += TD_DATALEN_MAX;
			data_len -= TD_DATALEN_MAX;
			cnt++;
		}

		/* final (possibly short or zero-length) chunk */
		td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle), data,
			data_len, urb_priv);
		cnt++;

		/* explicit zero-length packet was requested and a TD was
		 * preallocated for it */
		if ((urb->transfer_flags & URB_ZERO_PACKET)
				&& (cnt < urb_priv->td_cnt)) {
			td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle),
				0, 0, urb_priv);
			cnt++;
		}
		break;

	/* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
	 * any DATA phase works normally, and the STATUS ack is special.
	 */
	case PIPE_CONTROL:
		/* fill a TD for the setup */
		info = TD_SCC_NOTACCESSED | TD_DP_SETUP | TD_T_DATA0;
		td_fill(ahcd, info, urb->setup_dma, 8, urb_priv);
		cnt++;

		if (data_len > 0) {
			/* fill a TD for the data */
			info = TD_SCC_NOTACCESSED | TD_T_DATA1;
			info |= is_out ? TD_DP_OUT : TD_DP_IN;
			/* NOTE: mishandles transfers >8K, some >4K */
			td_fill(ahcd, info, data, data_len, urb_priv);
			cnt++;
		}

		/* fill a TD for the ACK (opposite direction, always DATA1) */
		info = (is_out || data_len == 0)
			? TD_SCC_NOTACCESSED | TD_DP_IN | TD_T_DATA1
			: TD_SCC_NOTACCESSED | TD_DP_OUT | TD_T_DATA1;
		td_fill(ahcd, info, data, 0, urb_priv);
		cnt++;

		break;

	/* ISO has no retransmit, so no toggle;
	 * Each TD could handle multiple consecutive frames (interval 1);
	 * we could often reduce the number of TDs here.
	 */
	case PIPE_ISOCHRONOUS:
		info = TD_SCC_NOTACCESSED;
		for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
			/* one TD per ISO packet, tagged with its frame */
			int frame = urb->start_frame;

			frame += cnt * urb->interval;
			frame &= TD_FN_MASK;
			td_fill(ahcd, info | frame,
				data + urb->iso_frame_desc[cnt].offset,
				urb->iso_frame_desc[cnt].length,
				urb_priv);
		}
		admhcd_to_hcd(ahcd)->self.bandwidth_isoc_reqs++;
		break;
	}

	/* a mismatch means the TD count computed at enqueue was wrong */
	if (urb_priv->td_cnt != cnt)
		admhc_err(ahcd, "bad number of tds created for urb %p\n", urb);

	/* rewind so ed_update() retires the TDs from the start */
	urb_priv->td_idx = 0;
}
421
/* calculate transfer length/status and update the urb
 * PRECONDITION: irqsafe (only for urb->status locking)
 *
 * Returns the TD's condition code (TD_CC_*); the caller maps it to an
 * errno via cc_to_error[].
 */
static int td_done(struct admhcd *ahcd, struct urb *urb, struct td *td)
{
	u32 info = hc32_to_cpup(ahcd, &td->hwINFO);
	u32 dbp = hc32_to_cpup(ahcd, &td->hwDBP);
	u32 cbl = TD_BL_GET(hc32_to_cpup(ahcd, &td->hwCBL));
	int type = usb_pipetype(urb->pipe);
	int cc;

	cc = TD_CC_GET(info);

	/* ISO ... drivers see per-TD length/status
	 * (per-packet accounting below is unfinished and compiled out) */
	if (type == PIPE_ISOCHRONOUS) {
#if 0
		/* TODO */
		int dlen = 0;

		/* NOTE: assumes FC in tdINFO == 0, and that
		 * only the first of 0..MAXPSW psws is used.
		 */

		cc = TD_CC_GET(td);
		if (tdINFO & TD_CC)	/* hc didn't touch? */
			return;

		if (usb_pipeout (urb->pipe))
			dlen = urb->iso_frame_desc[td->index].length;
		else {
			/* short reads are always OK for ISO */
			if (cc == TD_DATAUNDERRUN)
				cc = TD_CC_NOERROR;
			dlen = tdPSW & 0x3ff;
		}

		urb->actual_length += dlen;
		urb->iso_frame_desc[td->index].actual_length = dlen;
		urb->iso_frame_desc[td->index].status = cc_to_error[cc];

		if (cc != TD_CC_NOERROR)
			admhc_vdbg (ahcd,
				"urb %p iso td %p (%d) len %d cc %d\n",
				urb, td, 1 + td->index, dlen, cc);
#endif
	/* BULK, INT, CONTROL ... drivers see aggregate length/status,
	 * except that "setup" bytes aren't counted and "short" transfers
	 * might not be reported as errors.
	 */
	} else {

#ifdef ADMHC_VERBOSE_DEBUG
		admhc_dump_td(ahcd, "td_done", td);
#endif

		/* count all non-empty packets except control SETUP packet */
		if ((type != PIPE_CONTROL || td->index != 0) && dbp != 0) {
			/* bytes transferred = advance of the buffer pointer
			 * from its original DMA address, plus the residual
			 * byte count left in the TD */
			urb->actual_length += dbp - td->data_dma + cbl;
		}
	}

	return cc;
}
485
486 /*-------------------------------------------------------------------------*/
487
/*
 * Retire every TD of the ED's active URB that the HC has finished
 * (TD_OWN cleared), and give the URB back once all its TDs are done —
 * or immediately when @force is set (endpoint halt / unlink).
 */
static void ed_update(struct admhcd *ahcd, struct ed *ed, int force)
{
	struct urb_priv *up;
	struct urb *urb;
	int cc;

	up = ed->urb_active;
	if (!up)
		return;

	urb = up->urb;

#ifdef ADMHC_VERBOSE_DEBUG
	urb_print(ahcd, urb, "UPDATE", 0);
	admhc_dump_ed(ahcd, "ED-UPDATE", ed, 1);
#endif

	cc = TD_CC_NOERROR;
	for (; up->td_idx < up->td_cnt; up->td_idx++) {
		struct td *td = up->td[up->td_idx];

		/* the controller still owns this TD: stop scanning */
		if (hc32_to_cpup(ahcd, &td->hwINFO) & TD_OWN)
			break;

		cc = td_done(ahcd, urb, td);
		if (cc != TD_CC_NOERROR) {
			admhc_vdbg(ahcd,
				"urb %p td %p (%d) cc %d, len=%d/%d\n",
				urb, td, td->index, cc,
				urb->actual_length,
				urb->transfer_buffer_length);

			/* an error aborts the rest of the transfer */
			up->td_idx = up->td_cnt;
			break;
		}
	}

	if ((up->td_idx != up->td_cnt) && (!force))
		/* the URB is not completed yet */
		return;

	/* update packet status if needed (short is normally ok) */
	if (cc == TD_CC_DATAUNDERRUN
			&& !(urb->transfer_flags & URB_SHORT_NOT_OK))
		cc = TD_CC_NOERROR;

	/* map a hardware error to an errno, but never overwrite a status
	 * that was already set (e.g. by an unlink) */
	if (cc != TD_CC_NOERROR && cc < TD_CC_HCD0) {
		spin_lock(&urb->lock);
		if (urb->status == -EINPROGRESS)
			urb->status = cc_to_error[cc];
		spin_unlock(&urb->lock);
	}

	finish_urb(ahcd, urb);

	/* this ED is now free for its next pending URB */
	ed->urb_active = NULL;
	ed->state = ED_IDLE;
}
546
547 /* there are some tds completed; called in_irq(), with HCD locked */
548 static void admhc_td_complete(struct admhcd *ahcd)
549 {
550 struct ed *ed;
551 int more = 0;
552
553 for (ed = ahcd->ed_head; ed; ed = ed->ed_next) {
554 if (ed->state != ED_OPER)
555 continue;
556
557 if (hc32_to_cpup(ahcd, &ed->hwHeadP) & ED_H) {
558 admhc_dump_ed(ahcd, "ed halted", ed, 1);
559 ed_update(ahcd, ed, 1);
560 ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);
561 } else
562 ed_update(ahcd, ed, 0);
563
564 if (ed->urb_active) {
565 more = 1;
566 continue;
567 }
568
569 if (!(list_empty(&ed->urb_pending))) {
570 more = 1;
571 ed_next_urb(ahcd, ed);
572 continue;
573 }
574
575 ed_start_deschedule(ahcd, ed);
576 }
577
578 if (!more)
579 admhc_dma_disable(ahcd);
580
581 }
582
583 /* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
584 static void admhc_finish_unlinks(struct admhcd *ahcd, u16 tick)
585 {
586 struct ed *ed;
587 int more = 0;
588
589 for (ed = ahcd->ed_head; ed; ed = ed->ed_next) {
590 if (ed->state != ED_UNLINK)
591 continue;
592
593 if (likely(HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state)))
594 if (tick_before(tick, ed->tick)) {
595 more = 1;
596 continue;
597 }
598
599 /* process partial status */
600 if (ed->urb_active)
601 ed_update(ahcd, ed, 1);
602
603 if (list_empty(&ed->urb_pending))
604 ed_deschedule(ahcd, ed);
605 else
606 ed_schedule(ahcd, ed);
607 }
608
609 if (!more)
610 if (likely(HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state)))
611 admhc_intr_disable(ahcd, ADMHC_INTR_SOFI);
612 }