2 * OHCI HCD (Host Controller Driver) for USB.
4 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
5 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
7 * This file is licenced under the GPL.
10 #include <linux/irq.h>
12 /*-------------------------------------------------------------------------*/
15 * URB goes back to driver, and isn't reissued.
16 * It's completely gone from HC data structures.
17 * PRECONDITION: ahcd lock held, irqs blocked.
/* NOTE(review): this extract is line-mangled — original line numbers are fused
 * into the text, statements wrap across lines, and some lines (braces, breaks,
 * blanks) are missing from the capture.  Code tokens are kept byte-identical.
 *
 * Visible behavior: frees the per-URB private state, then (under urb->lock)
 * promotes a still -EINPROGRESS status; flags a short control IN read as
 * -EREMOTEIO when URB_SHORT_NOT_OK is set; decrements the bus bandwidth
 * accounting for ISO (and, presumably, interrupt — the case label is missing
 * from the capture) pipes; finally gives the URB back to usbcore with
 * ahcd->lock temporarily dropped, since urb->complete() may reenter this HCD.
 * Caller must hold ahcd->lock with IRQs blocked (see precondition above).
 */
20 finish_urb(struct admhcd
*ahcd
, struct urb
*urb
)
21 __releases(ahcd
->lock
)
22 __acquires(ahcd
->lock
)
24 urb_priv_free(ahcd
, urb
->hcpriv
);
27 spin_lock(&urb
->lock
);
28 if (likely(urb
->status
== -EINPROGRESS
))
31 /* report short control reads right even though the data TD always
32 * has TD_R set. (much simpler, but creates the 1-td limit.)
34 if (unlikely(urb
->transfer_flags
& URB_SHORT_NOT_OK
)
35 && unlikely(usb_pipecontrol(urb
->pipe
))
36 && urb
->actual_length
< urb
->transfer_buffer_length
37 && usb_pipein(urb
->pipe
)
38 && urb
->status
== 0) {
39 urb
->status
= -EREMOTEIO
;
40 #ifdef ADMHC_VERBOSE_DEBUG
41 urb_print(ahcd
, urb
, "SHORT", usb_pipeout (urb
->pipe
));
44 spin_unlock(&urb
->lock
);
46 switch (usb_pipetype(urb
->pipe
)) {
47 case PIPE_ISOCHRONOUS
:
48 admhcd_to_hcd(ahcd
)->self
.bandwidth_isoc_reqs
--;
/* NOTE(review): the PIPE_INTERRUPT case label is missing from this capture;
 * the decrement below clearly belongs to it. */
51 admhcd_to_hcd(ahcd
)->self
.bandwidth_int_reqs
--;
55 #ifdef ADMHC_VERBOSE_DEBUG
56 urb_print(ahcd
, urb
, "FINISH", 0);
59 /* urb->complete() can reenter this HCD */
60 spin_unlock(&ahcd
->lock
);
61 usb_hcd_giveback_urb(admhcd_to_hcd(ahcd
), urb
);
62 spin_lock(&ahcd
->lock
);
66 /*-------------------------------------------------------------------------*
67 * ED handling functions
68 *-------------------------------------------------------------------------*/
/* NOTE(review): line-mangled extract; tokens kept byte-identical, lines are
 * missing (error paths and the switch body around line 88 are not captured).
 *
 * Visible behavior: allocates an ED and a dummy TD (GFP_ATOMIC), then
 * initializes the hardware descriptor: hwINFO from the caller-built info
 * word, and both hwTailP and hwHeadP pointing at the dummy TD's DMA address
 * — i.e. an empty TD queue, OHCI-style.  The ISO case label suggests
 * type-specific setup whose body was lost in the capture.
 */
70 static struct ed
*ed_create(struct admhcd
*ahcd
, unsigned int type
, u32 info
)
75 ed
= ed_alloc(ahcd
, GFP_ATOMIC
);
79 /* dummy td; end of td list for this ed */
80 td
= td_alloc(ahcd
, GFP_ATOMIC
);
88 case PIPE_ISOCHRONOUS
:
97 ed
->hwINFO
= cpu_to_hc32(ahcd
, info
);
98 ed
->hwTailP
= cpu_to_hc32(ahcd
, td
->td_dma
);
99 ed
->hwHeadP
= cpu_to_hc32(ahcd
, td
->td_dma
);
109 /* get and maybe (re)init an endpoint. init _should_ be done only as part
110 * of enumeration, usb_set_configuration() or usb_set_interface().
/* NOTE(review): line-mangled extract; tokens kept byte-identical, several
 * lines missing (the "ep->hcpriv already set" fast path and the return are
 * not captured).
 *
 * Visible behavior: under ahcd->lock (irqsave), builds the ED info word from
 * the device address, endpoint number (direction bit masked off) shifted into
 * ED_EN, wMaxPacketSize shifted into ED_MPS, plus ED_SPEED_FULL for
 * full-speed devices, then creates the ED via ed_create().  The `interval`
 * parameter's use is not visible in this capture.
 */
112 static struct ed
*ed_get(struct admhcd
*ahcd
, struct usb_host_endpoint
*ep
,
113 struct usb_device
*udev
, unsigned int pipe
, int interval
)
118 spin_lock_irqsave(&ahcd
->lock
, flags
);
123 /* FIXME: usbcore changes dev->devnum before SET_ADDRESS
124 * succeeds ... otherwise we wouldn't need "pipe".
126 info
= usb_pipedevice(pipe
);
127 info
|= (ep
->desc
.bEndpointAddress
& ~USB_DIR_IN
) << ED_EN_SHIFT
;
128 info
|= le16_to_cpu(ep
->desc
.wMaxPacketSize
) << ED_MPS_SHIFT
;
129 if (udev
->speed
== USB_SPEED_FULL
)
130 info
|= ED_SPEED_FULL
;
132 ed
= ed_create(ahcd
, usb_pipetype(pipe
), info
);
136 spin_unlock_irqrestore(&ahcd
->lock
, flags
);
141 /* link an ed into the HC chain */
/* NOTE(review): line-mangled extract; tokens kept byte-identical, lines
 * missing (early-return bodies and the final return are not captured).
 *
 * Visible behavior: refuses to schedule while the HC is quiescing or the ED
 * is not in ED_NEW state; clears ED_SKIP; then splices the ED into the
 * software list after the current tail for its type — software next/prev
 * pointers first, then the hardware hwNextED links, then advances
 * ahcd->ed_tails[type] — and enables the SOF interrupt so the schedule
 * change is noticed.  The write ordering (patch the new ED before linking
 * the old tail to it) appears deliberate so the HC never follows a
 * half-initialized link — TODO confirm against the missing lines.
 */
142 static int ed_schedule(struct admhcd
*ahcd
, struct ed
*ed
)
146 if (admhcd_to_hcd(ahcd
)->state
== HC_STATE_QUIESCING
)
149 if (ed
->state
!= ED_NEW
)
152 admhc_dump_ed(ahcd
, "ED-SCHED", ed
, 0);
156 ed
->hwINFO
&= ~cpu_to_hc32(ahcd
, ED_SKIP
);
158 old_tail
= ahcd
->ed_tails
[ed
->type
];
160 ed
->ed_next
= old_tail
->ed_next
;
162 ed
->ed_next
->ed_prev
= ed
;
163 ed
->hwNextED
= cpu_to_hc32(ahcd
, ed
->ed_next
->dma
);
165 ed
->ed_prev
= old_tail
;
167 old_tail
->ed_next
= ed
;
168 old_tail
->hwNextED
= cpu_to_hc32(ahcd
, ed
->dma
);
170 ahcd
->ed_tails
[ed
->type
] = ed
;
172 admhc_intr_enable(ahcd
, ADMHC_INTR_SOFI
);
/* NOTE(review): line-mangled extract; tokens kept byte-identical.  Lines
 * 186-187 and 189-191 of the original are missing — presumably a null-check
 * guard around the ed_next back-pointer fixup; do not assume the fixup is
 * unconditional.
 *
 * Visible behavior: unlinks the ED from the hardware chain (patch the
 * predecessor's hwNextED past this ED) and from the software doubly linked
 * list, and retreats ahcd->ed_tails[type] if this ED was the tail.
 */
177 static void ed_deschedule(struct admhcd
*ahcd
, struct ed
*ed
)
179 admhc_dump_ed(ahcd
, "ED-DESCHED", ed
, 0);
181 /* remove this ED from the HC list */
182 ed
->ed_prev
->hwNextED
= ed
->hwNextED
;
184 /* and remove it from our list */
185 ed
->ed_prev
->ed_next
= ed
->ed_next
;
188 ed
->ed_next
->ed_prev
= ed
->ed_prev
;
192 if (ahcd
->ed_tails
[ed
->type
] == ed
)
193 ahcd
->ed_tails
[ed
->type
] = ed
->ed_prev
;
/* NOTE(review): line-mangled extract; tokens kept byte-identical, a few
 * lines (braces, comment tail) missing from the capture.
 *
 * Visible behavior: begins the two-phase unlink of an ED — sets ED_SKIP so
 * the HC stops processing it, marks the software state ED_UNLINK, pushes the
 * ED onto ahcd->ed_rm_list, records the frame number after which the HC is
 * guaranteed not to be touching it (ed->tick = current frame + 1), and
 * enables the SOF interrupt so admhc_finish_unlinks() runs later.
 */
198 static void ed_start_deschedule(struct admhcd
*ahcd
, struct ed
*ed
)
200 admhc_dump_ed(ahcd
, "ED-UNLINK", ed
, 0);
202 ed
->hwINFO
|= cpu_to_hc32(ahcd
, ED_SKIP
);
204 ed
->state
= ED_UNLINK
;
206 /* add this ED into the remove list */
207 ed
->ed_rm_next
= ahcd
->ed_rm_list
;
208 ahcd
->ed_rm_list
= ed
;
210 /* SOF interrupt might get delayed; record the frame counter value that
211 * indicates when the HC isn't looking at it, so concurrent unlinks
212 * behave. frame_no wraps every 2^16 msec, and changes right before
215 ed
->tick
= admhc_frame_no(ahcd
) + 1;
217 /* enable SOF interrupt */
218 admhc_intr_enable(ahcd
, ADMHC_INTR_SOFI
);
221 /*-------------------------------------------------------------------------*
222 * TD handling functions
223 *-------------------------------------------------------------------------*/
/* NOTE(review): line-mangled extract; tokens kept byte-identical.  The
 * parameter list is cut off after `len` (a urb_priv pointer presumably
 * follows, given the callers), and the lines building `cbl` before line 248
 * — including where TD_OWN/IE bits would be set — are missing.
 *
 * Visible behavior: bounds-checks the TD index against the preallocated
 * count (logging via admhc_dbg on overflow), fetches the next preallocated
 * TD, masks the length into the CBL word (TD_BL_MASK), writes the
 * hardware fields hwINFO/hwDBP/hwCBL, and chains the previous TD's
 * hwNextTD to this TD's DMA address.  The td_cnt-1 comparison at line 243
 * marks the last TD of the URB — its consequence is not captured.
 */
225 static void td_fill(struct admhcd
*ahcd
, u32 info
, dma_addr_t data
, int len
,
231 if (up
->td_idx
>= up
->td_cnt
) {
232 admhc_dbg(ahcd
, "td_fill error, idx=%d, cnt=%d\n", up
->td_idx
,
237 td
= up
->td
[up
->td_idx
];
243 if (up
->td_idx
== up
->td_cnt
-1)
248 cbl
|= (len
& TD_BL_MASK
);
252 /* setup hardware specific fields */
253 td
->hwINFO
= cpu_to_hc32(ahcd
, info
);
254 td
->hwDBP
= cpu_to_hc32(ahcd
, data
);
255 td
->hwCBL
= cpu_to_hc32(ahcd
, cbl
);
258 up
->td
[up
->td_idx
-1]->hwNextTD
= cpu_to_hc32(ahcd
, td
->td_dma
);
263 /*-------------------------------------------------------------------------*/
265 /* Prepare all TDs of a transfer, and queue them onto the ED.
266 * Caller guarantees HC is active.
267 * Usually the ED is already on the schedule, so TDs might be
268 * processed as soon as they're queued.
/* NOTE(review): line-mangled extract; tokens kept byte-identical.  Many
 * lines are missing — in particular the case labels for the INTERRUPT,
 * BULK and CONTROL branches of the switch on urb_priv->ed->type, several
 * closing braces, and some td_fill argument tails.  Comments below are
 * keyed to what is visible.
 *
 * Visible behavior: builds and queues all TDs for one URB on its ED.
 *  - Toggle handling: if usb_gettoggle() says the endpoint toggle is set,
 *    it is reset via usb_settoggle() (resetting is harmless on an active
 *    endpoint, per the comment retained below).
 *  - Interrupt-ish branch (first TD_T_CARRY info word): sets service
 *    interval and start frame into the info word, fills one TD, and bumps
 *    bandwidth_int_reqs.
 *  - Bulk-ish branch: splits transfers into TD_DATALEN_MAX chunks in a
 *    loop (first TD carries `toggle`, later ones TD_T_CARRY), emits a
 *    final short TD, and appends a zero-length TD when URB_ZERO_PACKET
 *    requests it.
 *  - Control branch: SETUP TD (DATA0, 8 bytes from urb->setup_dma),
 *    optional DATA TD (DATA1, direction from is_out; >8K transfers are
 *    mishandled per the retained NOTE), then the STATUS ACK TD in the
 *    opposite direction, always DATA1.
 *  - ISO branch: one TD per iso_frame_desc packet, frame number folded
 *    into the info word; bumps bandwidth_isoc_reqs.
 * Finishes by sanity-checking the created-TD count against urb_priv->td_cnt
 * (admhc_err on mismatch) and resetting td_idx to 0 for completion
 * processing.
 */
270 static void td_submit_urb(struct admhcd
*ahcd
, struct urb
*urb
)
272 struct urb_priv
*urb_priv
= urb
->hcpriv
;
274 int data_len
= urb
->transfer_buffer_length
;
277 int is_out
= usb_pipeout(urb
->pipe
);
280 /* OHCI handles the bulk/interrupt data toggles itself. We just
281 * use the device toggle bits for resetting, and rely on the fact
282 * that resetting toggle is meaningless if the endpoint is active.
285 if (usb_gettoggle(urb
->dev
, usb_pipeendpoint(urb
->pipe
), is_out
)) {
289 usb_settoggle(urb
->dev
, usb_pipeendpoint (urb
->pipe
),
293 urb_priv
->td_idx
= 0;
296 data
= urb
->transfer_dma
;
300 /* NOTE: TD_CC is set so we can tell which TDs the HC processed by
301 * using TD_CC_GET, as well as by seeing them on the done list.
302 * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
304 switch (urb_priv
->ed
->type
) {
307 ? TD_T_CARRY
| TD_SCC_NOTACCESSED
| TD_DP_OUT
308 : TD_T_CARRY
| TD_SCC_NOTACCESSED
| TD_DP_IN
;
310 /* setup service interval and starting frame number */
311 info
|= (urb
->start_frame
& TD_FN_MASK
);
312 info
|= (urb
->interval
& TD_ISI_MASK
) << TD_ISI_SHIFT
;
314 td_fill(ahcd
, info
, data
, data_len
, urb_priv
);
317 admhcd_to_hcd(ahcd
)->self
.bandwidth_int_reqs
++;
322 ? TD_SCC_NOTACCESSED
| TD_DP_OUT
323 : TD_SCC_NOTACCESSED
| TD_DP_IN
;
325 /* TDs _could_ transfer up to 8K each */
326 while (data_len
> TD_DATALEN_MAX
) {
327 td_fill(ahcd
, info
| ((cnt
) ? TD_T_CARRY
: toggle
),
328 data
, TD_DATALEN_MAX
, urb_priv
);
329 data
+= TD_DATALEN_MAX
;
330 data_len
-= TD_DATALEN_MAX
;
334 td_fill(ahcd
, info
| ((cnt
) ? TD_T_CARRY
: toggle
), data
,
338 if ((urb
->transfer_flags
& URB_ZERO_PACKET
)
339 && (cnt
< urb_priv
->td_cnt
)) {
340 td_fill(ahcd
, info
| ((cnt
) ? TD_T_CARRY
: toggle
),
346 /* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
347 * any DATA phase works normally, and the STATUS ack is special.
350 /* fill a TD for the setup */
351 info
= TD_SCC_NOTACCESSED
| TD_DP_SETUP
| TD_T_DATA0
;
352 td_fill(ahcd
, info
, urb
->setup_dma
, 8, urb_priv
);
356 /* fill a TD for the data */
357 info
= TD_SCC_NOTACCESSED
| TD_T_DATA1
;
358 info
|= is_out
? TD_DP_OUT
: TD_DP_IN
;
359 /* NOTE: mishandles transfers >8K, some >4K */
360 td_fill(ahcd
, info
, data
, data_len
, urb_priv
);
364 /* fill a TD for the ACK */
365 info
= (is_out
|| data_len
== 0)
366 ? TD_SCC_NOTACCESSED
| TD_DP_IN
| TD_T_DATA1
367 : TD_SCC_NOTACCESSED
| TD_DP_OUT
| TD_T_DATA1
;
368 td_fill(ahcd
, info
, data
, 0, urb_priv
);
373 /* ISO has no retransmit, so no toggle;
374 * Each TD could handle multiple consecutive frames (interval 1);
375 * we could often reduce the number of TDs here.
377 case PIPE_ISOCHRONOUS
:
378 info
= TD_SCC_NOTACCESSED
;
379 for (cnt
= 0; cnt
< urb
->number_of_packets
; cnt
++) {
380 int frame
= urb
->start_frame
;
382 frame
+= cnt
* urb
->interval
;
384 td_fill(ahcd
, info
| frame
,
385 data
+ urb
->iso_frame_desc
[cnt
].offset
,
386 urb
->iso_frame_desc
[cnt
].length
,
389 admhcd_to_hcd(ahcd
)->self
.bandwidth_isoc_reqs
++;
393 if (urb_priv
->td_cnt
!= cnt
)
394 admhc_err(ahcd
, "bad number of tds created for urb %p\n", urb
);
396 urb_priv
->td_idx
= 0;
399 /* calculate transfer length/status and update the urb
400 * PRECONDITION: irqsafe (only for urb->status locking)
/* NOTE(review): line-mangled extract; tokens kept byte-identical.  Missing
 * lines include the tdINFO/tdPSW local declarations, the `else` paths, the
 * admhc_dbg call whose format string survives at line 441, and the function's
 * return statements — so the exact return value (presumably the condition
 * code `cc`) cannot be confirmed from this capture.
 *
 * Visible behavior: decodes a completed TD.  Reads hwINFO/hwDBP/hwCBL
 * (byte-order converted), extracts the condition code with TD_CC_GET.
 *  - ISO pipes: computes per-packet length (descriptor length for OUT,
 *    PSW low 10 bits otherwise; DATAUNDERRUN treated as OK for ISO),
 *    accumulates urb->actual_length, and stores per-frame actual_length
 *    and status (via cc_to_error[]), logging on error.
 *  - BULK/INT/CONTROL: adds (dbp - td->data_dma + cbl) to
 *    urb->actual_length for every non-empty packet except the control
 *    SETUP packet.
 */
402 static int td_done(struct admhcd
*ahcd
, struct urb
*urb
, struct td
*td
)
404 u32 info
= hc32_to_cpup(ahcd
, &td
->hwINFO
);
405 u32 dbp
= hc32_to_cpup(ahcd
, &td
->hwDBP
);
406 u32 cbl
= TD_BL_GET(hc32_to_cpup(ahcd
, &td
->hwCBL
));
407 int type
= usb_pipetype(urb
->pipe
);
410 cc
= TD_CC_GET(info
);
412 /* ISO ... drivers see per-TD length/status */
413 if (type
== PIPE_ISOCHRONOUS
) {
418 /* NOTE: assumes FC in tdINFO == 0, and that
419 * only the first of 0..MAXPSW psws is used.
423 if (tdINFO
& TD_CC
) /* hc didn't touch? */
426 if (usb_pipeout (urb
->pipe
))
427 dlen
= urb
->iso_frame_desc
[td
->index
].length
;
429 /* short reads are always OK for ISO */
430 if (cc
== TD_DATAUNDERRUN
)
432 dlen
= tdPSW
& 0x3ff;
435 urb
->actual_length
+= dlen
;
436 urb
->iso_frame_desc
[td
->index
].actual_length
= dlen
;
437 urb
->iso_frame_desc
[td
->index
].status
= cc_to_error
[cc
];
439 if (cc
!= TD_CC_NOERROR
)
441 "urb %p iso td %p (%d) len %d cc %d\n",
442 urb
, td
, 1 + td
->index
, dlen
, cc
);
444 /* BULK, INT, CONTROL ... drivers see aggregate length/status,
445 * except that "setup" bytes aren't counted and "short" transfers
446 * might not be reported as errors.
449 admhc_dump_td(ahcd
, "td_done", td
);
451 /* count all non-empty packets except control SETUP packet */
452 if ((type
!= PIPE_CONTROL
|| td
->index
!= 0) && dbp
!= 0) {
453 urb
->actual_length
+= dbp
- td
->data_dma
+ cbl
;
460 /*-------------------------------------------------------------------------*/
/* NOTE(review): line-mangled extract; tokens kept byte-identical.  Missing
 * lines include the switch statement the TD_CC_DATAUNDERRUN case belongs
 * to, the admhc_dbg/ndbg call whose format string survives at line 528,
 * and the return (presumably `rev`, the accumulated done-list head).
 *
 * Visible behavior: recovery path when an ED halts on an error TD.
 * Captures the data-toggle carry bit (ED_C) from hwHeadP, sets ED_SKIP so
 * the TD that caused the halt stays inactive until urb->complete() cleans
 * up, and clears the halt bit (ED_H).  Then walks the remaining td_list:
 * every later TD belonging to the same URB is marked done (TD_DONE set,
 * TD_CC cleared — "completed with no error, nothing transferred") and
 * pushed onto the reversed done list via next_dl_td, while the ED's
 * hwHeadP is advanced past it, preserving the toggle.  Finally logs
 * anything odd — data underrun without URB_SHORT_NOT_OK, or control-pipe
 * conditions — for troubleshooting.
 */
462 static inline struct td
*
463 ed_halted(struct admhcd
*ahcd
, struct td
*td
, int cc
, struct td
*rev
)
466 struct urb
*urb
= td
->urb
;
467 struct ed
*ed
= td
->ed
;
468 struct list_head
*tmp
= td
->td_list
.next
;
469 __hc32 toggle
= ed
->hwHeadP
& cpu_to_hc32 (ahcd
, ED_C
);
471 admhc_dump_ed(ahcd
, "ed halted", td
->ed
, 1);
472 /* clear ed halt; this is the td that caused it, but keep it inactive
473 * until its urb->complete() has a chance to clean up.
475 ed
->hwINFO
|= cpu_to_hc32 (ahcd
, ED_SKIP
);
477 ed
->hwHeadP
&= ~cpu_to_hc32 (ahcd
, ED_H
);
479 /* put any later tds from this urb onto the donelist, after 'td',
480 * order won't matter here: no errors, and nothing was transferred.
481 * also patch the ed so it looks as if those tds completed normally.
483 while (tmp
!= &ed
->td_list
) {
487 next
= list_entry(tmp
, struct td
, td_list
);
488 tmp
= next
->td_list
.next
;
490 if (next
->urb
!= urb
)
493 /* NOTE: if multi-td control DATA segments get supported,
494 * this urb had one of them, this td wasn't the last td
495 * in that segment (TD_R clear), this ed halted because
496 * of a short read, _and_ URB_SHORT_NOT_OK is clear ...
497 * then we need to leave the control STATUS packet queued
502 info
|= cpu_to_hc32 (ahcd
, TD_DONE
);
503 info
&= ~cpu_to_hc32 (ahcd
, TD_CC
);
507 next
->next_dl_td
= rev
;
510 ed
->hwHeadP
= next
->hwNextTD
| toggle
;
513 /* help for troubleshooting: report anything that
514 * looks odd ... that doesn't include protocol stalls
515 * (or maybe some other things)
518 case TD_CC_DATAUNDERRUN
:
519 if ((urb
->transfer_flags
& URB_SHORT_NOT_OK
) == 0)
523 if (usb_pipecontrol (urb
->pipe
))
528 "urb %p path %s ep%d%s %08x cc %d --> status %d\n",
529 urb
, urb
->dev
->devpath
,
530 usb_pipeendpoint (urb
->pipe
),
531 usb_pipein (urb
->pipe
) ? "in" : "out",
532 hc32_to_cpu(ahcd
, td
->hwINFO
),
533 cc
, cc_to_error
[cc
]);
542 /*-------------------------------------------------------------------------*/
/* NOTE(review): line-mangled extract; tokens kept byte-identical.  The
 * early-return bodies and the final return value (the function returns int
 * — presumably whether a new URB was activated) are missing from the
 * capture, as is where `up` is stored (likely ed->urb_active).
 *
 * Visible behavior: activates the next pending URB on an idle ED.  Bails
 * unless the ED is ED_IDLE and its urb_pending list is non-empty; pops the
 * first urb_priv; links the URB's last TD to the ED's dummy TD; then points
 * hwHeadP at the URB's first TD while preserving the current data-toggle
 * carry bit (ED_C), and clears ED_SKIP so the HC resumes processing.
 */
544 static int ed_next_urb(struct admhcd
*ahcd
, struct ed
*ed
)
549 if (ed
->state
!= ED_IDLE
)
555 if (list_empty(&ed
->urb_pending
))
558 up
= list_entry(ed
->urb_pending
.next
, struct urb_priv
, pending
);
559 list_del(&up
->pending
);
563 #ifdef ADMHC_VERBOSE_DEBUG
564 urb_print(ahcd
, up
->urb
, "NEXT", 0);
565 admhc_dump_ed(ahcd
, " ", ed
, 0);
568 up
->td
[up
->td_cnt
-1]->hwNextTD
= cpu_to_hc32(ahcd
, ed
->dummy
->td_dma
);
570 carry
= hc32_to_cpup(ahcd
, &ed
->hwHeadP
) & ED_C
;
571 ed
->hwHeadP
= cpu_to_hc32(ahcd
, up
->td
[0]->td_dma
| carry
);
572 ed
->hwINFO
&= ~cpu_to_hc32(ahcd
, ED_SKIP
);
/* NOTE(review): line-mangled extract; tokens kept byte-identical.  Missing
 * lines include the locals (`up`, `urb`, `cc`) and their initialization
 * (lines 578-588), the admhc_dbg call whose format string survives at
 * line 604, and a `return` after the "not completed yet" comment.
 *
 * Visible behavior: harvests completed TDs for the ED's active URB.  Walks
 * up->td[] from td_idx, stopping at the first TD the hardware still owns
 * (TD_OWN); each done TD is folded into the URB via td_done().  On a TD
 * error it logs and fast-forwards td_idx to td_cnt, abandoning the rest.
 * If not all TDs finished and this is not a `partial` (forced) update, the
 * URB stays in flight.  Otherwise: a data underrun without
 * URB_SHORT_NOT_OK is forgiven; a real error (cc < TD_CC_HCD0) sets
 * urb->status from cc_to_error[] under urb->lock; the URB is given back
 * via finish_urb() and ed->urb_active is cleared.
 */
577 static void ed_update(struct admhcd
*ahcd
, struct ed
*ed
, int partial
)
589 #ifdef ADMHC_VERBOSE_DEBUG
590 urb_print(ahcd
, urb
, "UPDATE", 0);
592 admhc_dump_ed(ahcd
, "ED-UPDATE", ed
, 1);
595 for (; up
->td_idx
< up
->td_cnt
; up
->td_idx
++) {
596 struct td
*td
= up
->td
[up
->td_idx
];
598 if (hc32_to_cpup(ahcd
, &td
->hwINFO
) & TD_OWN
)
601 cc
= td_done(ahcd
, urb
, td
);
602 if (cc
!= TD_CC_NOERROR
) {
604 "urb %p td %p (%d) cc %d, len=%d/%d\n",
605 urb
, td
, td
->index
, cc
,
607 urb
->transfer_buffer_length
);
609 up
->td_idx
= up
->td_cnt
;
614 if ((up
->td_idx
!= up
->td_cnt
) && (!partial
))
615 /* the URB is not completed yet */
618 /* update packet status if needed (short is normally ok) */
619 if (cc
== TD_CC_DATAUNDERRUN
620 && !(urb
->transfer_flags
& URB_SHORT_NOT_OK
))
623 if (cc
!= TD_CC_NOERROR
&& cc
< TD_CC_HCD0
) {
624 spin_lock(&urb
->lock
);
625 if (urb
->status
== -EINPROGRESS
)
626 urb
->status
= cc_to_error
[cc
];
627 spin_unlock(&urb
->lock
);
630 finish_urb(ahcd
, urb
);
632 ed
->urb_active
= NULL
;
/* NOTE(review): line-mangled extract; tokens kept byte-identical.  The
 * `continue` bodies for the skip conditions and the close of the loop are
 * missing from the capture.
 *
 * Visible behavior: done-queue sweep.  Iterates every ED on the software
 * list; skips EDs that are not operational (ED_OPER) or have ED_SKIP set;
 * the ED_H (halted) check at line 648 introduces a branch whose body is
 * not captured (presumably ed_halted-style recovery); otherwise harvests
 * completed TDs via ed_update(ahcd, ed, 0).
 */
636 /* there are some tds completed; called in_irq(), with HCD locked */
637 static void admhc_td_complete(struct admhcd
*ahcd
)
641 for (ed
= ahcd
->ed_head
; ed
; ed
= ed
->ed_next
) {
642 if (ed
->state
!= ED_OPER
)
645 if (hc32_to_cpup(ahcd
, &ed
->hwINFO
) & ED_SKIP
)
648 if (hc32_to_cpup(ahcd
, &ed
->hwHeadP
) & ED_H
) {
653 ed_update(ahcd
, ed
, 0);
/* NOTE(review): line-mangled extract; tokens kept byte-identical.  The
 * `continue` bodies and whatever follows the partial ed_update (likely the
 * actual ED teardown/free) are missing from the capture.
 *
 * Visible behavior: completes deferred ED unlinks.  For every ED in
 * ED_UNLINK state, if the HC is still running and the current frame `tick`
 * has not yet passed the ED's recorded safe tick (tick_before), the ED is
 * skipped — the HC may still be touching it.  Otherwise the ED's partial
 * completion status is flushed with ed_update(ahcd, ed, 1).
 */
657 /* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
658 static void admhc_finish_unlinks(struct admhcd
*ahcd
, u16 tick
)
662 for (ed
= ahcd
->ed_head
; ed
; ed
= ed
->ed_next
) {
663 if (ed
->state
!= ED_UNLINK
)
666 if (likely(HC_IS_RUNNING(admhcd_to_hcd(ahcd
)->state
)))
667 if (tick_before(tick
, ed
->tick
))
670 /* process partial status */
671 ed_update(ahcd
, ed
, 1);
/* NOTE(review): line-mangled extract; tokens kept byte-identical.  The
 * bodies that decide between the ed_next_urb() success path, the
 * ed_deschedule() path, and the condition guarding the final
 * disable-vs-enable of SOF interrupts and DMA are missing — presumably
 * keyed on whether any ED still has work (TODO confirm).
 *
 * Visible behavior: SOF-time schedule maintenance.  For each ED: if the
 * halt bit (ED_H) is set, flush partial status with ed_update(..., 1) and
 * clear ED_H; then try to activate the next pending URB via ed_next_urb();
 * if that fails the ED is descheduled.  Afterwards, one branch disables
 * the SOF interrupt and DMA (nothing left to do) and the other re-enables
 * them.
 */
675 static void admhc_sof_refill(struct admhcd
*ahcd
)
680 for (ed
= ahcd
->ed_head
; ed
; ed
= ed
->ed_next
) {
682 if (hc32_to_cpup(ahcd
, &ed
->hwHeadP
) & ED_H
) {
683 ed_update(ahcd
, ed
, 1);
684 ed
->hwHeadP
&= ~cpu_to_hc32 (ahcd
, ED_H
);
687 if (ed_next_urb(ahcd
, ed
)) {
692 ed_deschedule(ahcd
, ed
);
698 admhc_intr_disable(ahcd
, ADMHC_INTR_SOFI
);
699 admhc_dma_disable(ahcd
);
701 admhc_intr_enable(ahcd
, ADMHC_INTR_SOFI
);
702 admhc_dma_enable(ahcd
);