/*
 * OHCI HCD (Host Controller Driver) for USB.
 *
 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
 *
 * This file is licenced under the GPL.
 */

#include <linux/irq.h>

/*-------------------------------------------------------------------------*/

/*
 * URB goes back to driver, and isn't reissued.
 * It's completely gone from HC data structures.
 * PRECONDITION: ahcd lock held, irqs blocked.
 */
static void
finish_urb(struct admhcd *ahcd, struct urb *urb)
__releases(ahcd->lock)
__acquires(ahcd->lock)
{
	urb_priv_free(ahcd, urb->hcpriv);
	urb->hcpriv = NULL;

	spin_lock(&urb->lock);
	if (likely(urb->status == -EINPROGRESS))
		urb->status = 0;

	/* report short control reads right even though the data TD always
	 * has TD_R set. (much simpler, but creates the 1-td limit.)
	 */
	if (unlikely(urb->transfer_flags & URB_SHORT_NOT_OK)
			&& unlikely(usb_pipecontrol(urb->pipe))
			&& urb->actual_length < urb->transfer_buffer_length
			&& usb_pipein(urb->pipe)
			&& urb->status == 0) {
		urb->status = -EREMOTEIO;
#ifdef ADMHC_VERBOSE_DEBUG
		urb_print(ahcd, urb, "SHORT", usb_pipeout(urb->pipe));
#endif
	}
	spin_unlock(&urb->lock);

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_ISOCHRONOUS:
		admhcd_to_hcd(ahcd)->self.bandwidth_isoc_reqs--;
		break;
	case PIPE_INTERRUPT:
		admhcd_to_hcd(ahcd)->self.bandwidth_int_reqs--;
		break;
	}

#ifdef ADMHC_VERBOSE_DEBUG
	urb_print(ahcd, urb, "FINISH", 0);
#endif

	/* urb->complete() can reenter this HCD */
	spin_unlock(&ahcd->lock);
	usb_hcd_giveback_urb(admhcd_to_hcd(ahcd), urb);
	spin_lock(&ahcd->lock);
}
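
/* NOTE: the unlock/lock dance above is the usual HCD giveback pattern:
 * the completion handler may resubmit URBs and reenter this driver, so
 * ahcd->lock must not be held across usb_hcd_giveback_urb(). The
 * __releases/__acquires annotations document that for sparse.
 */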

/*-------------------------------------------------------------------------*
 * ED handling functions
 *-------------------------------------------------------------------------*/

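/*
 * Each endpoint is represented by an ED (endpoint descriptor) that the
 * controller walks, and each ED carries a singly linked list of TDs
 * (transfer descriptors) terminated by a "dummy" TD that never describes
 * a transfer. Queueing work means filling TDs ahead of the dummy, the
 * classic OHCI trick for adding work without racing the hardware.
 * (General OHCI background, summarized here; the ADM5120 register layout
 * differs from stock OHCI in the details.)
 */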
static struct ed *ed_create(struct admhcd *ahcd, unsigned int type, u32 info)
{
	struct ed *ed;
	struct td *td;

	ed = ed_alloc(ahcd, GFP_ATOMIC);
	if (!ed)
		goto err;

	/* dummy td; end of td list for this ed */
	td = td_alloc(ahcd, GFP_ATOMIC);
	if (!td)
		goto err_free_ed;

	switch (type) {
	case PIPE_INTERRUPT:
		info |= ED_INT;
		break;
	case PIPE_ISOCHRONOUS:
		info |= ED_ISO;
		break;
	}

	ed->dummy = td;
	ed->state = ED_NEW;
	ed->type = type;

	ed->hwINFO = cpu_to_hc32(ahcd, info);
	ed->hwTailP = cpu_to_hc32(ahcd, td->td_dma);
	ed->hwHeadP = cpu_to_hc32(ahcd, td->td_dma);

	return ed;

err_free_ed:
	ed_free(ahcd, ed);
err:
	return NULL;
}

/* get and maybe (re)init an endpoint. init _should_ be done only as part
 * of enumeration, usb_set_configuration() or usb_set_interface().
 */
static struct ed *ed_get(struct admhcd *ahcd, struct usb_host_endpoint *ep,
	struct usb_device *udev, unsigned int pipe, int interval)
{
	struct ed *ed;
	unsigned long flags;

	spin_lock_irqsave(&ahcd->lock, flags);
	ed = ep->hcpriv;
	if (!ed) {
		u32 info;

		/* FIXME: usbcore changes dev->devnum before SET_ADDRESS
		 * succeeds ... otherwise we wouldn't need "pipe".
		 */
		info = usb_pipedevice(pipe);
		info |= (ep->desc.bEndpointAddress & ~USB_DIR_IN) << ED_EN_SHIFT;
		info |= le16_to_cpu(ep->desc.wMaxPacketSize) << ED_MPS_SHIFT;
		if (udev->speed == USB_SPEED_FULL)
			info |= ED_SPEED_FULL;

		ed = ed_create(ahcd, usb_pipetype(pipe), info);
		if (ed)
			ep->hcpriv = ed;
	}
	spin_unlock_irqrestore(&ahcd->lock, flags);

	return ed;
}

/* link an ed into the HC chain */
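/*
 * All EDs share one hardware chain (hwNextED), mirrored by a doubly
 * linked software list (ed_next/ed_prev) with per-type tail pointers in
 * ahcd->ed_tails[]. ed_schedule() splices a new ED in after its type's
 * current tail under admhc_dma_lock(); the new ED's own links are fully
 * set up before old_tail->hwNextED makes it visible, so the controller
 * never walks a half-linked chain.
 */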
static int ed_schedule(struct admhcd *ahcd, struct ed *ed)
{
	struct ed *old_tail;

	if (admhcd_to_hcd(ahcd)->state == HC_STATE_QUIESCING)
		return -EAGAIN;

	if (ed->state != ED_NEW)
		return 0;

	admhc_dump_ed(ahcd, "ED-SCHED", ed, 0);

	ed->state = ED_IDLE;

	admhc_dma_lock(ahcd);
	ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP);

	old_tail = ahcd->ed_tails[ed->type];

	ed->ed_next = old_tail->ed_next;
	if (ed->ed_next) {
		ed->ed_next->ed_prev = ed;
		ed->hwNextED = cpu_to_hc32(ahcd, ed->ed_next->dma);
	}
	ed->ed_prev = old_tail;

	old_tail->ed_next = ed;
	old_tail->hwNextED = cpu_to_hc32(ahcd, ed->dma);

	ahcd->ed_tails[ed->type] = ed;
	admhc_dma_unlock(ahcd);

	admhc_intr_enable(ahcd, ADMHC_INTR_SOFI);

	return 0;
}

static void ed_deschedule(struct admhcd *ahcd, struct ed *ed)
{
	admhc_dump_ed(ahcd, "ED-DESCHED", ed, 0);

	/* remove this ED from the HC list */
	admhc_dma_lock(ahcd);
	ed->ed_prev->hwNextED = ed->hwNextED;
	admhc_dma_unlock(ahcd);

	/* and remove it from our list */
	ed->ed_prev->ed_next = ed->ed_next;

	if (ed->ed_next) {
		ed->ed_next->ed_prev = ed->ed_prev;
		ed->ed_next = NULL;
	}

	if (ahcd->ed_tails[ed->type] == ed)
		ahcd->ed_tails[ed->type] = ed->ed_prev;

	ed->state = ED_NEW;
}

static void ed_start_deschedule(struct admhcd *ahcd, struct ed *ed)
{
	admhc_dump_ed(ahcd, "ED-UNLINK", ed, 0);

	admhc_dma_lock(ahcd);
	ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
	admhc_dma_unlock(ahcd);

	ed->state = ED_UNLINK;

	/* add this ED into the remove list */
	ed->ed_rm_next = ahcd->ed_rm_list;
	ahcd->ed_rm_list = ed;

	/* SOF interrupt might get delayed; record the frame counter value that
	 * indicates when the HC isn't looking at it, so concurrent unlinks
	 * behave. frame_no wraps every 2^16 msec, and changes right before
	 * SOF is triggered.
	 */
	ed->tick = admhc_frame_no(ahcd) + 1;

	/* enable SOF interrupt */
	admhc_intr_enable(ahcd, ADMHC_INTR_SOFI);
}
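
/*
 * Unlinking is deferred until the frame counter passes ed->tick, since
 * the controller may still hold pointers into the ED during the frame in
 * which ED_SKIP was set. admhc_finish_unlinks() compares ticks with
 * tick_before(), defined elsewhere in this driver; a minimal sketch of
 * such a wraparound-safe comparison on the 16-bit frame number (an
 * assumption, not necessarily the exact implementation):
 *
 *	static inline int tick_before(u16 tick1, u16 tick2)
 *	{
 *		return (s16)(tick1 - tick2) < 0;
 *	}
 */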

/*-------------------------------------------------------------------------*
 * TD handling functions
 *-------------------------------------------------------------------------*/

static void td_fill(struct admhcd *ahcd, u32 info, dma_addr_t data, int len,
	struct urb_priv *up)
{
	struct td *td;
	u32 cbl = 0;

	if (up->td_idx >= up->td_cnt) {
		admhc_dbg(ahcd, "td_fill error, idx=%d, cnt=%d\n", up->td_idx,
			up->td_cnt);
		return;
	}

	td = up->td[up->td_idx];
	td->data_dma = data;
	if (!len)
		data = 0;

	if (up->td_idx == up->td_cnt - 1)
		cbl |= TD_IE;

	if (data)
		cbl |= (len & TD_BL_MASK);

	info |= TD_OWN;

	/* setup hardware specific fields */
	td->hwINFO = cpu_to_hc32(ahcd, info);
	td->hwDBP = cpu_to_hc32(ahcd, data);
	td->hwCBL = cpu_to_hc32(ahcd, cbl);

	if (up->td_idx > 0)
		up->td[up->td_idx - 1]->hwNextTD = cpu_to_hc32(ahcd, td->td_dma);

	up->td_idx++;
}
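
/*
 * td_fill() links each new TD to its predecessor via hwNextTD, so the
 * chain is already complete by the time it is handed to the controller.
 * TD_IE is set only on the final TD, limiting completion interrupts to
 * one per URB, and TD_OWN hands ownership of the descriptor to the
 * hardware.
 */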

/*-------------------------------------------------------------------------*/

/* Prepare all TDs of a transfer, and queue them onto the ED.
 * Caller guarantees HC is active.
 * Usually the ED is already on the schedule, so TDs might be
 * processed as soon as they're queued.
 */
static void td_submit_urb(struct admhcd *ahcd, struct urb *urb)
{
	struct urb_priv *urb_priv = urb->hcpriv;
	dma_addr_t data;
	int data_len = urb->transfer_buffer_length;
	int cnt = 0;
	u32 info = 0;
	int is_out = usb_pipeout(urb->pipe);
	u32 toggle = 0;

	/* OHCI handles the bulk/interrupt data toggles itself. We just
	 * use the device toggle bits for resetting, and rely on the fact
	 * that resetting toggle is meaningless if the endpoint is active.
	 */

	if (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), is_out)) {
		toggle = TD_T_CARRY;
	} else {
		toggle = TD_T_DATA0;
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			is_out, 1);
	}

	urb_priv->td_idx = 0;

	if (data_len)
		data = urb->transfer_dma;
	else
		data = 0;

	/* NOTE: TD_CC is set so we can tell which TDs the HC processed by
	 * using TD_CC_GET, as well as by seeing them on the done list.
	 * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
	 */
	switch (urb_priv->ed->type) {
	case PIPE_INTERRUPT:
		info = is_out
			? TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_OUT
			: TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_IN;

		/* setup service interval and starting frame number */
		info |= (urb->start_frame & TD_FN_MASK);
		info |= (urb->interval & TD_ISI_MASK) << TD_ISI_SHIFT;

		td_fill(ahcd, info, data, data_len, urb_priv);
		cnt++;

		admhcd_to_hcd(ahcd)->self.bandwidth_int_reqs++;
		break;

	case PIPE_BULK:
		info = is_out
			? TD_SCC_NOTACCESSED | TD_DP_OUT
			: TD_SCC_NOTACCESSED | TD_DP_IN;

		/* TDs _could_ transfer up to 8K each */
		while (data_len > TD_DATALEN_MAX) {
			td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle),
				data, TD_DATALEN_MAX, urb_priv);
			data += TD_DATALEN_MAX;
			data_len -= TD_DATALEN_MAX;
			cnt++;
		}

		td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle), data,
			data_len, urb_priv);
		cnt++;

		if ((urb->transfer_flags & URB_ZERO_PACKET)
				&& (cnt < urb_priv->td_cnt)) {
			td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle),
				0, 0, urb_priv);
			cnt++;
		}
		break;

	/* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
	 * any DATA phase works normally, and the STATUS ack is special.
	 */
	case PIPE_CONTROL:
		/* fill a TD for the setup */
		info = TD_SCC_NOTACCESSED | TD_DP_SETUP | TD_T_DATA0;
		td_fill(ahcd, info, urb->setup_dma, 8, urb_priv);
		cnt++;

		if (data_len > 0) {
			/* fill a TD for the data */
			info = TD_SCC_NOTACCESSED | TD_T_DATA1;
			info |= is_out ? TD_DP_OUT : TD_DP_IN;
			/* NOTE: mishandles transfers >8K, some >4K */
			td_fill(ahcd, info, data, data_len, urb_priv);
			cnt++;
		}

		/* fill a TD for the ACK */
		info = (is_out || data_len == 0)
			? TD_SCC_NOTACCESSED | TD_DP_IN | TD_T_DATA1
			: TD_SCC_NOTACCESSED | TD_DP_OUT | TD_T_DATA1;
		td_fill(ahcd, info, data, 0, urb_priv);
		cnt++;

		break;

	/* ISO has no retransmit, so no toggle;
	 * Each TD could handle multiple consecutive frames (interval 1);
	 * we could often reduce the number of TDs here.
	 */
	case PIPE_ISOCHRONOUS:
		info = TD_SCC_NOTACCESSED;
		for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
			int frame = urb->start_frame;

			frame += cnt * urb->interval;
			frame &= TD_FN_MASK;
			td_fill(ahcd, info | frame,
				data + urb->iso_frame_desc[cnt].offset,
				urb->iso_frame_desc[cnt].length,
				urb_priv);
		}
		admhcd_to_hcd(ahcd)->self.bandwidth_isoc_reqs++;
		break;
	}

	if (urb_priv->td_cnt != cnt)
		admhc_err(ahcd, "bad number of tds created for urb %p\n", urb);

	urb_priv->td_idx = 0;
}
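
/*
 * Resulting TD queues, by pipe type: interrupt uses a single TD with
 * the service interval and start frame encoded in its info word; iso
 * uses one TD per packet; bulk splits the buffer into TD_DATALEN_MAX
 * chunks plus an optional zero-length TD for URB_ZERO_PACKET; control
 * is the standard SETUP / optional DATA / STATUS sequence, where the
 * STATUS stage is always DATA1 and runs opposite to the data direction
 * (IN when there is no data stage).
 */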

/* calculate transfer length/status and update the urb
 * PRECONDITION: irqsafe (only for urb->status locking)
 */
static int td_done(struct admhcd *ahcd, struct urb *urb, struct td *td)
{
	u32 info = hc32_to_cpup(ahcd, &td->hwINFO);
	u32 dbp = hc32_to_cpup(ahcd, &td->hwDBP);
	u32 cbl = TD_BL_GET(hc32_to_cpup(ahcd, &td->hwCBL));
	int type = usb_pipetype(urb->pipe);
	int cc;

	cc = TD_CC_GET(info);

	/* ISO ... drivers see per-TD length/status */
	if (type == PIPE_ISOCHRONOUS) {
#if 0
		/* TODO */
		int dlen = 0;

		/* NOTE: assumes FC in tdINFO == 0, and that
		 * only the first of 0..MAXPSW psws is used.
		 */

		cc = TD_CC_GET(td);
		if (tdINFO & TD_CC)	/* hc didn't touch? */
			return;

		if (usb_pipeout(urb->pipe))
			dlen = urb->iso_frame_desc[td->index].length;
		else {
			/* short reads are always OK for ISO */
			if (cc == TD_CC_DATAUNDERRUN)
				cc = TD_CC_NOERROR;
			dlen = tdPSW & 0x3ff;
		}

		urb->actual_length += dlen;
		urb->iso_frame_desc[td->index].actual_length = dlen;
		urb->iso_frame_desc[td->index].status = cc_to_error[cc];

		if (cc != TD_CC_NOERROR)
			admhc_vdbg(ahcd,
				"urb %p iso td %p (%d) len %d cc %d\n",
				urb, td, 1 + td->index, dlen, cc);
#endif
	/* BULK, INT, CONTROL ... drivers see aggregate length/status,
	 * except that "setup" bytes aren't counted and "short" transfers
	 * might not be reported as errors.
	 */
	} else {
		admhc_dump_td(ahcd, "td_done", td);

		/* count all non-empty packets except control SETUP packet */
		if ((type != PIPE_CONTROL || td->index != 0) && dbp != 0) {
			urb->actual_length += dbp - td->data_dma + cbl;
		}
	}

	return cc;
}
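
/*
 * For non-ISO TDs the byte count is reconstructed from the descriptor
 * fields as (dbp - td->data_dma) + cbl, i.e. how far the buffer pointer
 * advanced plus the residual byte-length field. The exact post-completion
 * semantics of the DBP and BL fields are ADM5120-specific (see the TD_*
 * macros and the controller documentation); the code simply trusts the
 * values the hardware wrote back.
 */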

/*-------------------------------------------------------------------------*/

static inline struct td *
ed_halted(struct admhcd *ahcd, struct td *td, int cc, struct td *rev)
{
#if 0
	struct urb *urb = td->urb;
	struct ed *ed = td->ed;
	struct list_head *tmp = td->td_list.next;
	__hc32 toggle = ed->hwHeadP & cpu_to_hc32(ahcd, ED_C);

	admhc_dump_ed(ahcd, "ed halted", td->ed, 1);
	/* clear ed halt; this is the td that caused it, but keep it inactive
	 * until its urb->complete() has a chance to clean up.
	 */
	ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
	wmb();
	ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);

	/* put any later tds from this urb onto the donelist, after 'td',
	 * order won't matter here: no errors, and nothing was transferred.
	 * also patch the ed so it looks as if those tds completed normally.
	 */
	while (tmp != &ed->td_list) {
		struct td *next;
		__hc32 info;

		next = list_entry(tmp, struct td, td_list);
		tmp = next->td_list.next;

		if (next->urb != urb)
			break;

		/* NOTE: if multi-td control DATA segments get supported,
		 * this urb had one of them, this td wasn't the last td
		 * in that segment (TD_R clear), this ed halted because
		 * of a short read, _and_ URB_SHORT_NOT_OK is clear ...
		 * then we need to leave the control STATUS packet queued
		 * and clear ED_SKIP.
		 */
		info = next->hwINFO;
#if 0	/* FIXME */
		info |= cpu_to_hc32(ahcd, TD_DONE);
		info &= ~cpu_to_hc32(ahcd, TD_CC);
#endif
		next->hwINFO = info;

		next->next_dl_td = rev;
		rev = next;

		ed->hwHeadP = next->hwNextTD | toggle;
	}

	/* help for troubleshooting: report anything that
	 * looks odd ... that doesn't include protocol stalls
	 * (or maybe some other things)
	 */
	switch (cc) {
	case TD_CC_DATAUNDERRUN:
		if ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)
			break;
		/* fallthrough */
	case TD_CC_STALL:
		if (usb_pipecontrol(urb->pipe))
			break;
		/* fallthrough */
	default:
		admhc_dbg(ahcd,
			"urb %p path %s ep%d%s %08x cc %d --> status %d\n",
			urb, urb->dev->devpath,
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			hc32_to_cpu(ahcd, td->hwINFO),
			cc, cc_to_error[cc]);
	}

	return rev;
#else
	return NULL;
#endif
}
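
/*
 * ed_halted() is the stall/error recovery path inherited from ohci-hcd;
 * its body is compiled out above, so it currently just returns NULL and
 * no TDs are moved to the done list when an ED halts. Proper error
 * unwinding is still TODO in this experimental driver.
 */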

/*-------------------------------------------------------------------------*/

static int ed_next_urb(struct admhcd *ahcd, struct ed *ed)
{
	struct urb_priv *up;
	u32 carry;

	if (ed->state != ED_IDLE)
		return 1;

	if (ed->urb_active)
		return 1;

	if (list_empty(&ed->urb_pending))
		return 0;

	up = list_entry(ed->urb_pending.next, struct urb_priv, pending);
	list_del(&up->pending);
	ed->urb_active = up;
	ed->state = ED_OPER;

#ifdef ADMHC_VERBOSE_DEBUG
	urb_print(ahcd, up->urb, "NEXT", 0);
	admhc_dump_ed(ahcd, " ", ed, 0);
#endif

	up->td[up->td_cnt - 1]->hwNextTD = cpu_to_hc32(ahcd, ed->dummy->td_dma);

	admhc_dma_lock(ahcd);
	carry = hc32_to_cpup(ahcd, &ed->hwHeadP) & ED_C;
	ed->hwHeadP = cpu_to_hc32(ahcd, up->td[0]->td_dma | carry);
	ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP);
	admhc_dma_unlock(ahcd);

	return 1;
}
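
/*
 * Note the handoff above: the URB's last TD is first pointed at the ED's
 * dummy TD, then hwHeadP is rewritten to the first TD while preserving
 * the ED_C toggle-carry bit, and finally ED_SKIP is cleared so the
 * controller resumes processing. The return value tells the SOF refill
 * loop whether this ED still needs servicing (1) or is idle with nothing
 * pending (0).
 */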

static void ed_update(struct admhcd *ahcd, struct ed *ed, int partial)
{
	struct urb_priv *up;
	struct urb *urb;
	int cc;

	up = ed->urb_active;
	if (!up)
		return;

	urb = up->urb;

#ifdef ADMHC_VERBOSE_DEBUG
	urb_print(ahcd, urb, "UPDATE", 0);
#endif
	admhc_dump_ed(ahcd, "ED-UPDATE", ed, 1);

	cc = TD_CC_NOERROR;
	for (; up->td_idx < up->td_cnt; up->td_idx++) {
		struct td *td = up->td[up->td_idx];

		if (hc32_to_cpup(ahcd, &td->hwINFO) & TD_OWN)
			break;

		cc = td_done(ahcd, urb, td);
		if (cc != TD_CC_NOERROR) {
			admhc_vdbg(ahcd,
				"urb %p td %p (%d) cc %d, len=%d/%d\n",
				urb, td, td->index, cc,
				urb->actual_length,
				urb->transfer_buffer_length);

			up->td_idx = up->td_cnt;
			break;
		}
	}

	if ((up->td_idx != up->td_cnt) && (!partial))
		/* the URB is not completed yet */
		return;

	/* update packet status if needed (short is normally ok) */
	if (cc == TD_CC_DATAUNDERRUN
			&& !(urb->transfer_flags & URB_SHORT_NOT_OK))
		cc = TD_CC_NOERROR;

	if (cc != TD_CC_NOERROR && cc < TD_CC_HCD0) {
		spin_lock(&urb->lock);
		if (urb->status == -EINPROGRESS)
			urb->status = cc_to_error[cc];
		spin_unlock(&urb->lock);
	}

	finish_urb(ahcd, urb);

	ed->urb_active = NULL;
	ed->state = ED_IDLE;
}
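
/*
 * ed_update() walks the active URB's TDs until it hits one the hardware
 * still owns. With partial == 0 the URB is finished only when every TD
 * has completed (or one failed); partial == 1 forces completion with
 * whatever progress was made, which is what the unlink and halt paths
 * need.
 */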

/* there are some tds completed; called in_irq(), with HCD locked */
static void admhc_td_complete(struct admhcd *ahcd)
{
	struct ed *ed;

	for (ed = ahcd->ed_head; ed; ed = ed->ed_next) {
		if (ed->state != ED_OPER)
			continue;

		if (hc32_to_cpup(ahcd, &ed->hwINFO) & ED_SKIP)
			continue;

		if (hc32_to_cpup(ahcd, &ed->hwHeadP) & ED_H) {
			/* TODO */
			continue;
		}

		ed_update(ahcd, ed, 0);
	}
}

/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
static void admhc_finish_unlinks(struct admhcd *ahcd, u16 tick)
{
	struct ed *ed;

	for (ed = ahcd->ed_head; ed; ed = ed->ed_next) {
		if (ed->state != ED_UNLINK)
			continue;

		if (likely(HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state)))
			if (tick_before(tick, ed->tick))
				continue;

		/* process partial status */
		ed_update(ahcd, ed, 1);
	}
}

static void admhc_sof_refill(struct admhcd *ahcd)
{
	struct ed *ed;
	int disable_dma = 1;

	for (ed = ahcd->ed_head; ed; ed = ed->ed_next) {
		if (hc32_to_cpup(ahcd, &ed->hwHeadP) & ED_H) {
			ed_update(ahcd, ed, 1);
			ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);
		}

		if (ed_next_urb(ahcd, ed)) {
			disable_dma = 0;
		} else {
			struct ed *tmp;
			tmp = ed->ed_prev;
			ed_deschedule(ahcd, ed);
			ed = tmp;
		}
	}

	if (disable_dma) {
		admhc_intr_disable(ahcd, ADMHC_INTR_SOFI);
		admhc_dma_disable(ahcd);
	} else {
		admhc_intr_enable(ahcd, ADMHC_INTR_SOFI);
		admhc_dma_enable(ahcd);
	}
}
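
/*
 * Unlike stock OHCI, this driver drives the schedule from the SOF
 * interrupt: each frame admhc_sof_refill() finishes halted EDs, starts
 * the next pending URB on every idle ED, and deschedules EDs with no
 * work left. When nothing is active it disables both the SOF interrupt
 * and list DMA until ed_schedule() re-enables them, keeping the (still
 * experimental) controller quiet while idle.
 */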
712 }