USB driver fixes
[openwrt/openwrt.git] / target / linux / adm5120 / files / drivers / usb / host / adm5120-q.c
1 /*
2 * OHCI HCD (Host Controller Driver) for USB.
3 *
4 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
5 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
6 *
7 * This file is licenced under the GPL.
8 */
9
10 #include <linux/irq.h>
11
12 /*-------------------------------------------------------------------------*/
13
/*
 * URB goes back to driver, and isn't reissued.
 * It's completely gone from HC data structures.
 * PRECONDITION: ahcd lock held, irqs blocked.
 */
static void
finish_urb(struct admhcd *ahcd, struct urb *urb)
__releases(ahcd->lock)
__acquires(ahcd->lock)
{
	/* the per-URB private state (TD list etc.) is no longer needed */
	urb_priv_free(ahcd, urb->hcpriv);
	urb->hcpriv = NULL;

	spin_lock(&urb->lock);
	if (likely(urb->status == -EINPROGRESS))
		urb->status = 0;

	/* report short control reads right even though the data TD always
	 * has TD_R set. (much simpler, but creates the 1-td limit.)
	 */
	if (unlikely(urb->transfer_flags & URB_SHORT_NOT_OK)
			&& unlikely(usb_pipecontrol(urb->pipe))
			&& urb->actual_length < urb->transfer_buffer_length
			&& usb_pipein(urb->pipe)
			&& urb->status == 0) {
		urb->status = -EREMOTEIO;
#ifdef ADMHC_VERBOSE_DEBUG
		urb_print(ahcd, urb, "SHORT", usb_pipeout (urb->pipe));
#endif
	}
	spin_unlock(&urb->lock);

	/* periodic transfers release their bandwidth reservation */
	switch (usb_pipetype(urb->pipe)) {
	case PIPE_ISOCHRONOUS:
		admhcd_to_hcd(ahcd)->self.bandwidth_isoc_reqs--;
		break;
	case PIPE_INTERRUPT:
		admhcd_to_hcd(ahcd)->self.bandwidth_int_reqs--;
		break;
	}

#ifdef ADMHC_VERBOSE_DEBUG
	urb_print(ahcd, urb, "FINISH", 0);
#endif

	/* urb->complete() can reenter this HCD */
	spin_unlock(&ahcd->lock);
	usb_hcd_giveback_urb(admhcd_to_hcd(ahcd), urb);
	spin_lock(&ahcd->lock);
}
64
65
66 /*-------------------------------------------------------------------------*
67 * ED handling functions
68 *-------------------------------------------------------------------------*/
69
70 static struct ed *ed_create(struct admhcd *ahcd, unsigned int type, u32 info)
71 {
72 struct ed *ed;
73 struct td *td;
74
75 ed = ed_alloc(ahcd, GFP_ATOMIC);
76 if (!ed)
77 goto err;
78
79 /* dummy td; end of td list for this ed */
80 td = td_alloc(ahcd, GFP_ATOMIC);
81 if (!td)
82 goto err_free_ed;
83
84 switch (type) {
85 case PIPE_INTERRUPT:
86 info |= ED_INT;
87 break;
88 case PIPE_ISOCHRONOUS:
89 info |= ED_ISO;
90 break;
91 }
92
93 ed->dummy = td;
94 ed->state = ED_NEW;
95 ed->type = type;
96
97 ed->hwINFO = cpu_to_hc32(ahcd, info);
98 ed->hwTailP = cpu_to_hc32(ahcd, td->td_dma);
99 ed->hwHeadP = cpu_to_hc32(ahcd, td->td_dma);
100
101 return ed;
102
103 err_free_ed:
104 ed_free(ahcd, ed);
105 err:
106 return NULL;
107 }
108
109 /* get and maybe (re)init an endpoint. init _should_ be done only as part
110 * of enumeration, usb_set_configuration() or usb_set_interface().
111 */
112 static struct ed *ed_get(struct admhcd *ahcd, struct usb_host_endpoint *ep,
113 struct usb_device *udev, unsigned int pipe, int interval)
114 {
115 struct ed *ed;
116 unsigned long flags;
117
118 spin_lock_irqsave(&ahcd->lock, flags);
119 ed = ep->hcpriv;
120 if (!ed) {
121 u32 info;
122
123 /* FIXME: usbcore changes dev->devnum before SET_ADDRESS
124 * suceeds ... otherwise we wouldn't need "pipe".
125 */
126 info = usb_pipedevice(pipe);
127 info |= (ep->desc.bEndpointAddress & ~USB_DIR_IN) << ED_EN_SHIFT;
128 info |= le16_to_cpu(ep->desc.wMaxPacketSize) << ED_MPS_SHIFT;
129 if (udev->speed == USB_SPEED_FULL)
130 info |= ED_SPEED_FULL;
131
132 ed = ed_create(ahcd, usb_pipetype(pipe), info);
133 if (ed)
134 ep->hcpriv = ed;
135 }
136 spin_unlock_irqrestore(&ahcd->lock, flags);
137
138 return ed;
139 }
140
/* link an ed into the HC chain */
static int ed_schedule(struct admhcd *ahcd, struct ed *ed)
{
	struct ed *old_tail;

	if (admhcd_to_hcd(ahcd)->state == HC_STATE_QUIESCING)
		return -EAGAIN;

	/* only a freshly created (or fully descheduled) ED needs linking */
	if (ed->state != ED_NEW)
		return 0;

	admhc_dump_ed(ahcd, "ED-SCHED", ed, 0);

	ed->state = ED_IDLE;

	ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP);

	/* insert after the current tail of this type's list; the new ED's
	 * own links (ed_next/hwNextED/ed_prev) are set up completely
	 * before old_tail->hwNextED makes it visible to the HC */
	old_tail = ahcd->ed_tails[ed->type];

	ed->ed_next = old_tail->ed_next;
	if (ed->ed_next) {
		ed->ed_next->ed_prev = ed;
		ed->hwNextED = cpu_to_hc32(ahcd, ed->ed_next->dma);
	}
	ed->ed_prev = old_tail;

	old_tail->ed_next = ed;
	old_tail->hwNextED = cpu_to_hc32(ahcd, ed->dma);

	ahcd->ed_tails[ed->type] = ed;

	/* the SOF handler (admhc_sof_refill) feeds URBs to scheduled EDs */
	admhc_intr_enable(ahcd, ADMHC_INTR_SOFI);

	return 0;
}
176
/* unlink an ED from both the HC's hardware chain and the driver's list.
 * NOTE(review): ed->ed_prev is dereferenced unconditionally -- this
 * assumes the list head is never descheduled through this path;
 * confirm against how ahcd->ed_head is set up.
 */
static void ed_deschedule(struct admhcd *ahcd, struct ed *ed)
{
	admhc_dump_ed(ahcd, "ED-DESCHED", ed, 0);

	/* remove this ED from the HC list */
	ed->ed_prev->hwNextED = ed->hwNextED;

	/* and remove it from our list */
	ed->ed_prev->ed_next = ed->ed_next;

	if (ed->ed_next) {
		ed->ed_next->ed_prev = ed->ed_prev;
		ed->ed_next = NULL;
	}

	/* if this ED was the tail, its predecessor becomes the tail */
	if (ahcd->ed_tails[ed->type] == ed)
		ahcd->ed_tails[ed->type] = ed->ed_prev;

	/* ED_NEW allows the ED to be rescheduled by ed_schedule() */
	ed->state = ED_NEW;
}
197
/* begin unlinking an ED: mark it skipped for the HC and queue it on the
 * remove list; final removal is deferred until the HC is guaranteed to
 * no longer reference it (see admhc_finish_unlinks).
 */
static void ed_start_deschedule(struct admhcd *ahcd, struct ed *ed)
{
	admhc_dump_ed(ahcd, "ED-UNLINK", ed, 0);

	/* SKIP is set first so the HC stops processing this ED */
	ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);

	ed->state = ED_UNLINK;

	/* add this ED into the remove list */
	ed->ed_rm_next = ahcd->ed_rm_list;
	ahcd->ed_rm_list = ed;

	/* SOF interrupt might get delayed; record the frame counter value that
	 * indicates when the HC isn't looking at it, so concurrent unlinks
	 * behave. frame_no wraps every 2^16 msec, and changes right before
	 * SOF is triggered.
	 */
	ed->tick = admhc_frame_no(ahcd) + 1;

	/* enable SOF interrupt */
	admhc_intr_enable(ahcd, ADMHC_INTR_SOFI);
}
220
221 /*-------------------------------------------------------------------------*
222 * TD handling functions
223 *-------------------------------------------------------------------------*/
224
225 static void td_fill(struct admhcd *ahcd, u32 info, dma_addr_t data, int len,
226 struct urb_priv *up)
227 {
228 struct td *td;
229 u32 cbl = 0;
230
231 if (up->td_idx >= up->td_cnt) {
232 admhc_dbg(ahcd, "td_fill error, idx=%d, cnt=%d\n", up->td_idx,
233 up->td_cnt);
234 return;
235 }
236
237 td = up->td[up->td_idx];
238 td->data_dma = data;
239 if (!len)
240 data = 0;
241
242 #if 1
243 if (up->td_idx == up->td_cnt-1)
244 #endif
245 cbl |= TD_IE;
246
247 if (data)
248 cbl |= (len & TD_BL_MASK);
249
250 info |= TD_OWN;
251
252 /* setup hardware specific fields */
253 td->hwINFO = cpu_to_hc32(ahcd, info);
254 td->hwDBP = cpu_to_hc32(ahcd, data);
255 td->hwCBL = cpu_to_hc32(ahcd, cbl);
256
257 if (up->td_idx > 0)
258 up->td[up->td_idx-1]->hwNextTD = cpu_to_hc32(ahcd, td->td_dma);
259
260 up->td_idx++;
261 }
262
263 /*-------------------------------------------------------------------------*/
264
/* Prepare all TDs of a transfer, and queue them onto the ED.
 * Caller guarantees HC is active.
 * Usually the ED is already on the schedule, so TDs might be
 * processed as soon as they're queued.
 */
static void td_submit_urb(struct admhcd *ahcd, struct urb *urb)
{
	struct urb_priv *urb_priv = urb->hcpriv;
	dma_addr_t data;
	int data_len = urb->transfer_buffer_length;
	int cnt = 0;
	u32 info = 0;
	int is_out = usb_pipeout(urb->pipe);
	u32 toggle = 0;

	/* OHCI handles the bulk/interrupt data toggles itself. We just
	 * use the device toggle bits for resetting, and rely on the fact
	 * that resetting toggle is meaningless if the endpoint is active.
	 */

	if (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), is_out)) {
		/* continue with the toggle the ED is already carrying */
		toggle = TD_T_CARRY;
	} else {
		/* start from DATA0 and record the toggle as reset */
		toggle = TD_T_DATA0;
		usb_settoggle(urb->dev, usb_pipeendpoint (urb->pipe),
			is_out, 1);
	}

	urb_priv->td_idx = 0;

	if (data_len)
		data = urb->transfer_dma;
	else
		data = 0;

	/* NOTE: TD_CC is set so we can tell which TDs the HC processed by
	 * using TD_CC_GET, as well as by seeing them on the done list.
	 * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
	 */
	switch (urb_priv->ed->type) {
	case PIPE_INTERRUPT:
		info = is_out
			? TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_OUT
			: TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_IN;

		/* setup service interval and starting frame number */
		info |= (urb->start_frame & TD_FN_MASK);
		info |= (urb->interval & TD_ISI_MASK) << TD_ISI_SHIFT;

		td_fill(ahcd, info, data, data_len, urb_priv);
		cnt++;

		admhcd_to_hcd(ahcd)->self.bandwidth_int_reqs++;
		break;

	case PIPE_BULK:
		info = is_out
			? TD_SCC_NOTACCESSED | TD_DP_OUT
			: TD_SCC_NOTACCESSED | TD_DP_IN;

		/* TDs _could_ transfer up to 8K each */
		while (data_len > TD_DATALEN_MAX) {
			/* only the very first TD carries the explicit
			 * toggle; the rest inherit it (TD_T_CARRY) */
			td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle),
				data, TD_DATALEN_MAX, urb_priv);
			data += TD_DATALEN_MAX;
			data_len -= TD_DATALEN_MAX;
			cnt++;
		}

		/* final (possibly zero-length) data TD */
		td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle), data,
			data_len, urb_priv);
		cnt++;

		/* extra zero-length TD when the caller asked for an
		 * explicit short-packet terminator and the TD array has
		 * room reserved for it */
		if ((urb->transfer_flags & URB_ZERO_PACKET)
				&& (cnt < urb_priv->td_cnt)) {
			td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle),
				0, 0, urb_priv);
			cnt++;
		}
		break;

	/* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
	 * any DATA phase works normally, and the STATUS ack is special.
	 */
	case PIPE_CONTROL:
		/* fill a TD for the setup */
		info = TD_SCC_NOTACCESSED | TD_DP_SETUP | TD_T_DATA0;
		td_fill(ahcd, info, urb->setup_dma, 8, urb_priv);
		cnt++;

		if (data_len > 0) {
			/* fill a TD for the data */
			info = TD_SCC_NOTACCESSED | TD_T_DATA1;
			info |= is_out ? TD_DP_OUT : TD_DP_IN;
			/* NOTE: mishandles transfers >8K, some >4K */
			td_fill(ahcd, info, data, data_len, urb_priv);
			cnt++;
		}

		/* fill a TD for the ACK: status stage runs opposite to the
		 * data direction and is always DATA1 */
		info = (is_out || data_len == 0)
			? TD_SCC_NOTACCESSED | TD_DP_IN | TD_T_DATA1
			: TD_SCC_NOTACCESSED | TD_DP_OUT | TD_T_DATA1;
		td_fill(ahcd, info, data, 0, urb_priv);
		cnt++;

		break;

	/* ISO has no retransmit, so no toggle;
	 * Each TD could handle multiple consecutive frames (interval 1);
	 * we could often reduce the number of TDs here.
	 */
	case PIPE_ISOCHRONOUS:
		info = TD_SCC_NOTACCESSED;
		for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
			/* one TD per packet, tagged with the frame number
			 * it must be transmitted in */
			int frame = urb->start_frame;

			frame += cnt * urb->interval;
			frame &= TD_FN_MASK;
			td_fill(ahcd, info | frame,
				data + urb->iso_frame_desc[cnt].offset,
				urb->iso_frame_desc[cnt].length,
				urb_priv);
		}
		admhcd_to_hcd(ahcd)->self.bandwidth_isoc_reqs++;
		break;
	}

	/* the TD array was pre-sized elsewhere; a mismatch is a bug */
	if (urb_priv->td_cnt != cnt)
		admhc_err(ahcd, "bad number of tds created for urb %p\n", urb);

	urb_priv->td_idx = 0;
}
398
/* calculate transfer length/status and update the urb
 * PRECONDITION: irqsafe (only for urb->status locking)
 * Returns the TD's condition code (TD_CC_*).
 */
static int td_done(struct admhcd *ahcd, struct urb *urb, struct td *td)
{
	u32 info = hc32_to_cpup(ahcd, &td->hwINFO);
	u32 dbp = hc32_to_cpup(ahcd, &td->hwDBP);
	u32 cbl = TD_BL_GET(hc32_to_cpup(ahcd, &td->hwCBL));
	int type = usb_pipetype(urb->pipe);
	int cc;

	/* condition code reported by the HC for this TD */
	cc = TD_CC_GET(info);

	/* ISO ... drivers see per-TD length/status */
	if (type == PIPE_ISOCHRONOUS) {
		/* ISO completion handling is not implemented yet; the
		 * compiled-out draft below is kept as a TODO reference */
#if 0
		/* TODO */
		int dlen = 0;

		/* NOTE: assumes FC in tdINFO == 0, and that
		 * only the first of 0..MAXPSW psws is used.
		 */

		cc = TD_CC_GET(td);
		if (tdINFO & TD_CC)	/* hc didn't touch? */
			return;

		if (usb_pipeout (urb->pipe))
			dlen = urb->iso_frame_desc[td->index].length;
		else {
			/* short reads are always OK for ISO */
			if (cc == TD_DATAUNDERRUN)
				cc = TD_CC_NOERROR;
			dlen = tdPSW & 0x3ff;
		}

		urb->actual_length += dlen;
		urb->iso_frame_desc[td->index].actual_length = dlen;
		urb->iso_frame_desc[td->index].status = cc_to_error[cc];

		if (cc != TD_CC_NOERROR)
			admhc_vdbg (ahcd,
				"urb %p iso td %p (%d) len %d cc %d\n",
				urb, td, 1 + td->index, dlen, cc);
#endif
	/* BULK, INT, CONTROL ... drivers see aggregate length/status,
	 * except that "setup" bytes aren't counted and "short" transfers
	 * might not be reported as errors.
	 */
	} else {
		admhc_dump_td(ahcd, "td_done", td);

		/* count all non-empty packets except control SETUP packet */
		if ((type != PIPE_CONTROL || td->index != 0) && dbp != 0) {
			/* presumably dbp is the HC-updated buffer pointer
			 * and cbl the remaining byte-count field -- TODO
			 * confirm against the ADM5120 TD layout */
			urb->actual_length += dbp - td->data_dma + cbl;
		}
	}

	return cc;
}
459
460 /*-------------------------------------------------------------------------*/
461
/* handle a TD on an ED that the HC halted: patch the ED and move the
 * URB's remaining TDs onto the done list.
 * NOTE: the whole implementation is compiled out (#if 0); the function
 * currently does nothing and always returns NULL.
 */
static inline struct td *
ed_halted(struct admhcd *ahcd, struct td *td, int cc, struct td *rev)
{
#if 0
	struct urb *urb = td->urb;
	struct ed *ed = td->ed;
	struct list_head *tmp = td->td_list.next;
	__hc32 toggle = ed->hwHeadP & cpu_to_hc32 (ahcd, ED_C);

	admhc_dump_ed(ahcd, "ed halted", td->ed, 1);
	/* clear ed halt; this is the td that caused it, but keep it inactive
	 * until its urb->complete() has a chance to clean up.
	 */
	ed->hwINFO |= cpu_to_hc32 (ahcd, ED_SKIP);
	wmb ();
	ed->hwHeadP &= ~cpu_to_hc32 (ahcd, ED_H);

	/* put any later tds from this urb onto the donelist, after 'td',
	 * order won't matter here: no errors, and nothing was transferred.
	 * also patch the ed so it looks as if those tds completed normally.
	 */
	while (tmp != &ed->td_list) {
		struct td *next;
		__hc32 info;

		next = list_entry(tmp, struct td, td_list);
		tmp = next->td_list.next;

		if (next->urb != urb)
			break;

		/* NOTE: if multi-td control DATA segments get supported,
		 * this urb had one of them, this td wasn't the last td
		 * in that segment (TD_R clear), this ed halted because
		 * of a short read, _and_ URB_SHORT_NOT_OK is clear ...
		 * then we need to leave the control STATUS packet queued
		 * and clear ED_SKIP.
		 */
		info = next->hwINFO;
#if 0		/* FIXME */
		info |= cpu_to_hc32 (ahcd, TD_DONE);
		info &= ~cpu_to_hc32 (ahcd, TD_CC);
#endif
		next->hwINFO = info;

		next->next_dl_td = rev;
		rev = next;

		ed->hwHeadP = next->hwNextTD | toggle;
	}

	/* help for troubleshooting: report anything that
	 * looks odd ... that doesn't include protocol stalls
	 * (or maybe some other things)
	 */
	switch (cc) {
	case TD_CC_DATAUNDERRUN:
		if ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)
			break;
		/* fallthrough */
	case TD_CC_STALL:
		if (usb_pipecontrol (urb->pipe))
			break;
		/* fallthrough */
	default:
		admhc_dbg (ahcd,
			"urb %p path %s ep%d%s %08x cc %d --> status %d\n",
			urb, urb->dev->devpath,
			usb_pipeendpoint (urb->pipe),
			usb_pipein (urb->pipe) ? "in" : "out",
			hc32_to_cpu(ahcd, td->hwINFO),
			cc, cc_to_error [cc]);
	}

	return rev;
#else
	return NULL;
#endif
}
541
542 /*-------------------------------------------------------------------------*/
543
/* move the next pending URB (if any) of @ed into the active slot and
 * point the hardware at its first TD.
 * Returns nonzero while the ED is (or stays) busy, zero when it is
 * idle with nothing pending (i.e. a candidate for descheduling).
 */
static int ed_next_urb(struct admhcd *ahcd, struct ed *ed)
{
	struct urb_priv *up;
	u32 carry;

	if (ed->state != ED_IDLE)
		return 1;

	if (ed->urb_active)
		return 1;

	if (list_empty(&ed->urb_pending))
		return 0;

	/* dequeue the oldest pending URB and make it active */
	up = list_entry(ed->urb_pending.next, struct urb_priv, pending);
	list_del(&up->pending);
	ed->urb_active = up;
	ed->state = ED_OPER;

#ifdef ADMHC_VERBOSE_DEBUG
	urb_print(ahcd, up->urb, "NEXT", 0);
	admhc_dump_ed(ahcd, " ", ed, 0);
#endif

	/* terminate the URB's TD chain with the ED's dummy TD before
	 * exposing the chain to the HC */
	up->td[up->td_cnt-1]->hwNextTD = cpu_to_hc32(ahcd, ed->dummy->td_dma);

	/* point the head at the first TD (preserving the toggle carry
	 * bit), then clear SKIP so the HC starts processing */
	carry = hc32_to_cpup(ahcd, &ed->hwHeadP) & ED_C;
	ed->hwHeadP = cpu_to_hc32(ahcd, up->td[0]->td_dma | carry);
	ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP);

	return 1;
}
576
/* collect status of the ED's active URB's completed TDs and, when the
 * URB is fully processed (or @partial forces completion with whatever
 * was transferred so far), give it back to the driver.
 * PRECONDITION: ahcd lock held (finish_urb temporarily drops it).
 */
static void ed_update(struct admhcd *ahcd, struct ed *ed, int partial)
{
	struct urb_priv *up;
	struct urb *urb;
	int cc;

	up = ed->urb_active;
	if (!up)
		return;

	urb = up->urb;

#ifdef ADMHC_VERBOSE_DEBUG
	urb_print(ahcd, urb, "UPDATE", 0);
#endif
	admhc_dump_ed(ahcd, "ED-UPDATE", ed, 1);

	cc = TD_CC_NOERROR;
	for (; up->td_idx < up->td_cnt; up->td_idx++) {
		struct td *td = up->td[up->td_idx];

		/* a TD still owned by the HC has not completed yet */
		if (hc32_to_cpup(ahcd, &td->hwINFO) & TD_OWN)
			break;

		cc = td_done(ahcd, urb, td);
		if (cc != TD_CC_NOERROR) {
			admhc_vdbg(ahcd,
				"urb %p td %p (%d) cc %d, len=%d/%d\n",
				urb, td, td->index, cc,
				urb->actual_length,
				urb->transfer_buffer_length);

			/* error: skip the URB's remaining TDs */
			up->td_idx = up->td_cnt;
			break;
		}
	}

	if ((up->td_idx != up->td_cnt) && (!partial))
		/* the URB is not completed yet */
		return;

	/* update packet status if needed (short is normally ok) */
	if (cc == TD_CC_DATAUNDERRUN
			&& !(urb->transfer_flags & URB_SHORT_NOT_OK))
		cc = TD_CC_NOERROR;

	/* map hardware condition codes to USB error codes; TD_CC_HCD0 and
	 * above are reserved for HCD-internal use */
	if (cc != TD_CC_NOERROR && cc < TD_CC_HCD0) {
		spin_lock(&urb->lock);
		if (urb->status == -EINPROGRESS)
			urb->status = cc_to_error[cc];
		spin_unlock(&urb->lock);
	}

	/* NOTE: drops and retakes ahcd->lock around the completion call */
	finish_urb(ahcd, urb);

	ed->urb_active = NULL;
	ed->state = ED_IDLE;
}
635
636 /* there are some tds completed; called in_irq(), with HCD locked */
637 static void admhc_td_complete(struct admhcd *ahcd)
638 {
639 struct ed *ed;
640
641 for (ed = ahcd->ed_head; ed; ed = ed->ed_next) {
642 if (ed->state != ED_OPER)
643 continue;
644
645 if (hc32_to_cpup(ahcd, &ed->hwINFO) & ED_SKIP)
646 continue;
647
648 if (hc32_to_cpup(ahcd, &ed->hwHeadP) & ED_H) {
649 /* TODO */
650 continue;
651 }
652
653 ed_update(ahcd, ed, 0);
654 }
655 }
656
657 /* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
658 static void admhc_finish_unlinks(struct admhcd *ahcd, u16 tick)
659 {
660 struct ed *ed;
661
662 for (ed = ahcd->ed_head; ed; ed = ed->ed_next) {
663 if (ed->state != ED_UNLINK)
664 continue;
665
666 if (likely(HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state)))
667 if (tick_before(tick, ed->tick))
668 continue;
669
670 /* process partial status */
671 ed_update(ahcd, ed, 1);
672 }
673 }
674
/* SOF-time housekeeping: refill EDs with pending URBs, deschedule idle
 * ones, and stop SOF interrupts/DMA when nothing is left to do.
 */
static void admhc_sof_refill(struct admhcd *ahcd)
{
	struct ed *ed;
	int disable_dma = 1;

	for (ed = ahcd->ed_head; ed; ed = ed->ed_next) {

		/* a halted ED stopped processing; collect partial status
		 * and clear the halt bit so it can continue */
		if (hc32_to_cpup(ahcd, &ed->hwHeadP) & ED_H) {
			ed_update(ahcd, ed, 1);
			ed->hwHeadP &= ~cpu_to_hc32 (ahcd, ED_H);
		}

		if (ed_next_urb(ahcd, ed)) {
			/* at least one ED stays busy: keep DMA running */
			disable_dma = 0;
		} else {
			/* idle ED with no pending URBs: unlink it and
			 * resume iteration from its predecessor.
			 * NOTE(review): relies on ed->ed_prev being valid
			 * here (ed_deschedule dereferences it) -- confirm
			 * that ahcd->ed_head can never reach this branch */
			struct ed *tmp;
			tmp = ed->ed_prev;
			ed_deschedule(ahcd, ed);
			ed = tmp;
		}
	}

	if (disable_dma) {
		/* nothing scheduled: quiesce until the next submission */
		admhc_intr_disable(ahcd, ADMHC_INTR_SOFI);
		admhc_dma_disable(ahcd);
	} else {
		admhc_intr_enable(ahcd, ADMHC_INTR_SOFI);
		admhc_dma_enable(ahcd);
	}
}