USB driver updates; the driver now passes usbtests 1-10
author Gabor Juhos <juhosg@openwrt.org>
Fri, 23 Nov 2007 15:53:35 +0000
committer Gabor Juhos <juhosg@openwrt.org>
Fri, 23 Nov 2007 15:53:35 +0000
SVN-Revision: 9592

target/linux/adm5120/files/drivers/usb/host/adm5120-dbg.c
target/linux/adm5120/files/drivers/usb/host/adm5120-drv.c
target/linux/adm5120/files/drivers/usb/host/adm5120-hcd.c
target/linux/adm5120/files/drivers/usb/host/adm5120-mem.c
target/linux/adm5120/files/drivers/usb/host/adm5120-q.c
target/linux/adm5120/files/drivers/usb/host/adm5120.h

target/linux/adm5120/files/drivers/usb/host/adm5120-dbg.c
index 648576cc0526f74670d55c3027a63a84894bb915..17d4ad446b072aad90228673c03067c066da45eb 100644
@@ -33,8 +33,6 @@ static inline char *ed_statestring(int state)
                return "UNLINK";
        case ED_OPER:
                return "OPER";
-       case ED_NEW:
-               return "NEW";
        }
        return "?STATE";
 }
@@ -70,18 +68,20 @@ static inline char *td_togglestring(u32 info)
        return "?TOGGLE";
 }
 
+/*-------------------------------------------------------------------------*/
+
 #ifdef DEBUG
 
 /* debug| print the main components of an URB
  * small: 0) header + data packets 1) just header
  */
 static void __attribute__((unused))
-urb_print(struct admhcd *ahcd, struct urb * urb, char * str, int small)
+urb_print(struct admhcd *ahcd, struct urb *urb, char * str, int small)
 {
        unsigned int pipe = urb->pipe;
 
        if (!urb->dev || !urb->dev->bus) {
-               admhc_dbg(ahcd, "%s URB: no dev", str);
+               admhc_dbg("%s URB: no dev", str);
                return;
        }
 
@@ -89,11 +89,11 @@ urb_print(struct admhcd *ahcd, struct urb * urb, char * str, int small)
        if (urb->status != 0)
 #endif
        admhc_dbg(ahcd, "URB-%s %p dev=%d ep=%d%s-%s flags=%x len=%d/%d "
-                       "stat=%d\n",
+                       "stat=%d",
                        str,
                        urb,
-                       usb_pipedevice(pipe),
-                       usb_pipeendpoint(pipe),
+                       usb_pipedevice (pipe),
+                       usb_pipeendpoint (pipe),
                        usb_pipeout(pipe)? "out" : "in",
                        pipestring(pipe),
                        urb->transfer_flags,
@@ -106,20 +106,20 @@ urb_print(struct admhcd *ahcd, struct urb * urb, char * str, int small)
                int i, len;
 
                if (usb_pipecontrol(pipe)) {
-                       admhc_dbg(admhc, "setup(8): ");
+                       admhc_dbg(ahcd, "setup(8):");
                        for (i = 0; i < 8 ; i++)
                                printk (" %02x", ((__u8 *) urb->setup_packet) [i]);
                        printk ("\n");
                }
                if (urb->transfer_buffer_length > 0 && urb->transfer_buffer) {
-                       admhc_dbg(admhc, "data(%d/%d): ",
+                       printk(KERN_DEBUG __FILE__ ": data(%d/%d):",
                                urb->actual_length,
                                urb->transfer_buffer_length);
                        len = usb_pipeout(pipe)?
                                                urb->transfer_buffer_length: urb->actual_length;
                        for (i = 0; i < 16 && i < len; i++)
                                printk (" %02x", ((__u8 *) urb->transfer_buffer) [i]);
-                       printk ("%s stat:%d\n", i < len? "...": "", urb->status);
+                       admhc_dbg("%s stat:%d\n", i < len? "...": "", urb->status);
                }
        }
 #endif /* ADMHC_VERBOSE_DEBUG */
@@ -318,18 +318,6 @@ static void admhc_dump_td(const struct admhcd *ahcd, const char *label,
                (tmp & TD_IE) ? " IE" : "");
 }
 
-static void admhc_dump_up(const struct admhcd *ahcd, const char *label,
-               const struct urb_priv *up)
-{
-       int i;
-
-       admhc_dbg(ahcd, "%s urb/%p:\n", label, up->urb);
-       for (i = 0; i < up->td_cnt; i++) {
-               struct td *td = up->td[i];
-               admhc_dump_td(ahcd, "    ->", td);
-       }
-}
-
 /* caller MUST own hcd spinlock if verbose is set! */
 static void __attribute__((unused))
 admhc_dump_ed(const struct admhcd *ahcd, const char *label,
@@ -352,22 +340,23 @@ admhc_dump_ed(const struct admhcd *ahcd, const char *label,
                ED_FA_GET(tmp));
 
        tmp = hc32_to_cpup(ahcd, &ed->hwHeadP);
-       admhc_dbg(ahcd, "  tds: head %08x tail %08x %s%s\n",
+       admhc_dbg(ahcd, "  tds: head %08x tail %08x %s%s%s\n",
                tmp & TD_MASK,
-               hc32_to_cpup(ahcd, &ed->hwTailP),
+               hc32_to_cpup (ahcd, &ed->hwTailP),
                (tmp & ED_C) ? data1 : data0,
-               (tmp & ED_H) ? " HALT" : "");
-
-       if (ed->urb_active)
-               admhc_dump_up(ahcd, "  active ", ed->urb_active);
+               (tmp & ED_H) ? " HALT" : "",
+               verbose ? " td list follows" : " (not listing)");
 
-       if ((verbose) && (!list_empty(&ed->urb_pending))) {
-               struct list_head *entry;
-               /* dump pending URBs */
-               list_for_each(entry, &ed->urb_pending) {
-                       struct urb_priv *up;
-                       up = list_entry(entry, struct urb_priv, pending);
-                       admhc_dump_up(ahcd, "  pending ", up);
+       if (verbose) {
+               struct list_head        *tmp;
+
+               /* use ed->td_list because HC concurrently modifies
+                * hwNextTD as it accumulates ed_donelist.
+                */
+               list_for_each(tmp, &ed->td_list) {
+                       struct td               *td;
+                       td = list_entry(tmp, struct td, td_list);
+                       admhc_dump_td (ahcd, "  ->", td);
                }
        }
 }
@@ -375,8 +364,6 @@ admhc_dump_ed(const struct admhcd *ahcd, const char *label,
 #else /* ifdef DEBUG */
 
 static inline void urb_print(struct urb * urb, char * str, int small) {}
-static inline void admhc_dump_up(const struct admhcd *ahcd, const char *label,
-       const struct urb_priv *up) {}
 static inline void admhc_dump_ed(const struct admhcd *ahcd, const char *label,
        const struct ed *ed, int verbose) {}
 static inline void admhc_dump_td(const struct admhcd *ahcd, const char *label,
@@ -396,44 +383,6 @@ static inline void remove_debug_files(struct admhcd *bus) { }
 
 #else
 
-static ssize_t
-show_urb_priv(struct admhcd *ahcd, char *buf, size_t count,
-               struct urb_priv *up)
-{
-       unsigned temp, size = count;
-       int i;
-
-       if (!up)
-               return 0;
-
-       temp = scnprintf(buf, size,"\n\turb %p ", up->urb);
-       size -= temp;
-       buf += temp;
-
-       for (i = 0; i< up->td_cnt; i++) {
-               struct td *td;
-               u32 dbp, cbl, info;
-
-               td = up->td[i];
-               info = hc32_to_cpup(ahcd, &td->hwINFO);
-               dbp = hc32_to_cpup(ahcd, &td->hwDBP);
-               cbl = hc32_to_cpup(ahcd, &td->hwCBL);
-
-               temp = scnprintf(buf, size,
-                       "\n\t\ttd %p %s %d %s%scc=%x (%08x,%08x)",
-                       td,
-                       td_pidstring(info),
-                       TD_BL_GET(cbl),
-                       (info & TD_OWN) ? "WORK " : "DONE ",
-                       (cbl & TD_IE) ? "IE " : "",
-                       TD_CC_GET(info), info, cbl);
-               size -= temp;
-               buf += temp;
-       }
-
-       return count - size;
-}
-
 static ssize_t
 show_list(struct admhcd *ahcd, char *buf, size_t count, struct ed *ed)
 {
@@ -446,10 +395,15 @@ show_list(struct admhcd *ahcd, char *buf, size_t count, struct ed *ed)
        while (ed) {
                u32 info = hc32_to_cpu(ahcd, ed->hwINFO);
                u32 headp = hc32_to_cpu(ahcd, ed->hwHeadP);
+               u32 tailp = hc32_to_cpu(ahcd, ed->hwTailP);
+               struct list_head *entry;
+               struct td       *td;
 
                temp = scnprintf(buf, size,
-                       "ed/%p %s %cs dev%d ep%d %s%smax %d %08x%s%s %s",
+                       "ed/%p %s %s %cs dev%d ep%d %s%smax %d %08x%s%s %s"
+                       " h:%08x t:%08x",
                        ed,
+                       ed_statestring(ed->state),
                        ed_typestring (ed->type),
                        (info & ED_SPEED_FULL) ? 'f' : 'l',
                        info & ED_FA_MASK,
@@ -460,45 +414,37 @@ show_list(struct admhcd *ahcd, char *buf, size_t count, struct ed *ed)
                        info,
                        (info & ED_SKIP) ? " S" : "",
                        (headp & ED_H) ? " H" : "",
-                       (headp & ED_C) ? "DATA1" : "DATA0");
+                       (headp & ED_C) ? data1 : data0,
+                       headp & ED_MASK,tailp);
                size -= temp;
                buf += temp;
 
-               if (ed->urb_active) {
-                       temp = scnprintf(buf, size, "\nactive urb:");
-                       size -= temp;
-                       buf += temp;
-
-                       temp = show_urb_priv(ahcd, buf, size, ed->urb_active);
+               list_for_each(entry, &ed->td_list) {
+                       u32             dbp, cbl;
+
+                       td = list_entry(entry, struct td, td_list);
+                       info = hc32_to_cpup (ahcd, &td->hwINFO);
+                       dbp = hc32_to_cpup (ahcd, &td->hwDBP);
+                       cbl = hc32_to_cpup (ahcd, &td->hwCBL);
+
+                       temp = scnprintf(buf, size,
+                               "\n\ttd/%p %s %d %s%scc=%x urb %p (%08x,%08x)",
+                               td,
+                               td_pidstring(info),
+                               TD_BL_GET(cbl),
+                               (info & TD_OWN) ? "" : "DONE ",
+                               (cbl & TD_IE) ? "IE " : "",
+                               TD_CC_GET (info), td->urb, info, cbl);
                        size -= temp;
                        buf += temp;
                }
 
-               if (!list_empty(&ed->urb_pending)) {
-                       struct list_head *entry;
-
-                       temp = scnprintf(buf, size, "\npending urbs:");
-                       size -= temp;
-                       buf += temp;
-
-                       list_for_each(entry, &ed->urb_pending) {
-                               struct urb_priv *up;
-                               up = list_entry(entry, struct urb_priv,
-                                       pending);
-
-                               temp = show_urb_priv(ahcd, buf, size, up);
-                               size -= temp;
-                               buf += temp;
-                       }
-               }
-
                temp = scnprintf(buf, size, "\n");
                size -= temp;
                buf += temp;
 
                ed = ed->ed_next;
        }
-
        return count - size;
 }
 
@@ -524,6 +470,108 @@ show_async(struct class_device *class_dev, char *buf)
 }
 static CLASS_DEVICE_ATTR(async, S_IRUGO, show_async, NULL);
 
+
+#define DBG_SCHED_LIMIT 64
+
+static ssize_t
+show_periodic(struct class_device *class_dev, char *buf)
+{
+       struct usb_bus          *bus;
+       struct usb_hcd          *hcd;
+       struct admhcd           *ahcd;
+       struct ed               **seen, *ed;
+       unsigned long           flags;
+       unsigned                temp, size, seen_count;
+       char                    *next;
+       unsigned                i;
+
+       if (!(seen = kmalloc(DBG_SCHED_LIMIT * sizeof *seen, GFP_ATOMIC)))
+               return 0;
+       seen_count = 0;
+
+       bus = class_get_devdata(class_dev);
+       hcd = bus_to_hcd(bus);
+       ahcd = hcd_to_admhcd(hcd);
+       next = buf;
+       size = PAGE_SIZE;
+
+       temp = scnprintf(next, size, "size = %d\n", NUM_INTS);
+       size -= temp;
+       next += temp;
+
+       /* dump a snapshot of the periodic schedule (and load) */
+       spin_lock_irqsave(&ahcd->lock, flags);
+       for (i = 0; i < NUM_INTS; i++) {
+               if (!(ed = ahcd->periodic [i]))
+                       continue;
+
+               temp = scnprintf(next, size, "%2d [%3d]:", i, ahcd->load [i]);
+               size -= temp;
+               next += temp;
+
+               do {
+                       temp = scnprintf(next, size, " ed%d/%p",
+                               ed->interval, ed);
+                       size -= temp;
+                       next += temp;
+                       for (temp = 0; temp < seen_count; temp++) {
+                               if (seen [temp] == ed)
+                                       break;
+                       }
+
+                       /* show more info the first time around */
+                       if (temp == seen_count) {
+                               u32     info = hc32_to_cpu (ahcd, ed->hwINFO);
+                               struct list_head        *entry;
+                               unsigned                qlen = 0;
+
+                               /* qlen measured here in TDs, not urbs */
+                               list_for_each (entry, &ed->td_list)
+                                       qlen++;
+                               temp = scnprintf(next, size,
+                                       " (%cs dev%d ep%d%s qlen %u"
+                                       " max %d %08x%s%s)",
+                                       (info & ED_SPEED_FULL) ? 'f' : 'l',
+                                       ED_FA_GET(info),
+                                       ED_EN_GET(info),
+                                       (info & ED_ISO) ? "iso" : "int",
+                                       qlen,
+                                       ED_MPS_GET(info),
+                                       info,
+                                       (info & ED_SKIP) ? " K" : "",
+                                       (ed->hwHeadP &
+                                               cpu_to_hc32(ahcd, ED_H)) ?
+                                                       " H" : "");
+                               size -= temp;
+                               next += temp;
+
+                               if (seen_count < DBG_SCHED_LIMIT)
+                                       seen [seen_count++] = ed;
+
+                               ed = ed->ed_next;
+
+                       } else {
+                               /* we've seen it and what's after */
+                               temp = 0;
+                               ed = NULL;
+                       }
+
+               } while (ed);
+
+               temp = scnprintf(next, size, "\n");
+               size -= temp;
+               next += temp;
+       }
+       spin_unlock_irqrestore(&ahcd->lock, flags);
+       kfree (seen);
+
+       return PAGE_SIZE - size;
+}
+static CLASS_DEVICE_ATTR(periodic, S_IRUGO, show_periodic, NULL);
+
+
+#undef DBG_SCHED_LIMIT
+
 static ssize_t
 show_registers(struct class_device *class_dev, char *buf)
 {
@@ -610,6 +658,7 @@ static inline void create_debug_files (struct admhcd *ahcd)
        int retval;
 
        retval = class_device_create_file(cldev, &class_device_attr_async);
+       retval = class_device_create_file(cldev, &class_device_attr_periodic);
        retval = class_device_create_file(cldev, &class_device_attr_registers);
        admhc_dbg(ahcd, "created debug files\n");
 }
@@ -619,6 +668,7 @@ static inline void remove_debug_files (struct admhcd *ahcd)
        struct class_device *cldev = admhcd_to_hcd(ahcd)->self.class_dev;
 
        class_device_remove_file(cldev, &class_device_attr_async);
+       class_device_remove_file(cldev, &class_device_attr_periodic);
        class_device_remove_file(cldev, &class_device_attr_registers);
 }
 
target/linux/adm5120/files/drivers/usb/host/adm5120-drv.c
index 67c771ca46493c564f676ada7ccab2a398b84a1e..965c391c64b040dafe54926b79c9961b2c1a9357 100644
@@ -171,7 +171,7 @@ static const struct hc_driver adm5120_hc_driver = {
        .hub_status_data =      admhc_hub_status_data,
        .hub_control =          admhc_hub_control,
        .hub_irq_enable =       admhc_hub_irq_enable,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM
        .bus_suspend =          admhc_bus_suspend,
        .bus_resume =           admhc_bus_resume,
 #endif
@@ -202,6 +202,7 @@ static int usb_hcd_adm5120_remove(struct platform_device *pdev)
 }
 
 #ifdef CONFIG_PM
+/* TODO */
 static int usb_hcd_adm5120_suspend(struct platform_device *dev)
 {
        struct usb_hcd *hcd = platform_get_drvdata(dev);
target/linux/adm5120/files/drivers/usb/host/adm5120-hcd.c
index 48514d5cbec11124eda076f9eef14fcf2a03af77..30fe132d99fa5a293827f6d0647d804a31ac12a3 100644
@@ -45,7 +45,7 @@
 #include "../core/hcd.h"
 #include "../core/hub.h"
 
-#define DRIVER_VERSION "v0.06"
+#define DRIVER_VERSION "v0.10"
 #define DRIVER_AUTHOR  "Gabor Juhos <juhosg at openwrt.org>"
 #define DRIVER_DESC    "ADMtek USB 1.1 Host Controller Driver"
 
@@ -58,8 +58,7 @@
 
 #define        ADMHC_INTR_INIT \
                ( ADMHC_INTR_MIE | ADMHC_INTR_INSM | ADMHC_INTR_FATI \
-               | ADMHC_INTR_RESI | ADMHC_INTR_TDC | ADMHC_INTR_BABI \
-               | ADMHC_INTR_7 | ADMHC_INTR_6 )
+               | ADMHC_INTR_RESI | ADMHC_INTR_TDC | ADMHC_INTR_BABI )
 
 /*-------------------------------------------------------------------------*/
 
@@ -115,12 +114,10 @@ static int admhc_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *ep,
 
                /* 1 TD for setup, 1 for ACK, plus ... */
                td_cnt = 2;
-               if (urb->transfer_buffer_length)
-                       td_cnt++;
-               break;
+               /* FALLTHROUGH */
        case PIPE_BULK:
                /* one TD for every 4096 Bytes (can be upto 8K) */
-               td_cnt = urb->transfer_buffer_length / TD_DATALEN_MAX;
+               td_cnt += urb->transfer_buffer_length / TD_DATALEN_MAX;
                /* ... and for any remaining bytes ... */
                if ((urb->transfer_buffer_length % TD_DATALEN_MAX) != 0)
                        td_cnt++;
@@ -156,7 +153,6 @@ static int admhc_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *ep,
                return -ENOMEM;
 
        urb_priv->ed = ed;
-       urb_priv->urb = urb;
 
        spin_lock_irqsave(&ahcd->lock, flags);
        /* don't submit to a dead HC */
@@ -179,8 +175,13 @@ static int admhc_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *ep,
                goto fail;
        }
 
-       if (ed->type == PIPE_ISOCHRONOUS) {
-               if (ed->state == ED_NEW) {
+       /* schedule the ed if needed */
+       if (ed->state == ED_IDLE) {
+               ret = ed_schedule(ahcd, ed);
+               if (ret < 0)
+                       goto fail0;
+
+               if (ed->type == PIPE_ISOCHRONOUS) {
                        u16     frame = admhc_frame_no(ahcd);
 
                        /* delay a few frames before the first TD */
@@ -192,25 +193,25 @@ static int admhc_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *ep,
                        /* yes, only URB_ISO_ASAP is supported, and
                         * urb->start_frame is never used as input.
                         */
-               } else
-                       urb->start_frame = ed->last_iso + ed->interval;
-       }
+               }
+       } else if (ed->type == PIPE_ISOCHRONOUS)
+               urb->start_frame = ed->last_iso + ed->interval;
 
+       /* fill the TDs and link them to the ed; and
+        * enable that part of the schedule, if needed
+        * and update count of queued periodic urbs
+        */
        urb->hcpriv = urb_priv;
-       td_submit_urb(ahcd, urb_priv->urb);
-
-       /* append it to the ED's queue */
-       list_add_tail(&urb_priv->pending, &ed->urb_pending);
-
-       /* schedule the ED */
-       ret = ed_schedule(ahcd, ed);
+       td_submit_urb(ahcd, urb);
 
+#ifdef ADMHC_VERBOSE_DEBUG
+       admhc_dump_ed(ahcd, "admhc_urb_enqueue", urb_priv->ed, 1);
+#endif
+fail0:
        spin_unlock(&urb->lock);
 fail:
-       if (ret) {
-               urb_priv = urb->hcpriv;
+       if (ret)
                urb_priv_free(ahcd, urb_priv);
-       }
 
        spin_unlock_irqrestore(&ahcd->lock, flags);
        return ret;
@@ -225,43 +226,32 @@ fail:
 static int admhc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
 {
        struct admhcd *ahcd = hcd_to_admhcd(hcd);
-       struct urb_priv *up;
        unsigned long flags;
 
-       up = urb->hcpriv;
-       if (!up)
-               return 0;
-
-       spin_lock_irqsave(&ahcd->lock, flags);
-
 #ifdef ADMHC_VERBOSE_DEBUG
-       urb_print(ahcd, urb, "DEQEUE", 1);
+       urb_print(ahcd, urb, "DEQUEUE", 1);
 #endif
 
+       spin_lock_irqsave(&ahcd->lock, flags);
        if (HC_IS_RUNNING(hcd->state)) {
+               struct urb_priv *urb_priv;
+
                /* Unless an IRQ completed the unlink while it was being
                 * handed to us, flag it for unlink and giveback, and force
                 * some upcoming INTR_SF to call finish_unlinks()
                 */
-               if (up->ed->urb_active != up) {
-                       list_del(&up->pending);
-                       finish_urb(ahcd, urb);
-               } else {
-                       ed_start_deschedule(ahcd, up->ed);
+               urb_priv = urb->hcpriv;
+               if (urb_priv) {
+                       if (urb_priv->ed->state == ED_OPER)
+                               start_ed_unlink(ahcd, urb_priv->ed);
                }
        } else {
                /*
                 * with HC dead, we won't respect hc queue pointers
                 * any more ... just clean up every urb's memory.
                 */
-               if (up->ed->urb_active != up) {
-                       list_del(&up->pending);
+               if (urb->hcpriv)
                        finish_urb(ahcd, urb);
-               } else {
-                       finish_urb(ahcd, urb);
-                       up->ed->urb_active = NULL;
-                       up->ed->state = ED_IDLE;
-               }
        }
        spin_unlock_irqrestore(&ahcd->lock, flags);
 
@@ -273,6 +263,7 @@ static int admhc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
 /* frees config/altsetting state for endpoints,
  * including ED memory, dummy TD, and bulk/intr data toggle
  */
+
 static void admhc_endpoint_disable(struct usb_hcd *hcd,
                struct usb_host_endpoint *ep)
 {
@@ -298,8 +289,8 @@ rescan:
 
        if (!HC_IS_RUNNING(hcd->state)) {
 sanitize:
-               ed->state = ED_UNLINK;
-               admhc_finish_unlinks(ahcd, 0);
+               ed->state = ED_IDLE;
+               finish_unlinks(ahcd, 0);
        }
 
        switch (ed->state) {
@@ -312,11 +303,10 @@ sanitize:
                spin_unlock_irqrestore(&ahcd->lock, flags);
                schedule_timeout_uninterruptible(1);
                goto rescan;
-       case ED_IDLE:
-       case ED_NEW:            /* fully unlinked */
-               if (list_empty(&ed->urb_pending)) {
-                       td_free(ahcd, ed->dummy);
-                       ed_free(ahcd, ed);
+       case ED_IDLE:           /* fully unlinked */
+               if (list_empty(&ed->td_list)) {
+                       td_free (ahcd, ed->dummy);
+                       ed_free (ahcd, ed);
                        break;
                }
                /* else FALL THROUGH */
@@ -324,11 +314,10 @@ sanitize:
                /* caller was supposed to have unlinked any requests;
                 * that's not our job.  can't recover; must leak ed.
                 */
-               admhc_err(ahcd, "leak ed %p (#%02x) %s act %p%s\n",
-                       ed, ep->desc.bEndpointAddress,
-                       ed_statestring(ed->state),
-                       ed->urb_active,
-                       list_empty(&ed->urb_pending) ? "" : " (has urbs)");
+               admhc_err(ahcd, "leak ed %p (#%02x) state %d%s\n",
+                       ed, ep->desc.bEndpointAddress, ed->state,
+                       list_empty(&ed->td_list) ? "" : " (has tds)");
+               td_free(ahcd, ed->dummy);
                break;
        }
 
@@ -347,9 +336,15 @@ static int admhc_get_frame_number(struct usb_hcd *hcd)
 
 static void admhc_usb_reset(struct admhcd *ahcd)
 {
-       admhc_dbg(ahcd, "usb reset\n");
+#if 0
+       ahcd->hc_control = admhc_readl(ahcd, &ahcd->regs->control);
+       ahcd->hc_control &= OHCI_CTRL_RWC;
+       admhc_writel(ahcd, ahcd->hc_control, &ahcd->regs->control);
+#else
+       /* FIXME */
        ahcd->host_control = ADMHC_BUSS_RESET;
-       admhc_writel(ahcd, ahcd->host_control, &ahcd->regs->host_control);
+       admhc_writel(ahcd, ahcd->host_control ,&ahcd->regs->host_control);
+#endif
 }
 
 /* admhc_shutdown forcibly disables IRQs and DMA, helping kexec and
@@ -361,12 +356,12 @@ admhc_shutdown(struct usb_hcd *hcd)
 {
        struct admhcd *ahcd;
 
-       admhc_dbg(ahcd, "shutdown\n");
-
        ahcd = hcd_to_admhcd(hcd);
        admhc_intr_disable(ahcd, ADMHC_INTR_MIE);
        admhc_dma_disable(ahcd);
        admhc_usb_reset(ahcd);
+       /* flush the writes */
+       admhc_writel_flush(ahcd);
 }
 
 /*-------------------------------------------------------------------------*
@@ -398,7 +393,7 @@ static void admhc_eds_cleanup(struct admhcd *ahcd)
        ahcd->ed_head = NULL;
 }
 
-#define ED_DUMMY_INFO  0
+#define ED_DUMMY_INFO  (ED_SPEED_FULL | ED_SKIP)
 
 static int admhc_eds_init(struct admhcd *ahcd)
 {
@@ -514,6 +509,17 @@ static int admhc_run(struct admhcd *ahcd)
                /* also: power/overcurrent flags in rhdesc */
        }
 
+#if 0  /* TODO: not applicable */
+       /* Reset USB nearly "by the book".  RemoteWakeupConnected was
+        * saved if boot firmware (BIOS/SMM/...) told us it's connected,
+        * or if bus glue did the same (e.g. for PCI add-in cards with
+        * PCI PM support).
+        */
+       if ((ahcd->hc_control & OHCI_CTRL_RWC) != 0
+                       && !device_may_wakeup(hcd->self.controller))
+               device_init_wakeup(hcd->self.controller, 1);
+#endif
+
        switch (ahcd->host_control & ADMHC_HC_BUSS) {
        case ADMHC_BUSS_OPER:
                temp = 0;
@@ -531,14 +537,19 @@ static int admhc_run(struct admhcd *ahcd)
                break;
        }
        admhc_writel(ahcd, ahcd->host_control, &ahcd->regs->host_control);
-       msleep(temp);
 
+       /* flush the writes */
+       admhc_writel_flush(ahcd);
+
+       msleep(temp);
        temp = admhc_read_rhdesc(ahcd);
        if (!(temp & ADMHC_RH_NPS)) {
                /* power down each port */
                for (temp = 0; temp < ahcd->num_ports; temp++)
                        admhc_write_portstatus(ahcd, temp, ADMHC_PS_CPP);
        }
+       /* flush those writes */
+       admhc_writel_flush(ahcd);
 
        /* 2msec timelimit here means no irqs/preempt */
        spin_lock_irq(&ahcd->lock);
@@ -566,10 +577,25 @@ static int admhc_run(struct admhcd *ahcd)
        hcd->poll_rh = 1;
        hcd->uses_new_polling = 1;
 
+#if 0
+       /* wake on ConnectStatusChange, matching external hubs */
+       admhc_writel(ahcd, RH_HS_DRWE, &ahcd->regs->roothub.status);
+#else
+       /* FIXME roothub_write_status (ahcd, ADMHC_RH_DRWE); */
+#endif
+
+       /* Choose the interrupts we care about now, others later on demand */
+       admhc_intr_ack(ahcd, ~0);
+       admhc_intr_enable(ahcd, ADMHC_INTR_INIT);
+
+       admhc_writel(ahcd, ADMHC_RH_NPS | ADMHC_RH_LPSC, &ahcd->regs->rhdesc);
+
+       /* flush those writes */
+       admhc_writel_flush(ahcd);
+
        /* start controller operations */
        ahcd->host_control = ADMHC_BUSS_OPER;
        admhc_writel(ahcd, ahcd->host_control, &ahcd->regs->host_control);
-       hcd->state = HC_STATE_RUNNING;
 
        temp = 20;
        while ((admhc_readl(ahcd, &ahcd->regs->host_control)
@@ -582,24 +608,31 @@ static int admhc_run(struct admhcd *ahcd)
                mdelay(1);
        }
 
+       hcd->state = HC_STATE_RUNNING;
+
+       ahcd->next_statechange = jiffies + STATECHANGE_DELAY;
+
 #if 0
-       /* FIXME */
-       /* wake on ConnectStatusChange, matching external hubs */
-       admhc_writel(ahcd, ADMHC_RH_DRWE, &ahcd->regs->rhdesc);
-#endif
+       /* FIXME: enabling DMA is always failed here for an unknown reason */
+       admhc_dma_enable(ahcd);
 
-       /* Choose the interrupts we care about now, others later on demand */
-       temp = ADMHC_INTR_INIT;
-       admhc_intr_ack(ahcd, ~0);
-       admhc_intr_enable(ahcd, temp);
+       temp = 200;
+       while ((admhc_readl(ahcd, &ahcd->regs->host_control)
+                       & ADMHC_HC_DMAE) != ADMHC_HC_DMAE) {
+               if (--temp == 0) {
+                       spin_unlock_irq(&ahcd->lock);
+                       admhc_err(ahcd, "unable to enable DMA!\n");
+                       admhc_dump(ahcd, 1);
+                       return -1;
+               }
+               mdelay(1);
+       }
 
-       admhc_writel(ahcd, ADMHC_RH_NPS | ADMHC_RH_LPSC, &ahcd->regs->rhdesc);
+#endif
 
-       ahcd->next_statechange = jiffies + STATECHANGE_DELAY;
        spin_unlock_irq(&ahcd->lock);
 
        mdelay(ADMHC_POTPGT);
-       hcd->state = HC_STATE_RUNNING;
 
        return 0;
 }
@@ -615,38 +648,25 @@ static irqreturn_t admhc_irq(struct usb_hcd *hcd)
        u32 ints;
 
        ints = admhc_readl(ahcd, &regs->int_status);
-       if (!(ints & ADMHC_INTR_INTA)) {
+       if ((ints & ADMHC_INTR_INTA) == 0) {
                /* no unmasked interrupt status is set */
-               admhc_err(ahcd, "spurious interrupt %08x\n", ints);
                return IRQ_NONE;
        }
 
        ints &= admhc_readl(ahcd, &regs->int_enable);
-       if (!ints) {
-               admhc_err(ahcd, "hardware irq problems?\n");
-               return IRQ_NONE;
-       }
-
-       if (ints & ADMHC_INTR_6) {
-               admhc_err(ahcd, "unknown interrupt 6\n");
-               admhc_dump(ahcd, 0);
-       }
-
-       if (ints & ADMHC_INTR_7) {
-               admhc_err(ahcd, "unknown interrupt 7\n");
-               admhc_dump(ahcd, 0);
-       }
 
        if (ints & ADMHC_INTR_FATI) {
+               /* e.g. due to PCI Master/Target Abort */
                admhc_disable(ahcd);
                admhc_err(ahcd, "Fatal Error, controller disabled\n");
+               admhc_dump(ahcd, 1);
                admhc_usb_reset(ahcd);
        }
 
        if (ints & ADMHC_INTR_BABI) {
-               admhc_disable(ahcd);
+               admhc_intr_disable(ahcd, ADMHC_INTR_BABI);
+               admhc_intr_ack(ahcd, ADMHC_INTR_BABI);
                admhc_err(ahcd, "Babble Detected\n");
-               admhc_usb_reset(ahcd);
        }
 
        if (ints & ADMHC_INTR_INSM) {
@@ -682,6 +702,7 @@ static irqreturn_t admhc_irq(struct usb_hcd *hcd)
 
        if (ints & ADMHC_INTR_TDC) {
                admhc_vdbg(ahcd, "Transfer Descriptor Complete\n");
+               admhc_intr_ack(ahcd, ADMHC_INTR_TDC);
                if (HC_IS_RUNNING(hcd->state))
                        admhc_intr_disable(ahcd, ADMHC_INTR_TDC);
                spin_lock(&ahcd->lock);
@@ -693,19 +714,45 @@ static irqreturn_t admhc_irq(struct usb_hcd *hcd)
 
        if (ints & ADMHC_INTR_SO) {
                /* could track INTR_SO to reduce available PCI/... bandwidth */
-               admhc_err(ahcd, "Schedule Overrun\n");
+               admhc_vdbg(ahcd, "Schedule Overrun\n");
        }
 
+#if 1
+       spin_lock(&ahcd->lock);
+       if (ahcd->ed_rm_list)
+               finish_unlinks(ahcd, admhc_frame_no(ahcd));
+
+       if ((ints & ADMHC_INTR_SOFI) != 0 && !ahcd->ed_rm_list
+                       && HC_IS_RUNNING(hcd->state))
+               admhc_intr_disable(ahcd, ADMHC_INTR_SOFI);
+       spin_unlock(&ahcd->lock);
+#else
        if (ints & ADMHC_INTR_SOFI) {
+               admhc_vdbg(ahcd, "Start Of Frame\n");
                spin_lock(&ahcd->lock);
+
                /* handle any pending ED removes */
-               admhc_finish_unlinks(ahcd, admhc_frame_no(ahcd));
+               finish_unlinks(ahcd, admhc_frameno(ahcd));
+
+               /* leaving INTR_SOFI enabled when there's still unlinking
+                * to be done in the (next frame).
+                */
+               if ((ahcd->ed_rm_list == NULL) ||
+                       HC_IS_RUNNING(hcd->state) == 0)
+                       /*
+                        * disable INTR_SOFI if there are no unlinking to be
+                        * done (in the next frame)
+                        */
+                       admhc_intr_disable(ahcd, ADMHC_INTR_SOFI);
+
                spin_unlock(&ahcd->lock);
        }
+#endif
 
        if (HC_IS_RUNNING(hcd->state)) {
                admhc_intr_ack(ahcd, ints);
                admhc_intr_enable(ahcd, ADMHC_INTR_MIE);
+               admhc_writel_flush(ahcd);
        }
 
        return IRQ_HANDLED;
@@ -722,7 +769,7 @@ static void admhc_stop(struct usb_hcd *hcd)
        flush_scheduled_work();
 
        admhc_usb_reset(ahcd);
-       admhc_intr_disable(ahcd, ~0);
+       admhc_intr_disable(ahcd, ADMHC_INTR_MIE);
 
        free_irq(hcd->irq, hcd);
        hcd->irq = -1;
target/linux/adm5120/files/drivers/usb/host/adm5120-mem.c
index daf59bc434c62f8370ad08089a9013d957e40fcd..924221be15dd27e3f1389cf011c042c10a55f084 100644
@@ -27,6 +27,7 @@ static void admhc_hcd_init(struct admhcd *ahcd)
 {
        ahcd->next_statechange = jiffies;
        spin_lock_init(&ahcd->lock);
+       INIT_LIST_HEAD(&ahcd->pending);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -75,6 +76,19 @@ static void admhc_mem_cleanup(struct admhcd *ahcd)
 
 /*-------------------------------------------------------------------------*/
 
+/* ahcd "done list" processing needs this mapping */
+static inline struct td *dma_to_td(struct admhcd *ahcd, dma_addr_t td_dma)
+{
+       struct td *td;
+
+       td_dma &= TD_MASK;
+       td = ahcd->td_hash[TD_HASH_FUNC(td_dma)];
+       while (td && td->td_dma != td_dma)
+               td = td->td_hash;
+
+       return td;
+}
+
 /* TDs ... */
 static struct td *td_alloc(struct admhcd *ahcd, gfp_t mem_flags)
 {
@@ -87,13 +101,29 @@ static struct td *td_alloc(struct admhcd *ahcd, gfp_t mem_flags)
 
        /* in case ahcd fetches it, make it look dead */
        memset(td, 0, sizeof *td);
+       td->hwNextTD = cpu_to_hc32(ahcd, dma);
        td->td_dma = dma;
+       /* hashed in td_fill */
 
        return td;
 }
 
 static void td_free(struct admhcd *ahcd, struct td *td)
 {
+       struct td **prev = &ahcd->td_hash[TD_HASH_FUNC(td->td_dma)];
+
+       while (*prev && *prev != td)
+               prev = &(*prev)->td_hash;
+       if (*prev)
+               *prev = td->td_hash;
+#if 0
+       /* TODO: remove */
+       else if ((td->hwINFO & cpu_to_hc32(ahcd, TD_DONE)) != 0)
+               admhc_dbg (ahcd, "no hash for td %p\n", td);
+#else
+       else if ((td->flags & TD_FLAG_DONE) != 0)
+               admhc_dbg (ahcd, "no hash for td %p\n", td);
+#endif
        dma_pool_free(ahcd->td_cache, td, td->td_dma);
 }
 
@@ -112,7 +142,8 @@ static struct ed *ed_alloc(struct admhcd *ahcd, gfp_t mem_flags)
        memset(ed, 0, sizeof(*ed));
        ed->dma = dma;
 
-       INIT_LIST_HEAD(&ed->urb_pending);
+       INIT_LIST_HEAD(&ed->td_list);
+       INIT_LIST_HEAD(&ed->urb_list);
 
        return ed;
 }
@@ -133,6 +164,7 @@ static void urb_priv_free(struct admhcd *ahcd, struct urb_priv *urb_priv)
                if (urb_priv->td[i])
                        td_free(ahcd, urb_priv->td[i]);
 
+       list_del(&urb_priv->pending);
        kfree(urb_priv);
 }
 
@@ -140,23 +172,20 @@ static struct urb_priv *urb_priv_alloc(struct admhcd *ahcd, int num_tds,
                gfp_t mem_flags)
 {
        struct urb_priv *priv;
-       int i;
 
        /* allocate the private part of the URB */
        priv = kzalloc(sizeof(*priv) + sizeof(struct td) * num_tds, mem_flags);
        if (!priv)
                goto err;
 
-       /* allocate the TDs */
-       for (i = 0; i < num_tds; i++) {
-               priv->td[i] = td_alloc(ahcd, mem_flags);
-               if (priv->td[i] == NULL)
+       /* allocate the TDs (deferring hash chain updates) */
+       for (priv->td_cnt = 0; priv->td_cnt < num_tds; priv->td_cnt++) {
+               priv->td[priv->td_cnt] = td_alloc(ahcd, mem_flags);
+               if (priv->td[priv->td_cnt] == NULL)
                        goto err_free;
-               priv->td[i]->index = i;
        }
 
        INIT_LIST_HEAD(&priv->pending);
-       priv->td_cnt = num_tds;
 
        return priv;
 
target/linux/adm5120/files/drivers/usb/host/adm5120-q.c
index c96c327e8e88575dd200752824373d0c9dbd2025..24acc87e780f74a2dacc0cdb0c8ed967f47d3800 100644
@@ -38,7 +38,7 @@ __acquires(ahcd->lock)
                        && urb->status == 0) {
                urb->status = -EREMOTEIO;
 #ifdef ADMHC_VERBOSE_DEBUG
-               urb_print(ahcd, urb, "SHORT", usb_pipeout (urb->pipe));
+               urb_print(urb, "SHORT", usb_pipeout (urb->pipe));
 #endif
        }
        spin_unlock(&urb->lock);
@@ -53,7 +53,7 @@ __acquires(ahcd->lock)
        }
 
 #ifdef ADMHC_VERBOSE_DEBUG
-       urb_print(ahcd, urb, "FINISH", 0);
+       urb_print(urb, "RET", usb_pipeout (urb->pipe));
 #endif
 
        /* urb->complete() can reenter this HCD */
@@ -67,6 +67,189 @@ __acquires(ahcd->lock)
  * ED handling functions
  *-------------------------------------------------------------------------*/
 
+#if 0  /* FIXME */
+/* search for the right schedule branch to use for a periodic ed.
+ * does some load balancing; returns the branch, or negative errno.
+ */
+static int balance(struct admhcd *ahcd, int interval, int load)
+{
+       int     i, branch = -ENOSPC;
+
+       /* iso periods can be huge; iso tds specify frame numbers */
+       if (interval > NUM_INTS)
+               interval = NUM_INTS;
+
+       /* search for the least loaded schedule branch of that period
+        * that has enough bandwidth left unreserved.
+        */
+       for (i = 0; i < interval ; i++) {
+               if (branch < 0 || ahcd->load [branch] > ahcd->load [i]) {
+                       int     j;
+
+                       /* usb 1.1 says 90% of one frame */
+                       for (j = i; j < NUM_INTS; j += interval) {
+                               if ((ahcd->load [j] + load) > 900)
+                                       break;
+                       }
+                       if (j < NUM_INTS)
+                               continue;
+                       branch = i;
+               }
+       }
+       return branch;
+}
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+#if 0  /* FIXME */
+/* both iso and interrupt requests have periods; this routine puts them
+ * into the schedule tree in the apppropriate place.  most iso devices use
+ * 1msec periods, but that's not required.
+ */
+static void periodic_link (struct admhcd *ahcd, struct ed *ed)
+{
+       unsigned        i;
+
+       admhc_vdbg (ahcd, "link %sed %p branch %d [%dus.], interval %d\n",
+               (ed->hwINFO & cpu_to_hc32 (ahcd, ED_ISO)) ? "iso " : "",
+               ed, ed->branch, ed->load, ed->interval);
+
+       for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
+               struct ed       **prev = &ahcd->periodic [i];
+               __hc32          *prev_p = &ahcd->hcca->int_table [i];
+               struct ed       *here = *prev;
+
+               /* sorting each branch by period (slow before fast)
+                * lets us share the faster parts of the tree.
+                * (plus maybe: put interrupt eds before iso)
+                */
+               while (here && ed != here) {
+                       if (ed->interval > here->interval)
+                               break;
+                       prev = &here->ed_next;
+                       prev_p = &here->hwNextED;
+                       here = *prev;
+               }
+               if (ed != here) {
+                       ed->ed_next = here;
+                       if (here)
+                               ed->hwNextED = *prev_p;
+                       wmb ();
+                       *prev = ed;
+                       *prev_p = cpu_to_hc32(ahcd, ed->dma);
+                       wmb();
+               }
+               ahcd->load [i] += ed->load;
+       }
+       admhcd_to_hcd(ahcd)->self.bandwidth_allocated += ed->load / ed->interval;
+}
+#endif
+
+/* link an ed into the HC chain */
+
+static int ed_schedule(struct admhcd *ahcd, struct ed *ed)
+{
+       struct ed *old_tail;
+
+       if (admhcd_to_hcd(ahcd)->state == HC_STATE_QUIESCING)
+               return -EAGAIN;
+
+       ed->state = ED_OPER;
+
+       old_tail = ahcd->ed_tails[ed->type];
+
+       ed->ed_next = old_tail->ed_next;
+       if (ed->ed_next) {
+               ed->ed_next->ed_prev = ed;
+               ed->hwNextED = cpu_to_hc32(ahcd, ed->ed_next->dma);
+       }
+       ed->ed_prev = old_tail;
+
+       old_tail->ed_next = ed;
+       old_tail->hwNextED = cpu_to_hc32(ahcd, ed->dma);
+
+       ahcd->ed_tails[ed->type] = ed;
+
+       admhc_dma_enable(ahcd);
+
+       return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#if 0  /* FIXME */
+/* scan the periodic table to find and unlink this ED */
+static void periodic_unlink (struct admhcd *ahcd, struct ed *ed)
+{
+       int     i;
+
+       for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
+               struct ed       *temp;
+               struct ed       **prev = &ahcd->periodic [i];
+               __hc32          *prev_p = &ahcd->hcca->int_table [i];
+
+               while (*prev && (temp = *prev) != ed) {
+                       prev_p = &temp->hwNextED;
+                       prev = &temp->ed_next;
+               }
+               if (*prev) {
+                       *prev_p = ed->hwNextED;
+                       *prev = ed->ed_next;
+               }
+               ahcd->load [i] -= ed->load;
+       }
+
+       admhcd_to_hcd(ahcd)->self.bandwidth_allocated -= ed->load / ed->interval;
+       admhc_vdbg (ahcd, "unlink %sed %p branch %d [%dus.], interval %d\n",
+               (ed->hwINFO & cpu_to_hc32 (ahcd, ED_ISO)) ? "iso " : "",
+               ed, ed->branch, ed->load, ed->interval);
+}
+#endif
+
+/* unlink an ed from the HC chain.
+ * just the link to the ed is unlinked.
+ * the link from the ed still points to another operational ed or 0
+ * so the HC can eventually finish the processing of the unlinked ed
+ * (assuming it already started that, which needn't be true).
+ *
+ * ED_UNLINK is a transient state: the HC may still see this ED, but soon
+ * it won't.  ED_SKIP means the HC will finish its current transaction,
+ * but won't start anything new.  The TD queue may still grow; device
+ * drivers don't know about this HCD-internal state.
+ *
+ * When the HC can't see the ED, something changes ED_UNLINK to one of:
+ *
+ *  - ED_OPER: when there's any request queued, the ED gets rescheduled
+ *    immediately.  HC should be working on them.
+ *
+ *  - ED_IDLE:  when there's no TD queue. there's no reason for the HC
+ *    to care about this ED; safe to disable the endpoint.
+ *
+ * When finish_unlinks() runs later, after SOF interrupt, it will often
+ * complete one or more URB unlinks before making that state change.
+ */
+static void ed_deschedule(struct admhcd *ahcd, struct ed *ed)
+{
+       ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
+       wmb();
+       ed->state = ED_UNLINK;
+
+       /* remove this ED from the HC list */
+       ed->ed_prev->hwNextED = ed->hwNextED;
+
+       /* and remove it from our list also */
+       ed->ed_prev->ed_next = ed->ed_next;
+
+       if (ed->ed_next)
+               ed->ed_next->ed_prev = ed->ed_prev;
+
+       if (ahcd->ed_tails[ed->type] == ed)
+               ahcd->ed_tails[ed->type] = ed->ed_prev;
+}
+
+/*-------------------------------------------------------------------------*/
+
 static struct ed *ed_create(struct admhcd *ahcd, unsigned int type, u32 info)
 {
        struct ed *ed;
@@ -90,15 +273,13 @@ static struct ed *ed_create(struct admhcd *ahcd, unsigned int type, u32 info)
                break;
        }
 
-       info |= ED_SKIP;
-
        ed->dummy = td;
-       ed->state = ED_NEW;
+       ed->state = ED_IDLE;
        ed->type = type;
 
        ed->hwINFO = cpu_to_hc32(ahcd, info);
        ed->hwTailP = cpu_to_hc32(ahcd, td->td_dma);
-       ed->hwHeadP = cpu_to_hc32(ahcd, td->td_dma);
+       ed->hwHeadP = ed->hwTailP;      /* ED_C, ED_H zeroed */
 
        return ed;
 
@@ -114,7 +295,10 @@ err:
 static struct ed *ed_get(struct admhcd *ahcd,  struct usb_host_endpoint *ep,
        struct usb_device *udev, unsigned int pipe, int interval)
 {
-       struct ed *ed;
+       struct ed               *ed;
+       unsigned long           flags;
+
+       spin_lock_irqsave(&ahcd->lock, flags);
 
        ed = ep->hcpriv;
        if (!ed) {
@@ -134,104 +318,33 @@ static struct ed *ed_get(struct admhcd *ahcd,    struct usb_host_endpoint *ep,
                        ep->hcpriv = ed;
        }
 
-       return ed;
-}
-
-static void ed_next_urb(struct admhcd *ahcd, struct ed *ed)
-{
-       struct urb_priv *up;
-       u32 carry;
-
-       up = list_entry(ed->urb_pending.next, struct urb_priv, pending);
-       list_del(&up->pending);
-
-       ed->urb_active = up;
-       ed->state = ED_OPER;
-
-#ifdef ADMHC_VERBOSE_DEBUG
-       urb_print(ahcd, up->urb, "NEXT", 0);
-       admhc_dump_ed(ahcd, " ", ed, 0);
-#endif
-
-       up->td[up->td_cnt-1]->hwNextTD = cpu_to_hc32(ahcd, ed->dummy->td_dma);
+       spin_unlock_irqrestore(&ahcd->lock, flags);
 
-       carry = hc32_to_cpup(ahcd, &ed->hwHeadP) & ED_C;
-       ed->hwHeadP = cpu_to_hc32(ahcd, up->td[0]->td_dma | carry);
-       ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP);
-}
-
-/* link an ed into the HC chain */
-static int ed_schedule(struct admhcd *ahcd, struct ed *ed)
-{
-       struct ed *old_tail;
-
-       if (admhcd_to_hcd(ahcd)->state == HC_STATE_QUIESCING)
-               return -EAGAIN;
-
-       if (ed->state == ED_NEW) {
-               ed->state = ED_IDLE;
-
-               old_tail = ahcd->ed_tails[ed->type];
-
-               ed->ed_next = old_tail->ed_next;
-               if (ed->ed_next) {
-                       ed->ed_next->ed_prev = ed;
-                       ed->hwNextED = cpu_to_hc32(ahcd, ed->ed_next->dma);
-               }
-               ed->ed_prev = old_tail;
-
-               old_tail->ed_next = ed;
-               old_tail->hwNextED = cpu_to_hc32(ahcd, ed->dma);
-
-               ahcd->ed_tails[ed->type] = ed;
-               ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP);
-       }
-
-#ifdef ADMHC_VERBOSE_DEBUG
-       admhc_dump_ed(ahcd, "ED-SCHED", ed, 0);
-#endif
-
-       if (!ed->urb_active) {
-               ed_next_urb(ahcd, ed);
-               admhc_dma_enable(ahcd);
-       }
-
-       return 0;
+       return ed;
 }
 
-static void ed_deschedule(struct admhcd *ahcd, struct ed *ed)
-{
-
-#ifdef ADMHC_VERBOSE_DEBUG
-       admhc_dump_ed(ahcd, "ED-DESCHED", ed, 0);
-#endif
-
-       /* remove this ED from the HC list */
-       ed->ed_prev->hwNextED = ed->hwNextED;
-
-       /* and remove it from our list */
-       ed->ed_prev->ed_next = ed->ed_next;
-
-       if (ed->ed_next) {
-               ed->ed_next->ed_prev = ed->ed_prev;
-               ed->ed_next = NULL;
-       }
-
-       if (ahcd->ed_tails[ed->type] == ed)
-               ahcd->ed_tails[ed->type] = ed->ed_prev;
-
-       ed->state = ED_NEW;
-}
+/*-------------------------------------------------------------------------*/
 
-static void ed_start_deschedule(struct admhcd *ahcd, struct ed *ed)
+/* request unlinking of an endpoint from an operational HC.
+ * put the ep on the rm_list
+ * real work is done at the next start frame (SOFI) hardware interrupt
+ * caller guarantees HCD is running, so hardware access is safe,
+ * and that ed->state is ED_OPER
+ */
+static void start_ed_unlink(struct admhcd *ahcd, struct ed *ed)
 {
+       ed->hwINFO |= cpu_to_hc32 (ahcd, ED_DEQUEUE);
+       ed_deschedule(ahcd, ed);
 
-#ifdef ADMHC_VERBOSE_DEBUG
-       admhc_dump_ed(ahcd, "ED-UNLINK", ed, 0);
-#endif
+       /* add this ED into the remove list */
+       ed->ed_rm_next = ahcd->ed_rm_list;
+       ahcd->ed_rm_list = ed;
 
-       ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
-       ed->state = ED_UNLINK;
+       /* enable SOF interrupt */
+       admhc_intr_ack(ahcd, ADMHC_INTR_SOFI);
+       admhc_intr_enable(ahcd, ADMHC_INTR_SOFI);
+       /* flush those writes */
+       admhc_writel_flush(ahcd);
 
        /* SOF interrupt might get delayed; record the frame counter value that
         * indicates when the HC isn't looking at it, so concurrent unlinks
@@ -239,34 +352,47 @@ static void ed_start_deschedule(struct admhcd *ahcd, struct ed *ed)
         * SOF is triggered.
         */
        ed->tick = admhc_frame_no(ahcd) + 1;
-
-       admhc_intr_enable(ahcd, ADMHC_INTR_SOFI);
 }
 
 /*-------------------------------------------------------------------------*
  * TD handling functions
  *-------------------------------------------------------------------------*/
 
-static void td_fill(struct admhcd *ahcd, u32 info, dma_addr_t data, int len,
-               struct urb_priv *up)
+/* enqueue next TD for this URB (OHCI spec 5.2.8.2) */
+
+static void
+td_fill(struct admhcd *ahcd, u32 info, dma_addr_t data, int len,
+       struct urb *urb, int index)
 {
-       struct td *td;
-       u32 cbl = 0;
+       struct td               *td, *td_pt;
+       struct urb_priv         *urb_priv = urb->hcpriv;
+       int                     hash;
+       u32                     cbl = 0;
+
+#if 1
+       if (index == (urb_priv->td_cnt - 1) &&
+                       ((urb->transfer_flags & URB_NO_INTERRUPT) == 0))
+               cbl |= TD_IE;
+#else
+       if (index == (urb_priv->td_cnt - 1))
+               cbl |= TD_IE;
+#endif
 
-       if (up->td_idx >= up->td_cnt) {
-               admhc_err(ahcd, "td_fill error, idx=%d, cnt=%d\n", up->td_idx,
-                               up->td_cnt);
-               BUG();
-       }
+       /* use this td as the next dummy */
+       td_pt = urb_priv->td[index];
 
-       td = up->td[up->td_idx];
+       /* fill the old dummy TD */
+       td = urb_priv->td[index] = urb_priv->ed->dummy;
+       urb_priv->ed->dummy = td_pt;
+
+       td->ed = urb_priv->ed;
+       td->next_dl_td = NULL;
+       td->index = index;
+       td->urb = urb;
        td->data_dma = data;
        if (!len)
                data = 0;
 
-       if (up->td_idx == up->td_cnt-1)
-               cbl |= TD_IE;
-
        if (data)
                cbl |= (len & TD_BL_MASK);
 
@@ -276,11 +402,19 @@ static void td_fill(struct admhcd *ahcd, u32 info, dma_addr_t data, int len,
        td->hwINFO = cpu_to_hc32(ahcd, info);
        td->hwDBP = cpu_to_hc32(ahcd, data);
        td->hwCBL = cpu_to_hc32(ahcd, cbl);
+       td->hwNextTD = cpu_to_hc32(ahcd, td_pt->td_dma);
+
+       /* append to queue */
+       list_add_tail(&td->td_list, &td->ed->td_list);
 
-       if (up->td_idx > 0)
-               up->td[up->td_idx-1]->hwNextTD = cpu_to_hc32(ahcd, td->td_dma);
+       /* hash it for later reverse mapping */
+       hash = TD_HASH_FUNC(td->td_dma);
+       td->td_hash = ahcd->td_hash[hash];
+       ahcd->td_hash[hash] = td;
 
-       up->td_idx++;
+       /* HC might read the TD (or cachelines) right away ... */
+       wmb();
+       td->ed->hwTailP = td->hwNextTD;
 }
 
 /*-------------------------------------------------------------------------*/
@@ -298,7 +432,9 @@ static void td_submit_urb(struct admhcd *ahcd, struct urb *urb)
        int             cnt = 0;
        u32             info = 0;
        int             is_out = usb_pipeout(urb->pipe);
+       int             periodic = 0;
        u32             toggle = 0;
+       struct td       *td;
 
        /* OHCI handles the bulk/interrupt data toggles itself.  We just
         * use the device toggle bits for resetting, and rely on the fact
@@ -314,6 +450,7 @@ static void td_submit_urb(struct admhcd *ahcd, struct urb *urb)
        }
 
        urb_priv->td_idx = 0;
+       list_add(&urb_priv->pending, &ahcd->pending);
 
        if (data_len)
                data = urb->transfer_dma;
@@ -334,7 +471,7 @@ static void td_submit_urb(struct admhcd *ahcd, struct urb *urb)
                info |= (urb->start_frame & TD_FN_MASK);
                info |= (urb->interval & TD_ISI_MASK) << TD_ISI_SHIFT;
 
-               td_fill(ahcd, info, data, data_len, urb_priv);
+               td_fill(ahcd, info, data, data_len, urb, cnt);
                cnt++;
 
                admhcd_to_hcd(ahcd)->self.bandwidth_int_reqs++;
@@ -348,20 +485,20 @@ static void td_submit_urb(struct admhcd *ahcd, struct urb *urb)
                /* TDs _could_ transfer up to 8K each */
                while (data_len > TD_DATALEN_MAX) {
                        td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle),
-                               data, TD_DATALEN_MAX, urb_priv);
+                               data, TD_DATALEN_MAX, urb, cnt);
                        data += TD_DATALEN_MAX;
                        data_len -= TD_DATALEN_MAX;
                        cnt++;
                }
 
                td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle), data,
-                       data_len, urb_priv);
+                       data_len, urb, cnt);
                cnt++;
 
                if ((urb->transfer_flags & URB_ZERO_PACKET)
                                && (cnt < urb_priv->td_cnt)) {
                        td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle),
-                               0, 0, urb_priv);
+                               0, 0, urb, cnt);
                        cnt++;
                }
                break;
@@ -372,24 +509,21 @@ static void td_submit_urb(struct admhcd *ahcd, struct urb *urb)
        case PIPE_CONTROL:
                /* fill a TD for the setup */
                info = TD_SCC_NOTACCESSED | TD_DP_SETUP | TD_T_DATA0;
-               td_fill(ahcd, info, urb->setup_dma, 8, urb_priv);
-               cnt++;
+               td_fill(ahcd, info, urb->setup_dma, 8, urb, cnt++);
 
                if (data_len > 0) {
                        /* fill a TD for the data */
                        info = TD_SCC_NOTACCESSED | TD_T_DATA1;
                        info |= is_out ? TD_DP_OUT : TD_DP_IN;
                        /* NOTE:  mishandles transfers >8K, some >4K */
-                       td_fill(ahcd, info, data, data_len, urb_priv);
-                       cnt++;
+                       td_fill(ahcd, info, data, data_len, urb, cnt++);
                }
 
                /* fill a TD for the ACK */
                info = (is_out || data_len == 0)
                        ? TD_SCC_NOTACCESSED | TD_DP_IN | TD_T_DATA1
                        : TD_SCC_NOTACCESSED | TD_DP_OUT | TD_T_DATA1;
-               td_fill(ahcd, info, data, 0, urb_priv);
-               cnt++;
+               td_fill(ahcd, info, data, 0, urb, cnt++);
 
                break;
 
@@ -406,8 +540,7 @@ static void td_submit_urb(struct admhcd *ahcd, struct urb *urb)
                        frame &= TD_FN_MASK;
                        td_fill(ahcd, info | frame,
                                data + urb->iso_frame_desc[cnt].offset,
-                               urb->iso_frame_desc[cnt].length,
-                               urb_priv);
+                               urb->iso_frame_desc[cnt].length, urb, cnt);
                }
                admhcd_to_hcd(ahcd)->self.bandwidth_isoc_reqs++;
                break;
@@ -415,18 +548,19 @@ static void td_submit_urb(struct admhcd *ahcd, struct urb *urb)
 
        if (urb_priv->td_cnt != cnt)
                admhc_err(ahcd, "bad number of tds created for urb %p\n", urb);
-
-       urb_priv->td_idx = 0;
 }
 
+/*-------------------------------------------------------------------------*
+ * Done List handling functions
+ *-------------------------------------------------------------------------*/
+
 /* calculate transfer length/status and update the urb
  * PRECONDITION:  irqsafe (only for urb->status locking)
  */
 static int td_done(struct admhcd *ahcd, struct urb *urb, struct td *td)
 {
+       struct urb_priv *urb_priv = urb->hcpriv;
        u32     info = hc32_to_cpup(ahcd, &td->hwINFO);
-       u32     dbp = hc32_to_cpup(ahcd, &td->hwDBP);
-       u32     cbl = TD_BL_GET(hc32_to_cpup(ahcd, &td->hwCBL));
        int     type = usb_pipetype(urb->pipe);
        int     cc;
 
@@ -447,17 +581,16 @@ static int td_done(struct admhcd *ahcd, struct urb *urb, struct td *td)
                        return;
 
                if (usb_pipeout (urb->pipe))
-                       dlen = urb->iso_frame_desc[td->index].length;
+                       dlen = urb->iso_frame_desc [td->index].length;
                else {
                        /* short reads are always OK for ISO */
                        if (cc == TD_DATAUNDERRUN)
                                cc = TD_CC_NOERROR;
                        dlen = tdPSW & 0x3ff;
                }
-
                urb->actual_length += dlen;
-               urb->iso_frame_desc[td->index].actual_length = dlen;
-               urb->iso_frame_desc[td->index].status = cc_to_error[cc];
+               urb->iso_frame_desc [td->index].actual_length = dlen;
+               urb->iso_frame_desc [td->index].status = cc_to_error [cc];
 
                if (cc != TD_CC_NOERROR)
                        admhc_vdbg (ahcd,
@@ -469,144 +602,354 @@ static int td_done(struct admhcd *ahcd, struct urb *urb, struct td *td)
         * might not be reported as errors.
         */
        } else {
-
-#ifdef ADMHC_VERBOSE_DEBUG
-               admhc_dump_td(ahcd, "td_done", td);
-#endif
+               u32     bl = TD_BL_GET(hc32_to_cpup(ahcd, &td->hwCBL));
+               u32     tdDBP = hc32_to_cpup(ahcd, &td->hwDBP);
+
+               /* update packet status if needed (short is normally ok) */
+               if (cc == TD_CC_DATAUNDERRUN
+                               && !(urb->transfer_flags & URB_SHORT_NOT_OK))
+                       cc = TD_CC_NOERROR;
+
+               if (cc != TD_CC_NOERROR && cc < TD_CC_HCD0) {
+                       spin_lock(&urb->lock);
+                       if (urb->status == -EINPROGRESS)
+                               urb->status = cc_to_error[cc];
+                       spin_unlock(&urb->lock);
+               }
 
                /* count all non-empty packets except control SETUP packet */
-               if ((type != PIPE_CONTROL || td->index != 0) && dbp != 0) {
-                       urb->actual_length += dbp - td->data_dma + cbl;
+               if ((type != PIPE_CONTROL || td->index != 0) && tdDBP != 0) {
+                       urb->actual_length += tdDBP - td->data_dma + bl;
                }
+
+               if (cc != TD_CC_NOERROR && cc < TD_CC_HCD0)
+                       admhc_vdbg(ahcd,
+                               "urb %p td %p (%d) cc %d, len=%d/%d\n",
+                               urb, td, td->index, cc,
+                               urb->actual_length,
+                               urb->transfer_buffer_length);
        }
 
+       list_del(&td->td_list);
+       urb_priv->td_idx++;
+
        return cc;
 }
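
td_done() folds the TD's completion code into urb->status via cc_to_error[], which is defined in the driver's header. A small sketch of the mapping for the two codes handled specially above; the full table is an assumption here and the real one should be taken from the header:

/* Sketch only: the usual errno values for the completion codes that
 * td_done() treats specially.  The real cc_to_error[] table lives in
 * the driver's header and covers all sixteen codes.
 */
static int cc_to_errno_sketch(int cc)
{
	switch (cc) {
	case TD_CC_NOERROR:
		return 0;
	case TD_CC_STALL:
		return -EPIPE;		/* endpoint stalled */
	case TD_CC_DATAUNDERRUN:
		return -EREMOTEIO;	/* short transfer with URB_SHORT_NOT_OK */
	default:
		return -EIO;		/* anything else: generic I/O error */
	}
}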
 
 /*-------------------------------------------------------------------------*/
 
-static void ed_update(struct admhcd *ahcd, struct ed *ed, int force)
+static inline struct td *
+ed_halted(struct admhcd *ahcd, struct td *td, int cc, struct td *rev)
 {
-       struct urb_priv *up;
-       struct urb *urb;
-       int cc;
+       struct urb              *urb = td->urb;
+       struct ed               *ed = td->ed;
+       struct list_head        *tmp = td->td_list.next;
+       __hc32                  toggle = ed->hwHeadP & cpu_to_hc32 (ahcd, ED_C);
+
+       admhc_dump_ed(ahcd, "ed halted", td->ed, 1);
+       /* clear ed halt; this is the td that caused it, but keep it inactive
+        * until its urb->complete() has a chance to clean up.
+        */
+       ed->hwINFO |= cpu_to_hc32 (ahcd, ED_SKIP);
+       wmb();
+       ed->hwHeadP &= ~cpu_to_hc32 (ahcd, ED_H);
 
-       up = ed->urb_active;
-       if (!up)
-               return;
+       /* put any later tds from this urb onto the donelist, after 'td',
+        * order won't matter here: no errors, and nothing was transferred.
+        * also patch the ed so it looks as if those tds completed normally.
+        */
+       while (tmp != &ed->td_list) {
+               struct td       *next;
+               __hc32          info;
 
-       urb = up->urb;
+               next = list_entry(tmp, struct td, td_list);
+               tmp = next->td_list.next;
 
-#ifdef ADMHC_VERBOSE_DEBUG
-       urb_print(ahcd, urb, "UPDATE", 0);
-       admhc_dump_ed(ahcd, "ED-UPDATE", ed, 1);
+               if (next->urb != urb)
+                       break;
+
+               /* NOTE: if multi-td control DATA segments get supported,
+                * this urb had one of them, this td wasn't the last td
+                * in that segment (TD_R clear), this ed halted because
+                * of a short read, _and_ URB_SHORT_NOT_OK is clear ...
+                * then we need to leave the control STATUS packet queued
+                * and clear ED_SKIP.
+                */
+               info = next->hwINFO;
+#if 0          /* FIXME */
+               info |= cpu_to_hc32 (ahcd, TD_DONE);
 #endif
+               info &= ~cpu_to_hc32 (ahcd, TD_CC);
+               next->hwINFO = info;
 
-       cc = TD_CC_NOERROR;
-       for (; up->td_idx < up->td_cnt; up->td_idx++) {
-               struct td *td = up->td[up->td_idx];
+               next->next_dl_td = rev;
+               rev = next;
 
-               if (hc32_to_cpup(ahcd, &td->hwINFO) & TD_OWN)
+               ed->hwHeadP = next->hwNextTD | toggle;
+       }
+
+       /* help for troubleshooting:  report anything that
+        * looks odd ... that doesn't include protocol stalls
+        * (or maybe some other things)
+        */
+       switch (cc) {
+       case TD_CC_DATAUNDERRUN:
+               if ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)
                        break;
+               /* fallthrough */
+       case TD_CC_STALL:
+               if (usb_pipecontrol(urb->pipe))
+                       break;
+               /* fallthrough */
+       default:
+               admhc_dbg (ahcd,
+                       "urb %p path %s ep%d%s %08x cc %d --> status %d\n",
+                       urb, urb->dev->devpath,
+                       usb_pipeendpoint (urb->pipe),
+                       usb_pipein (urb->pipe) ? "in" : "out",
+                       hc32_to_cpu(ahcd, td->hwINFO),
+                       cc, cc_to_error [cc]);
+       }
 
-               cc = td_done(ahcd, urb, td);
-               if (cc != TD_CC_NOERROR) {
-                       admhc_vdbg(ahcd,
-                               "urb %p td %p (%d) cc %d, len=%d/%d\n",
-                               urb, td, td->index, cc,
-                               urb->actual_length,
-                               urb->transfer_buffer_length);
+       return rev;
+}
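
ed_halted() threads the failed URB's remaining TDs onto a software done list through next_dl_td, newest first, following the OHCI-style convention this HCD inherits (see the td_hash/next_dl_td fields added in the header below). A minimal sketch of how such a controller-reported done queue is normally re-reversed into submission order; dma_to_td() is a hypothetical hash lookup, and the routines in this file actually retire TDs by walking each ED's td_list instead:

/* Sketch only, OHCI-style: turn a DMA-chained done queue (newest TD
 * first) into driver order via next_dl_td.  dma_to_td() is a
 * hypothetical lookup over the td_hash chains.
 */
static struct td *reverse_done_list_sketch(struct admhcd *ahcd, u32 head_dma)
{
	struct td *rev = NULL;

	while (head_dma != 0) {
		struct td *td = dma_to_td(ahcd, head_dma & TD_MASK);

		if (td == NULL)
			break;			/* corrupted done queue */

		head_dma = hc32_to_cpu(ahcd, td->hwNextTD);
		td->next_dl_td = rev;		/* prepend: reverses order */
		rev = td;
	}

	return rev;
}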
 
-                       up->td_idx = up->td_cnt;
-                       break;
+/*-------------------------------------------------------------------------*/
+
+/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
+static void
+finish_unlinks(struct admhcd *ahcd, u16 tick)
+{
+       struct ed       *ed, **last;
+
+rescan_all:
+       for (last = &ahcd->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
+               struct list_head        *entry, *tmp;
+               int                     completed, modified;
+               __hc32                  *prev;
+
+               /* only take off EDs that the HC isn't using, accounting for
+                * frame counter wraps and EDs with partially retired TDs
+                */
+               if (likely(HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state))) {
+                       if (tick_before (tick, ed->tick)) {
+skip_ed:
+                               last = &ed->ed_rm_next;
+                               continue;
+                       }
+
+                       if (!list_empty (&ed->td_list)) {
+                               struct td       *td;
+                               u32             head;
+
+                               td = list_entry(ed->td_list.next, struct td,
+                                                       td_list);
+                               head = hc32_to_cpu(ahcd, ed->hwHeadP) &
+                                                               TD_MASK;
+
+                               /* INTR_WDH may need to clean up first */
+                               if (td->td_dma != head)
+                                       goto skip_ed;
+                       }
                }
-       }
 
-       if ((up->td_idx != up->td_cnt) && (!force))
-               /* the URB is not completed yet */
-               return;
+               /* reentrancy:  if we drop the schedule lock, someone might
+                * have modified this list.  normally it's just prepending
+                * entries (which we'd ignore), but paranoia won't hurt.
+                */
+               *last = ed->ed_rm_next;
+               ed->ed_rm_next = NULL;
+               modified = 0;
+
+               /* unlink urbs as requested, but rescan the list after
+                * we call a completion since it might have unlinked
+                * another (earlier) urb
+                *
+                * When we get here, the HC doesn't see this ed.  But it
+                * must not be rescheduled until all completed URBs have
+                * been given back to the driver.
+                */
+rescan_this:
+               completed = 0;
+               prev = &ed->hwHeadP;
+               list_for_each_safe (entry, tmp, &ed->td_list) {
+                       struct td       *td;
+                       struct urb      *urb;
+                       struct urb_priv *urb_priv;
+                       __hc32          savebits;
+
+                       td = list_entry(entry, struct td, td_list);
+                       urb = td->urb;
+                       urb_priv = td->urb->hcpriv;
+
+                       if (urb->status == -EINPROGRESS) {
+                               prev = &td->hwNextTD;
+                               continue;
+                       }
+
+                       if (urb_priv == NULL)
+                               continue;
 
-       /* update packet status if needed (short is normally ok) */
-       if (cc == TD_CC_DATAUNDERRUN
-                       && !(urb->transfer_flags & URB_SHORT_NOT_OK))
-               cc = TD_CC_NOERROR;
+                       /* patch pointer hc uses */
+                       savebits = *prev & ~cpu_to_hc32(ahcd, TD_MASK);
+                       *prev = td->hwNextTD | savebits;
 
-       if (cc != TD_CC_NOERROR && cc < TD_CC_HCD0) {
-               spin_lock(&urb->lock);
-               if (urb->status == -EINPROGRESS)
-                       urb->status = cc_to_error[cc];
-               spin_unlock(&urb->lock);
-       }
+                       /* HC may have partly processed this TD */
+#ifdef ADMHC_VERBOSE_DEBUG
+                       urb_print(ahcd, urb, "PARTIAL", 1);
+#endif
+                       td_done(ahcd, urb, td);
 
-       finish_urb(ahcd, urb);
+                       /* if URB is done, clean up */
+                       if (urb_priv->td_idx == urb_priv->td_cnt) {
+                               modified = completed = 1;
+                               finish_urb(ahcd, urb);
+                       }
+               }
+               if (completed && !list_empty (&ed->td_list))
+                       goto rescan_this;
 
-       ed->urb_active = NULL;
-       ed->state = ED_IDLE;
+               /* ED's now officially unlinked, hc doesn't see */
+               ed->state = ED_IDLE;
+               ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);
+               ed->hwNextED = 0;
+               wmb ();
+               ed->hwINFO &= ~cpu_to_hc32 (ahcd, ED_SKIP | ED_DEQUEUE);
+
+               /* but if there's work queued, reschedule */
+               if (!list_empty (&ed->td_list)) {
+                       if (HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state))
+                               ed_schedule(ahcd, ed);
+               }
+
+               if (modified)
+                       goto rescan_all;
+       }
 }
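
finish_unlinks() only detaches an ED once the frame counter has passed ed->tick, and that comparison has to survive 16-bit frame counter wrap. A sketch of the usual wrap-safe test, assuming tick_before() is defined the same way as in the other OHCI-derived HCDs:

/* Sketch only: wrap-safe "tick is earlier than than" for a 16-bit
 * frame counter.  The subtraction is done modulo 2^16 and the result
 * reinterpreted as signed, so e.g. tick_before(0xfffe, 0x0001) is true.
 */
static inline int tick_before(u16 tick, u16 than)
{
	return (s16)(tick - than) < 0;
}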
 
-/* there are some tds completed; called in_irq(), with HCD locked */
-static void admhc_td_complete(struct admhcd *ahcd)
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Process normal completions (error or success) and clean the schedules.
+ *
+ * This is the main path for handing URBs back to their drivers.  The only
+ * other path is finish_unlinks(), which retires unlinked URBs via
+ * ed_rm_list instead of scanning each ED's td_list as this path does.
+ */
+
+static void ed_unhalt(struct admhcd *ahcd, struct ed *ed, struct urb *urb)
 {
-       struct ed *ed;
-       int more = 0;
+       struct list_head *entry, *tmp;
+       struct urb_priv *urb_priv = urb->hcpriv;
+       __hc32 toggle = ed->hwHeadP & cpu_to_hc32 (ahcd, ED_C);
 
-       for (ed = ahcd->ed_head; ed; ed = ed->ed_next) {
-               if (ed->state != ED_OPER)
-                       continue;
 
-               if (hc32_to_cpup(ahcd, &ed->hwHeadP) & ED_H) {
-                       admhc_dump_ed(ahcd, "ed halted", ed, 1);
-                       ed_update(ahcd, ed, 1);
-                       ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);
-               } else
-                       ed_update(ahcd, ed, 0);
+#ifdef ADMHC_VERBOSE_DEBUG
+       admhc_dump_ed(ahcd, "UNHALT", ed, 0);
+#endif
+       /* clear the ED halt and take this URB's remaining TDs off the
+        * hardware queue, but keep the ED skipped until urb->complete()
+        * has had a chance to clean up.
+        */
+       ed->hwINFO |= cpu_to_hc32 (ahcd, ED_SKIP);
+       wmb();
+       ed->hwHeadP &= ~cpu_to_hc32 (ahcd, ED_H);
 
-               if (ed->urb_active) {
-                       more = 1;
-                       continue;
-               }
+       list_for_each_safe(entry, tmp, &ed->td_list) {
+               struct td *td = list_entry(entry, struct td, td_list);
+               __hc32 info;
 
-               if (!(list_empty(&ed->urb_pending))) {
-                       more = 1;
-                       ed_next_urb(ahcd, ed);
-                       continue;
-               }
+               if (td->urb != urb)
+                       break;
 
-               ed_start_deschedule(ahcd, ed);
+               info = td->hwINFO;
+               info &= ~cpu_to_hc32(ahcd, TD_CC | TD_OWN);
+               td->hwINFO = info;
+
+               ed->hwHeadP = td->hwNextTD | toggle;
+               wmb();
        }
 
-       if (!more)
-               admhc_dma_disable(ahcd);
+}
 
+static inline int is_ed_halted(struct admhcd *ahcd, struct ed *ed)
+{
+       return ((hc32_to_cpup(ahcd, &ed->hwHeadP) & ED_H) == ED_H);
 }
 
-/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
-static void admhc_finish_unlinks(struct admhcd *ahcd, u16 tick)
+static inline int is_td_halted(struct admhcd *ahcd, struct ed *ed,
+               struct td *td)
 {
-       struct ed *ed;
-       int more = 0;
+       return ((hc32_to_cpup(ahcd, &ed->hwHeadP) & TD_MASK) ==
+               (hc32_to_cpup(ahcd, &td->hwNextTD) & TD_MASK));
+}
 
-       for (ed = ahcd->ed_head; ed; ed = ed->ed_next) {
-               if (ed->state != ED_UNLINK)
-                       continue;
+static void ed_update(struct admhcd *ahcd, struct ed *ed)
+{
+       struct list_head *entry, *tmp;
 
-               if (likely(HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state)))
-                       if (tick_before(tick, ed->tick)) {
-                               more = 1;
-                               continue;
-                       }
+#ifdef ADMHC_VERBOSE_DEBUG
+       admhc_dump_ed(ahcd, "UPDATE", ed, 0);
+#endif
 
-               /* process partial status */
-               if (ed->urb_active)
-                       ed_update(ahcd, ed, 1);
+       list_for_each_safe(entry, tmp, &ed->td_list) {
+               struct td *td = list_entry(entry, struct td, td_list);
+               struct urb *urb = td->urb;
+               struct urb_priv *urb_priv = urb->hcpriv;
+               int cc;
+
+               if (hc32_to_cpup(ahcd, &td->hwINFO) & TD_OWN)
+                       break;
+
+               /* update URB's length and status from TD */
+               cc = td_done(ahcd, urb, td);
+               if (is_ed_halted(ahcd, ed) && is_td_halted(ahcd, ed, td))
+                       ed_unhalt(ahcd, ed, urb);
+
+               /* If all this urb's TDs are done, call complete() */
+               if (urb_priv->td_idx == urb_priv->td_cnt)
+                       finish_urb(ahcd, urb);
+
+               /* clean schedule:  unlink EDs that are no longer busy */
+               if (list_empty(&ed->td_list)) {
+                       if (ed->state == ED_OPER)
+                               start_ed_unlink(ahcd, ed);
+
+               /* ... reenabling halted EDs only after fault cleanup */
+               } else if ((ed->hwINFO & cpu_to_hc32 (ahcd,
+                                               ED_SKIP | ED_DEQUEUE))
+                                       == cpu_to_hc32 (ahcd, ED_SKIP)) {
+                       td = list_entry(ed->td_list.next, struct td, td_list);
+#if 0
+                       if (!(td->hwINFO & cpu_to_hc32 (ahcd, TD_DONE))) {
+                               ed->hwINFO &= ~cpu_to_hc32 (ahcd, ED_SKIP);
+                               /* ... hc may need waking-up */
+                               switch (ed->type) {
+                               case PIPE_CONTROL:
+                                       admhc_writel (ahcd, OHCI_CLF,
+                                               &ahcd->regs->cmdstatus);
+                                       break;
+                               case PIPE_BULK:
+                                       admhc_writel (ahcd, OHCI_BLF,
+                                               &ahcd->regs->cmdstatus);
+                                       break;
+                               }
+                       }
+#else
+                       if ((td->hwINFO & cpu_to_hc32(ahcd, TD_OWN)))
+                               ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP);
+#endif
+               }
 
-               if (list_empty(&ed->urb_pending))
-                       ed_deschedule(ahcd, ed);
-               else
-                       ed_schedule(ahcd, ed);
        }
+}
 
-       if (!more)
-               if (likely(HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state)))
-                       admhc_intr_disable(ahcd, ADMHC_INTR_SOFI);
+/* there are some tds completed; called in_irq(), with HCD locked */
+static void admhc_td_complete(struct admhcd *ahcd)
+{
+       struct ed       *ed;
+
+       for (ed = ahcd->ed_head; ed; ed = ed->ed_next) {
+               if (ed->state != ED_OPER)
+                       continue;
+
+               ed_update(ahcd, ed);
+       }
 }
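
Both admhc_td_complete() and finish_unlinks() run from the interrupt handler with the HCD lock held. A rough sketch of the expected dispatch; ADMHC_INTR_TDC and admhc_frame_no() are assumptions since the real handler lives in adm5120-hcd.c, while ADMHC_INTR_SOFI is the start-of-frame interrupt the removed code above used for unlink timing:

/* Sketch only: how the IRQ path is expected to reach the routines above.
 * ADMHC_INTR_TDC (TD complete) and admhc_frame_no() are assumptions.
 */
static void admhc_irq_dispatch_sketch(struct admhcd *ahcd, u32 ints)
{
	if (ints & ADMHC_INTR_TDC)		/* some TDs have been retired */
		admhc_td_complete(ahcd);

	if ((ints & ADMHC_INTR_SOFI) && ahcd->ed_rm_list)
		finish_unlinks(ahcd, admhc_frame_no(ahcd));
}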
index 72740783b6525f0f3dc2ddb433d960b49eb3c0e3..bf84c00994926bb9904fdd0882593e72fc535a73 100644 (file)
@@ -56,24 +56,21 @@ struct ed {
        dma_addr_t              dma;            /* addr of ED */
        struct td               *dummy;         /* next TD to activate */
 
-       struct urb_priv         *urb_active;    /* active URB */
-       struct list_head        urb_pending;    /* pending URBs */
-
-       struct list_head        ed_list;        /* list of all EDs*/
-       struct list_head        rm_list;        /* for remove list */
+       struct list_head        urb_list;       /* list of our URBs */
 
        /* host's view of schedule */
        struct ed               *ed_next;       /* on schedule list */
        struct ed               *ed_prev;       /* for non-interrupt EDs */
+       struct ed               *ed_rm_next;    /* on rm list */
+       struct list_head        td_list;        /* "shadow list" of our TDs */
 
        /* create --> IDLE --> OPER --> ... --> IDLE --> destroy
         * usually:  OPER --> UNLINK --> (IDLE | OPER) --> ...
         */
-       u8                      state;
-#define ED_NEW         0x00            /* just allocated */
-#define ED_IDLE                0x01            /* linked into HC, but not running */
-#define ED_OPER                0x02            /* linked into HC and running */
-#define ED_UNLINK      0x03            /* being unlinked from HC */
+       u8                      state;          /* ED_{IDLE,UNLINK,OPER} */
+#define ED_IDLE                0x00            /* NOT linked to HC */
+#define ED_UNLINK      0x01            /* being unlinked from hc */
+#define ED_OPER                0x02            /* IS linked to hc */
 
        u8                      type;           /* PIPE_{BULK,...} */
 
@@ -121,10 +118,10 @@ struct td {
 #define TD_DP_SHIFT    21                      /* direction/pid */
 #define TD_DP_MASK     0x3
 #define TD_DP          (TD_DP_MASK << TD_DP_SHIFT)
+#define TD_DP_GET(x)   (((x) >> TD_DP_SHIFT) & TD_DP_MASK)
 #define TD_DP_SETUP    (0x0 << TD_DP_SHIFT)    /* SETUP pid */
 #define TD_DP_OUT      (0x1 << TD_DP_SHIFT)    /* OUT pid */
 #define TD_DP_IN       (0x2 << TD_DP_SHIFT)    /* IN pid */
-#define TD_DP_GET(x)   (((x) >> TD_DP_SHIFT) & TD_DP_MASK)
 #define TD_ISI_SHIFT   8                       /* Interrupt Service Interval */
 #define TD_ISI_MASK    0x3f
 #define TD_ISI_GET(x)  (((x) >> TD_ISI_SHIFT) & TD_ISI_MASK)
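
With its (x) parameter restored above, TD_DP_GET() extracts the two-bit direction/PID field of a TD's hwINFO word. A small hypothetical helper showing the decode against the TD_DP_* values defined here; it is illustrative only and not part of the driver:

/* Sketch only: decode the direction/PID field of a hwINFO word. */
static inline const char *td_dp_string(u32 info)
{
	switch (TD_DP_GET(info)) {
	case TD_DP_SETUP >> TD_DP_SHIFT:
		return "SETUP";
	case TD_DP_OUT >> TD_DP_SHIFT:
		return "OUT";
	case TD_DP_IN >> TD_DP_SHIFT:
		return "IN";
	}

	return "?DP";
}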
@@ -142,11 +139,19 @@ struct td {
 
        /* rest are purely for the driver's use */
        __u8            index;
+       struct ed       *ed;
+       struct td       *td_hash;       /* dma-->td hashtable */
+       struct td       *next_dl_td;
        struct urb      *urb;
 
        dma_addr_t      td_dma;         /* addr of this TD */
        dma_addr_t      data_dma;       /* addr of data it points to */
 
+       struct list_head td_list;       /* "shadow list", TDs on same ED */
+
+       u32             flags;
+#define TD_FLAG_DONE   (1 << 17)       /* retired to done list */
+#define TD_FLAG_ISO    (1 << 16)       /* copy of ED_ISO */
 } __attribute__ ((aligned(TD_ALIGN))); /* c/b/i need 16; only iso needs 32 */
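
The td_list/ed->td_list pair added here is the software "shadow" of the hardware TD chain that ed_update() and finish_unlinks() iterate. A minimal sketch of keeping it in step when a TD is queued; the helper name and the assumption that ed->td_list was set up with INIT_LIST_HEAD() are illustrative, the real work happens in td_fill():

/* Sketch only: mirror the hardware TD chain in the shadow list when a
 * TD is queued on an ED.
 */
static inline void td_shadow_enqueue(struct ed *ed, struct td *td)
{
	td->ed = ed;				/* back-pointer used by ed_halted() */
	list_add_tail(&td->td_list, &ed->td_list);	/* oldest TD first */
}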
 
 /*
@@ -348,7 +353,6 @@ struct admhcd_regs {
 /* hcd-private per-urb state */
 struct urb_priv {
        struct ed               *ed;
-       struct urb              *urb;
        struct list_head        pending;        /* URBs on the same ED */
 
        u32                     td_cnt;         /* # tds in this request */
@@ -379,10 +383,13 @@ struct admhcd {
         * hcd adds to schedule for a live hc any time, but removals finish
         * only at the start of the next frame.
         */
+
        struct ed               *ed_head;
        struct ed               *ed_tails[4];
 
-//     struct ed               *periodic[NUM_INTS];    /* shadow int_table */
+       struct ed               *ed_rm_list;    /* to be removed */
+
+       struct ed               *periodic[NUM_INTS];    /* shadow int_table */
 
 #if 0  /* TODO: remove? */
        /*
@@ -398,6 +405,7 @@ struct admhcd {
        struct dma_pool         *td_cache;
        struct dma_pool         *ed_cache;
        struct td               *td_hash[TD_HASH_SIZE];
+       struct list_head        pending;
 
        /*
         * driver state
@@ -549,6 +557,15 @@ static inline void admhc_writel(const struct admhcd *ahcd,
 #endif
 }
 
+static inline void admhc_writel_flush(const struct admhcd *ahcd)
+{
+#if 0
+       /* TODO: remove? */
+       (void) admhc_readl(ahcd, &ahcd->regs->gencontrol);
+#endif
+}
+
 /*-------------------------------------------------------------------------*/
 
 /* cpu to ahcd */
@@ -635,18 +652,27 @@ static inline void admhc_disable(struct admhcd *ahcd)
        admhcd_to_hcd(ahcd)->state = HC_STATE_HALT;
 }
 
-#define        FI                      0x2edf          /* 12000 bits per frame (-1) */
-#define        FSLDP(fi)               (0x7fff & ((6 * ((fi) - 1200)) / 7))
-#define        FIT                     ADMHC_SFI_FIT
-#define LSTHRESH               0x628           /* lowspeed bit threshold */
+#define        FI              0x2edf          /* 12000 bits per frame (-1) */
+#define        FSLDP(fi)       (0x7fff & ((6 * ((fi) - 1200)) / 7))
+#define        FIT             ADMHC_SFI_FIT
+#define LSTHRESH       0x628           /* lowspeed bit threshold */
 
 static inline void periodic_reinit(struct admhcd *ahcd)
 {
+#if 0
+       u32     fi = ahcd->fminterval & ADMHC_SFI_FI_MASK;
+       u32     fit = admhc_readl(ahcd, &ahcd->regs->fminterval) & FIT;
+
+       /* TODO: adjust FSLargestDataPacket value too? */
+       admhc_writel(ahcd, (fit ^ FIT) | ahcd->fminterval,
+                                       &ahcd->regs->fminterval);
+#else
        u32     fit = admhc_readl(ahcd, &ahcd->regs->fminterval) & FIT;
 
        /* TODO: adjust FSLargestDataPacket value too? */
        admhc_writel(ahcd, (fit ^ FIT) | ahcd->fminterval,
                                        &ahcd->regs->fminterval);
+#endif
 }
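
A worked example for the frame-timing defines above, using the FSLDP() formula exactly as written; the interpretation as the largest-full-speed-data-packet budget follows the surrounding TODO comments rather than a datasheet:

/* Worked example for the defines above:
 *   FI        = 0x2edf = 11999, i.e. 12000 bit times per frame minus one
 *   FSLDP(FI) = 0x7fff & ((6 * (11999 - 1200)) / 7)
 *             = 0x7fff & (64794 / 7)
 *             = 9256 = 0x2428 bit times available for the largest
 *               full-speed data packet in a frame
 */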
 
 static inline u32 admhc_read_rhdesc(struct admhcd *ahcd)