3 * ETRAX 100LX USB Host Controller Driver
5 * Copyright (C) 2005 - 2008 Axis Communications AB
7 * Author: Konrad Eriksson <konrad.eriksson@axis.se>
11 #include <linux/module.h>
12 #include <linux/kernel.h>
13 #include <linux/init.h>
14 #include <linux/moduleparam.h>
15 #include <linux/spinlock.h>
16 #include <linux/usb.h>
17 #include <linux/platform_device.h>
21 #include <asm/arch/dma.h>
22 #include <asm/arch/io_interface_mux.h>
24 #include "../core/hcd.h"
25 #include "../core/hub.h"
26 #include "hc-crisv10.h"
27 #include "hc-cris-dbg.h"
30 /***************************************************************************/
31 /***************************************************************************/
32 /* Host Controller settings */
33 /***************************************************************************/
34 /***************************************************************************/
36 #define VERSION "1.00-openwrt_diff"
37 #define COPYRIGHT "(c) 2005, 2006 Axis Communications AB"
38 #define DESCRIPTION "ETRAX 100LX USB Host Controller"
40 #define ETRAX_USB_HC_IRQ USB_HC_IRQ_NBR
41 #define ETRAX_USB_RX_IRQ USB_DMA_RX_IRQ_NBR
42 #define ETRAX_USB_TX_IRQ USB_DMA_TX_IRQ_NBR
44 /* Number of physical ports in Etrax 100LX */
45 #define USB_ROOT_HUB_PORTS 2
/* Driver name, exported via the hc_driver .description field. */
const char hc_name[] = "hc-crisv10";
48 const char product_desc
[] = DESCRIPTION
;
50 /* The number of epids is, among other things, used for pre-allocating
51 ctrl, bulk and isoc EP descriptors (one for each epid).
52 Assumed to be > 1 when initiating the DMA lists. */
53 #define NBR_OF_EPIDS 32
55 /* Support interrupt traffic intervals up to 128 ms. */
56 #define MAX_INTR_INTERVAL 128
58 /* If periodic traffic (intr or isoc) is to be used, then one entry in the EP
59 table must be "invalid". By this we mean that we shouldn't care about epid
60 attentions for this epid, or at least handle them differently from epid
61 attentions for "valid" epids. This define determines which one to use
63 #define INVALID_EPID 31
64 /* A special epid for the bulk dummys. */
/* Module metadata and the single module parameter.
   "ports" is a read-only (S_IRUGO) bitmask selecting which of the two
   physical root-hub ports the driver will use; default 3 = both. */
69 MODULE_DESCRIPTION(DESCRIPTION
);
70 MODULE_LICENSE("GPL");
71 MODULE_AUTHOR("Konrad Eriksson <konrad.eriksson@axis.se>");
74 /* Module parameters */
76 /* 0 = No ports enabled
77 1 = Only port 1 enabled (on board ethernet on devboard)
78 2 = Only port 2 enabled (external connector on devboard)
79 3 = Both ports enabled
81 static unsigned int ports
= 3;
82 module_param(ports
, uint
, S_IRUGO
);
83 MODULE_PARM_DESC(ports
, "Bitmask indicating USB ports to use");
86 /***************************************************************************/
87 /***************************************************************************/
88 /* Shared global variables for this module */
89 /***************************************************************************/
90 /***************************************************************************/
/* Shared module state: statically allocated, 32-bit-aligned EP/SB descriptor
   lists consumed directly by the ETRAX DMA engine, the per-epid active-URB
   table and software state, and the two bulk watchdog timers that work around
   a DMA stall bug.
   NOTE(review): this text is a corrupted extraction -- identifiers are split
   across lines and some original lines are missing. */
92 /* EP descriptor lists for non period transfers. Must be 32-bit aligned. */
93 static volatile struct USB_EP_Desc TxBulkEPList
[NBR_OF_EPIDS
] __attribute__ ((aligned (4)));
95 static volatile struct USB_EP_Desc TxCtrlEPList
[NBR_OF_EPIDS
] __attribute__ ((aligned (4)));
97 /* EP descriptor lists for period transfers. Must be 32-bit aligned. */
98 static volatile struct USB_EP_Desc TxIntrEPList
[MAX_INTR_INTERVAL
] __attribute__ ((aligned (4)));
99 static volatile struct USB_SB_Desc TxIntrSB_zout
__attribute__ ((aligned (4)));
101 static volatile struct USB_EP_Desc TxIsocEPList
[NBR_OF_EPIDS
] __attribute__ ((aligned (4)));
102 static volatile struct USB_SB_Desc TxIsocSB_zout
__attribute__ ((aligned (4)));
104 static volatile struct USB_SB_Desc TxIsocSBList
[NBR_OF_EPIDS
] __attribute__ ((aligned (4)));
106 /* After each enabled bulk EP IN we put two disabled EP descriptors with the eol flag set,
107 causing the DMA to stop the DMA channel. The first of these two has the intr flag set, which
108 gives us a dma8_sub0_descr interrupt. When we receive this, we advance the DMA one step in the
109 EP list and then restart the bulk channel, thus forcing a switch between bulk EP descriptors
111 static volatile struct USB_EP_Desc TxBulkDummyEPList
[NBR_OF_EPIDS
][2] __attribute__ ((aligned (4)));
113 /* List of URB pointers, where each points to the active URB for a epid.
114 For Bulk, Ctrl and Intr this means which URB that currently is added to
115 DMA lists (Isoc URBs are all directly added to DMA lists). As soon as
116 URB has completed is the queue examined and the first URB in queue is
117 removed and moved to the activeUrbList while its state change to STARTED and
118 its transfer(s) gets added to DMA list (exception Isoc where URBs enter
119 state STARTED directly and added transfers added to DMA lists). */
120 static struct urb
*activeUrbList
[NBR_OF_EPIDS
];
122 /* Additional software state info for each epid */
123 static struct etrax_epid epid_state
[NBR_OF_EPIDS
];
125 /* Timer handles for bulk traffic timer used to avoid DMA bug where DMA stops
126 even if there is new data waiting to be processed */
127 static struct timer_list bulk_start_timer
= TIMER_INITIALIZER(NULL
, 0, 0);
128 static struct timer_list bulk_eot_timer
= TIMER_INITIALIZER(NULL
, 0, 0);
130 /* We want the start timer to expire before the eot timer, because the former
131 might start traffic, thus making it unnecessary for the latter to time
/* NOTE(review): HZ/16 is ~62.5 ms at HZ=100; the "60 ms" comment is approximate. */
133 #define BULK_START_TIMER_INTERVAL (HZ/50) /* 20 ms */
134 #define BULK_EOT_TIMER_INTERVAL (HZ/16) /* 60 ms */
136 /* Delay before a URB completion happen when it's scheduled to be delayed */
137 #define LATER_TIMER_DELAY (HZ/50) /* 20 ms */
139 /* Simplifying macros for checking software state info of a epid */
140 /* ----------------------------------------------------------------------- */
141 #define epid_inuse(epid) epid_state[epid].inuse
142 #define epid_out_traffic(epid) epid_state[epid].out_traffic
143 #define epid_isoc(epid) (epid_state[epid].type == PIPE_ISOCHRONOUS ? 1 : 0)
144 #define epid_intr(epid) (epid_state[epid].type == PIPE_INTERRUPT ? 1 : 0)
147 /***************************************************************************/
148 /***************************************************************************/
149 /* DEBUG FUNCTIONS */
150 /***************************************************************************/
151 /***************************************************************************/
152 /* Note that these functions are always available in their "__" variants,
153 for use in error situations. The "__" missing variants are controlled by
154 the USB_DEBUG_DESC/USB_DEBUG_URB macros. */
/* Debug helper: printk every interesting field of a struct urb.
   NOTE(review): extraction gaps -- the original declares an int urb_num
   (default -1) and NULL-checks urb_priv before the line-160 assignment;
   those lines are missing from this copy. */
155 static void __dump_urb(struct urb
* purb
)
157 struct crisv10_urb_priv
*urb_priv
= purb
->hcpriv
;
160 urb_num
= urb_priv
->urb_num
;
162 printk("\nURB:0x%x[%d]\n", (unsigned int)purb
, urb_num
);
163 printk("dev :0x%08lx\n", (unsigned long)purb
->dev
);
164 printk("pipe :0x%08x\n", purb
->pipe
);
165 printk("status :%d\n", purb
->status
);
166 printk("transfer_flags :0x%08x\n", purb
->transfer_flags
);
167 printk("transfer_buffer :0x%08lx\n", (unsigned long)purb
->transfer_buffer
);
168 printk("transfer_buffer_length:%d\n", purb
->transfer_buffer_length
);
169 printk("actual_length :%d\n", purb
->actual_length
);
170 printk("setup_packet :0x%08lx\n", (unsigned long)purb
->setup_packet
);
171 printk("start_frame :%d\n", purb
->start_frame
);
172 printk("number_of_packets :%d\n", purb
->number_of_packets
);
173 printk("interval :%d\n", purb
->interval
);
174 printk("error_count :%d\n", purb
->error_count
);
175 printk("context :0x%08lx\n", (unsigned long)purb
->context
);
176 printk("complete :0x%08lx\n\n", (unsigned long)purb
->complete
);
/* Debug helper: printk all fields of a DMA IN descriptor. */
179 static void __dump_in_desc(volatile struct USB_IN_Desc
*in
)
181 printk("\nUSB_IN_Desc at 0x%08lx\n", (unsigned long)in
);
182 printk(" sw_len : 0x%04x (%d)\n", in
->sw_len
, in
->sw_len
);
183 printk(" command : 0x%04x\n", in
->command
);
184 printk(" next : 0x%08lx\n", in
->next
);
185 printk(" buf : 0x%08lx\n", in
->buf
);
186 printk(" hw_len : 0x%04x (%d)\n", in
->hw_len
, in
->hw_len
);
187 printk(" status : 0x%04x\n\n", in
->status
);
/* Debug helper: decode and printk the bit fields of an SB (sub-buffer)
   descriptor's command word plus its length/link fields.
   NOTE(review): the switch on "tt" that sets tt_string for values 0-3
   is missing from this copy; only the default assignment survives. */
190 static void __dump_sb_desc(volatile struct USB_SB_Desc
*sb
)
192 char tt
= (sb
->command
& 0x30) >> 4;
209 tt_string
= "unknown (weird)";
212 printk(" USB_SB_Desc at 0x%08lx ", (unsigned long)sb
);
213 printk(" command:0x%04x (", sb
->command
);
214 printk("rem:%d ", (sb
->command
& 0x3f00) >> 8);
215 printk("full:%d ", (sb
->command
& 0x40) >> 6);
216 printk("tt:%d(%s) ", tt
, tt_string
);
217 printk("intr:%d ", (sb
->command
& 0x8) >> 3);
218 printk("eot:%d ", (sb
->command
& 0x2) >> 1);
219 printk("eol:%d)", sb
->command
& 0x1);
220 printk(" sw_len:0x%04x(%d)", sb
->sw_len
, sb
->sw_len
);
221 printk(" next:0x%08lx", sb
->next
);
222 printk(" buf:0x%08lx\n", sb
->buf
);
/* Debug helper: decode and printk the bit fields of an EP descriptor. */
226 static void __dump_ep_desc(volatile struct USB_EP_Desc
*ep
)
228 printk("USB_EP_Desc at 0x%08lx ", (unsigned long)ep
);
229 printk(" command:0x%04x (", ep
->command
);
230 printk("ep_id:%d ", (ep
->command
& 0x1f00) >> 8);
231 printk("enable:%d ", (ep
->command
& 0x10) >> 4);
232 printk("intr:%d ", (ep
->command
& 0x8) >> 3);
233 printk("eof:%d ", (ep
->command
& 0x2) >> 1);
234 printk("eol:%d)", ep
->command
& 0x1);
235 printk(" hw_len:0x%04x(%d)", ep
->hw_len
, ep
->hw_len
);
236 printk(" next:0x%08lx", ep
->next
);
237 printk(" sub:0x%08lx\n", ep
->sub
);
/* Debug helper: walk one of the four static EP lists (selected by
   pipe_type) and dump each EP descriptor and its chained SB descriptors,
   following physical next/sub links with phys_to_virt until the list
   wraps back to its head.
   NOTE(review): extraction gaps -- the switch header, case labels for
   bulk/ctrl/intr, the do{}-loop header and inner SB loop are missing. */
240 static inline void __dump_ep_list(int pipe_type
)
242 volatile struct USB_EP_Desc
*ep
;
243 volatile struct USB_EP_Desc
*first_ep
;
244 volatile struct USB_SB_Desc
*sb
;
249 first_ep
= &TxBulkEPList
[0];
252 first_ep
= &TxCtrlEPList
[0];
255 first_ep
= &TxIntrEPList
[0];
257 case PIPE_ISOCHRONOUS
:
258 first_ep
= &TxIsocEPList
[0];
261 warn("Cannot dump unknown traffic type");
266 printk("\n\nDumping EP list...\n\n");
270 /* Cannot phys_to_virt on 0 as it turns into 80000000, which is != 0. */
271 sb
= ep
->sub
? phys_to_virt(ep
->sub
) : 0;
274 sb
= sb
->next
? phys_to_virt(sb
->next
) : 0;
276 ep
= (volatile struct USB_EP_Desc
*)(phys_to_virt(ep
->next
));
278 } while (ep
!= first_ep
);
/* Debug helper: read R_USB_EPT_DATA for the given epid (via the shared
   R_USB_EPT_INDEX register, with interrupts off to keep the index/data
   pair atomic) and printk its decoded bit fields. */
281 static inline void __dump_ept_data(int epid
)
284 __u32 r_usb_ept_data
;
286 if (epid
< 0 || epid
> 31) {
287 printk("Cannot dump ept data for invalid epid %d\n", epid
);
291 local_irq_save(flags
);
292 *R_USB_EPT_INDEX
= IO_FIELD(R_USB_EPT_INDEX
, value
, epid
);
294 r_usb_ept_data
= *R_USB_EPT_DATA
;
295 local_irq_restore(flags
);
297 printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", r_usb_ept_data
, epid
);
298 if (r_usb_ept_data
== 0) {
299 /* No need for more detailed printing. */
302 printk(" valid : %d\n", (r_usb_ept_data
& 0x80000000) >> 31);
303 printk(" hold : %d\n", (r_usb_ept_data
& 0x40000000) >> 30);
304 printk(" error_count_in : %d\n", (r_usb_ept_data
& 0x30000000) >> 28);
305 printk(" t_in : %d\n", (r_usb_ept_data
& 0x08000000) >> 27);
306 printk(" low_speed : %d\n", (r_usb_ept_data
& 0x04000000) >> 26);
307 printk(" port : %d\n", (r_usb_ept_data
& 0x03000000) >> 24);
308 printk(" error_code : %d\n", (r_usb_ept_data
& 0x00c00000) >> 22);
309 printk(" t_out : %d\n", (r_usb_ept_data
& 0x00200000) >> 21);
310 printk(" error_count_out : %d\n", (r_usb_ept_data
& 0x00180000) >> 19);
311 printk(" max_len : %d\n", (r_usb_ept_data
& 0x0003f800) >> 11);
312 printk(" ep : %d\n", (r_usb_ept_data
& 0x00000780) >> 7);
313 printk(" dev : %d\n", (r_usb_ept_data
& 0x0000003f));
/* Debug helper: same as __dump_ept_data but reads R_USB_EPT_DATA_ISO and
   decodes it with the ISO-layout IO_EXTRACT accessors.
   NOTE(review): the ept_data argument lines of each IO_EXTRACT call are
   missing from this copy. */
316 static inline void __dump_ept_data_iso(int epid
)
321 if (epid
< 0 || epid
> 31) {
322 printk("Cannot dump ept data for invalid epid %d\n", epid
);
326 local_irq_save(flags
);
327 *R_USB_EPT_INDEX
= IO_FIELD(R_USB_EPT_INDEX
, value
, epid
);
329 ept_data
= *R_USB_EPT_DATA_ISO
;
330 local_irq_restore(flags
);
332 printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", ept_data
, epid
);
334 /* No need for more detailed printing. */
337 printk(" valid : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO
, valid
,
339 printk(" port : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO
, port
,
341 printk(" error_code : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO
, error_code
,
343 printk(" max_len : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO
, max_len
,
345 printk(" ep : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO
, ep
,
347 printk(" dev : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO
, dev
,
/* Debug helper: dump R_USB_EPT_DATA for all 32 epids.
   NOTE(review): the loop body (a __dump_ept_data(i) call, presumably)
   is missing from this copy. */
351 static inline void __dump_ept_data_list(void)
355 printk("Dumping the whole R_USB_EPT_DATA list\n");
357 for (i
= 0; i
< 32; i
++) {
/* Debug helper: for one epid, dump its hardware EPT data (ISO or normal
   layout depending on traffic type), then scan all four static EP lists
   and dump every EP descriptor whose command word carries this epid. */
362 static void debug_epid(int epid
) {
365 if(epid_isoc(epid
)) {
366 __dump_ept_data_iso(epid
);
368 __dump_ept_data(epid
);
372 for(i
= 0; i
< 32; i
++) {
373 if(IO_EXTRACT(USB_EP_command
, epid
, TxBulkEPList
[i
].command
) ==
375 printk("%d: ", i
); __dump_ep_desc(&(TxBulkEPList
[i
]));
380 for(i
= 0; i
< 32; i
++) {
381 if(IO_EXTRACT(USB_EP_command
, epid
, TxCtrlEPList
[i
].command
) ==
383 printk("%d: ", i
); __dump_ep_desc(&(TxCtrlEPList
[i
]));
388 for(i
= 0; i
< MAX_INTR_INTERVAL
; i
++) {
389 if(IO_EXTRACT(USB_EP_command
, epid
, TxIntrEPList
[i
].command
) ==
391 printk("%d: ", i
); __dump_ep_desc(&(TxIntrEPList
[i
]));
396 for(i
= 0; i
< 32; i
++) {
397 if(IO_EXTRACT(USB_EP_command
, epid
, TxIsocEPList
[i
].command
) ==
399 printk("%d: ", i
); __dump_ep_desc(&(TxIsocEPList
[i
]));
403 __dump_ept_data_list();
404 __dump_ep_list(PIPE_INTERRUPT
);
/* Build a human-readable string naming the flags set in an R_USB_STATUS
   byte. Returns a pointer to a static buffer -- not reentrant; intended
   for debug printks only. */
410 char* hcd_status_to_str(__u8 bUsbStatus
) {
411 static char hcd_status_str
[128];
412 hcd_status_str
[0] = '\0';
413 if(bUsbStatus
& IO_STATE(R_USB_STATUS
, ourun
, yes
)) {
414 strcat(hcd_status_str
, "ourun ");
416 if(bUsbStatus
& IO_STATE(R_USB_STATUS
, perror
, yes
)) {
417 strcat(hcd_status_str
, "perror ");
419 if(bUsbStatus
& IO_STATE(R_USB_STATUS
, device_mode
, yes
)) {
420 strcat(hcd_status_str
, "device_mode ");
422 if(bUsbStatus
& IO_STATE(R_USB_STATUS
, host_mode
, yes
)) {
423 strcat(hcd_status_str
, "host_mode ");
425 if(bUsbStatus
& IO_STATE(R_USB_STATUS
, started
, yes
)) {
426 strcat(hcd_status_str
, "started ");
428 if(bUsbStatus
& IO_STATE(R_USB_STATUS
, running
, yes
)) {
429 strcat(hcd_status_str
, "running ");
431 return hcd_status_str
;
/* Build a human-readable summary of an SB descriptor chain, e.g.
   "(setup 8)(in 64)...". Follows physical next links with phys_to_virt.
   Returns a static buffer -- not reentrant; debug use only.
   NOTE(review): the else branch that terminates the walk when
   sb_desc->next == 0 is missing from this copy. */
435 char* sblist_to_str(struct USB_SB_Desc
* sb_desc
) {
436 static char sblist_to_str_buff
[128];
437 char tmp
[32], tmp2
[32];
438 sblist_to_str_buff
[0] = '\0';
439 while(sb_desc
!= NULL
) {
440 switch(IO_EXTRACT(USB_SB_command
, tt
, sb_desc
->command
)) {
441 case 0: sprintf(tmp
, "zout"); break;
442 case 1: sprintf(tmp
, "in"); break;
443 case 2: sprintf(tmp
, "out"); break;
444 case 3: sprintf(tmp
, "setup"); break;
446 sprintf(tmp2
, "(%s %d)", tmp
, sb_desc
->sw_len
);
447 strcat(sblist_to_str_buff
, tmp2
);
448 if(sb_desc
->next
!= 0) {
449 sb_desc
= phys_to_virt(sb_desc
->next
);
454 return sblist_to_str_buff
;
/* Build a human-readable string naming the flags set in a root-hub port
   status word. Returns a static buffer -- not reentrant; debug use only. */
457 char* port_status_to_str(__u16 wPortStatus
) {
458 static char port_status_str
[128];
459 port_status_str
[0] = '\0';
460 if(wPortStatus
& IO_STATE(R_USB_RH_PORT_STATUS_1
, connected
, yes
)) {
461 strcat(port_status_str
, "connected ");
463 if(wPortStatus
& IO_STATE(R_USB_RH_PORT_STATUS_1
, enabled
, yes
)) {
464 strcat(port_status_str
, "enabled ");
466 if(wPortStatus
& IO_STATE(R_USB_RH_PORT_STATUS_1
, suspended
, yes
)) {
467 strcat(port_status_str
, "suspended ");
469 if(wPortStatus
& IO_STATE(R_USB_RH_PORT_STATUS_1
, reset
, yes
)) {
470 strcat(port_status_str
, "reset ");
472 if(wPortStatus
& IO_STATE(R_USB_RH_PORT_STATUS_1
, speed
, full
)) {
473 strcat(port_status_str
, "full-speed ");
475 strcat(port_status_str
, "low-speed ");
477 return port_status_str
;
/* Build a human-readable summary (number, type, direction) of a USB
   endpoint descriptor. Returns a static buffer -- not reentrant.
   NOTE(review): the switch header over "type" and the dir == in branch
   are missing from this copy. */
481 char* endpoint_to_str(struct usb_endpoint_descriptor
*ed
) {
482 static char endpoint_to_str_buff
[128];
484 int epnum
= ed
->bEndpointAddress
& 0x0F;
485 int dir
= ed
->bEndpointAddress
& 0x80;
486 int type
= ed
->bmAttributes
& 0x03;
487 endpoint_to_str_buff
[0] = '\0';
488 sprintf(endpoint_to_str_buff
, "ep:%d ", epnum
);
491 sprintf(tmp
, " ctrl");
494 sprintf(tmp
, " isoc");
497 sprintf(tmp
, " bulk");
500 sprintf(tmp
, " intr");
503 strcat(endpoint_to_str_buff
, tmp
);
507 sprintf(tmp
, " out");
509 strcat(endpoint_to_str_buff
, tmp
);
511 return endpoint_to_str_buff
;
514 /* Debug helper functions for Transfer Controller */
/* Build a human-readable summary (dir, type, device, endpoint) of a USB
   pipe value. Returns a static buffer -- not reentrant; debug use only. */
515 char* pipe_to_str(unsigned int pipe
) {
516 static char pipe_to_str_buff
[128];
518 sprintf(pipe_to_str_buff
, "dir:%s", str_dir(pipe
));
519 sprintf(tmp
, " type:%s", str_type(pipe
));
520 strcat(pipe_to_str_buff
, tmp
);
522 sprintf(tmp
, " dev:%d", usb_pipedevice(pipe
));
523 strcat(pipe_to_str_buff
, tmp
);
524 sprintf(tmp
, " ep:%d", usb_pipeendpoint(pipe
));
525 strcat(pipe_to_str_buff
, tmp
);
526 return pipe_to_str_buff
;
#define USB_DEBUG_DESC 1

/* Descriptor-dump wrappers: compiled in when USB_DEBUG_DESC is defined,
 * compiled out to empty statements otherwise.
 * FIX: dump_sb_desc previously expanded to the literal tokens
 * "__dump_sb_desc(...)", which is a syntax error at any call site while
 * USB_DEBUG_DESC is active; forward the argument like its siblings. */
#ifdef USB_DEBUG_DESC
#define dump_in_desc(x) __dump_in_desc(x)
#define dump_sb_desc(x) __dump_sb_desc(x)
#define dump_ep_desc(x) __dump_ep_desc(x)
#define dump_ept_data(x) __dump_ept_data(x)
#else
#define dump_in_desc(...) do {} while (0)
#define dump_sb_desc(...) do {} while (0)
#define dump_ep_desc(...) do {} while (0)
#define dump_ept_data(...) do {} while (0)
#endif

/* Uncomment this to enable massive function call trace
   #define USB_DEBUG_TRACE */
#ifdef USB_DEBUG_TRACE
#define DBFENTER (printk(": Entering: %s\n", __FUNCTION__))
#define DBFEXIT (printk(": Exiting: %s\n", __FUNCTION__))
#else
#define DBFENTER do {} while (0)
#define DBFEXIT do {} while (0)
#endif

/* Panic if x is not 32-bit aligned (the DMA requires it). */
#define CHECK_ALIGN(x) if (((__u32)(x)) & 0x00000003) \
{panic("Alignment check (DWORD) failed at %s:%s:%d\n", __FILE__, __FUNCTION__, __LINE__);}

/* Most helpful debugging aid */
#define ASSERT(expr) ((void) ((expr) ? 0 : (err("assert failed at: %s %d",__FUNCTION__, __LINE__))))
562 /***************************************************************************/
563 /***************************************************************************/
564 /* Forward declarations */
565 /***************************************************************************/
566 /***************************************************************************/
567 void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg
*reg
);
568 void crisv10_hcd_port_status_irq(struct crisv10_irq_reg
*reg
);
569 void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg
*reg
);
570 void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg
*reg
);
572 void rh_port_status_change(__u16
[]);
573 int rh_clear_port_feature(__u8
, __u16
);
574 int rh_set_port_feature(__u8
, __u16
);
575 static void rh_disable_port(unsigned int port
);
577 static void check_finished_bulk_tx_epids(struct usb_hcd
*hcd
,
580 static int tc_setup_epid(struct usb_host_endpoint
*ep
, struct urb
*urb
,
582 static void tc_free_epid(struct usb_host_endpoint
*ep
);
583 static int tc_allocate_epid(void);
584 static void tc_finish_urb(struct usb_hcd
*hcd
, struct urb
*urb
, int status
);
585 static void tc_finish_urb_later(struct usb_hcd
*hcd
, struct urb
*urb
,
588 static int urb_priv_create(struct usb_hcd
*hcd
, struct urb
*urb
, int epid
,
590 static void urb_priv_free(struct usb_hcd
*hcd
, struct urb
*urb
);
592 static int crisv10_usb_check_bandwidth(struct usb_device
*dev
,struct urb
*urb
);
593 static void crisv10_usb_claim_bandwidth(
594 struct usb_device
*dev
, struct urb
*urb
, int bustime
, int isoc
);
595 static void crisv10_usb_release_bandwidth(
596 struct usb_hcd
*hcd
, int isoc
, int bandwidth
);
598 static inline struct urb
*urb_list_first(int epid
);
599 static inline void urb_list_add(struct urb
*urb
, int epid
,
601 static inline urb_entry_t
*urb_list_entry(struct urb
*urb
, int epid
);
602 static inline void urb_list_del(struct urb
*urb
, int epid
);
603 static inline void urb_list_move_last(struct urb
*urb
, int epid
);
604 static inline struct urb
*urb_list_next(struct urb
*urb
, int epid
);
606 int create_sb_for_urb(struct urb
*urb
, int mem_flags
);
607 int init_intr_urb(struct urb
*urb
, int mem_flags
);
609 static inline void etrax_epid_set(__u8 index
, __u32 data
);
610 static inline void etrax_epid_clear_error(__u8 index
);
611 static inline void etrax_epid_set_toggle(__u8 index
, __u8 dirout
,
613 static inline __u8
etrax_epid_get_toggle(__u8 index
, __u8 dirout
);
614 static inline __u32
etrax_epid_get(__u8 index
);
616 /* We're accessing the same register position in Etrax so
617 when we do full access the internal difference doesn't matter */
618 #define etrax_epid_iso_set(index, data) etrax_epid_set(index, data)
619 #define etrax_epid_iso_get(index) etrax_epid_get(index)
622 static void tc_dma_process_isoc_urb(struct urb
*urb
);
623 static void tc_dma_process_queue(int epid
);
624 static void tc_dma_unlink_intr_urb(struct urb
*urb
);
625 static irqreturn_t
tc_dma_tx_interrupt(int irq
, void *vhc
);
626 static irqreturn_t
tc_dma_rx_interrupt(int irq
, void *vhc
);
628 static void tc_bulk_start_timer_func(unsigned long dummy
);
629 static void tc_bulk_eot_timer_func(unsigned long dummy
);
632 /*************************************************************/
633 /*************************************************************/
634 /* Host Controler Driver block */
635 /*************************************************************/
636 /*************************************************************/
639 static irqreturn_t
crisv10_hcd_top_irq(int irq
, void*);
640 static int crisv10_hcd_reset(struct usb_hcd
*);
641 static int crisv10_hcd_start(struct usb_hcd
*);
642 static void crisv10_hcd_stop(struct usb_hcd
*);
644 static int crisv10_hcd_suspend(struct device
*, u32
, u32
);
645 static int crisv10_hcd_resume(struct device
*, u32
);
646 #endif /* CONFIG_PM */
647 static int crisv10_hcd_get_frame(struct usb_hcd
*);
649 static int tc_urb_enqueue(struct usb_hcd
*, struct urb
*, gfp_t mem_flags
);
650 static int tc_urb_dequeue(struct usb_hcd
*, struct urb
*, int);
651 static void tc_endpoint_disable(struct usb_hcd
*, struct usb_host_endpoint
*ep
);
653 static int rh_status_data_request(struct usb_hcd
*, char *);
654 static int rh_control_request(struct usb_hcd
*, u16
, u16
, u16
, char*, u16
);
657 static int crisv10_hcd_hub_suspend(struct usb_hcd
*);
658 static int crisv10_hcd_hub_resume(struct usb_hcd
*);
659 #endif /* CONFIG_PM */
660 #ifdef CONFIG_USB_OTG
661 static int crisv10_hcd_start_port_reset(struct usb_hcd
*, unsigned);
662 #endif /* CONFIG_USB_OTG */
664 /* host controller driver interface */
/* Static hc_driver instance wiring the USB core to this driver's
   reset/start/stop, frame-number, URB-enqueue/dequeue and root-hub
   callbacks. The IRQ handler is installed manually in probe(). */
665 static const struct hc_driver crisv10_hc_driver
=
667 .description
= hc_name
,
668 .product_desc
= product_desc
,
669 .hcd_priv_size
= sizeof(struct crisv10_hcd
),
671 /* Attaching IRQ handler manualy in probe() */
672 /* .irq = crisv10_hcd_irq, */
676 /* called to init HCD and root hub */
677 .reset
= crisv10_hcd_reset
,
678 .start
= crisv10_hcd_start
,
680 /* cleanly make HCD stop writing memory and doing I/O */
681 .stop
= crisv10_hcd_stop
,
683 /* return current frame number */
684 .get_frame_number
= crisv10_hcd_get_frame
,
687 /* Manage i/o requests via the Transfer Controller */
688 .urb_enqueue
= tc_urb_enqueue
,
689 .urb_dequeue
= tc_urb_dequeue
,
691 /* hw synch, freeing endpoint resources that urb_dequeue can't */
692 .endpoint_disable
= tc_endpoint_disable
,
695 /* Root Hub support */
696 .hub_status_data
= rh_status_data_request
,
697 .hub_control
= rh_control_request
,
699 .hub_suspend
= rh_suspend_request
,
700 .hub_resume
= rh_resume_request
,
701 #endif /* CONFIG_PM */
702 #ifdef CONFIG_USB_OTG
703 .start_port_reset
= crisv10_hcd_start_port_reset
,
704 #endif /* CONFIG_USB_OTG */
709 * conversion between pointers to a hcd and the corresponding
713 static inline struct crisv10_hcd
*hcd_to_crisv10_hcd(struct usb_hcd
*hcd
)
715 return (struct crisv10_hcd
*) hcd
->hcd_priv
;
718 static inline struct usb_hcd
*crisv10_hcd_to_hcd(struct crisv10_hcd
*hcd
)
720 return container_of((void *) hcd
, struct usb_hcd
, hcd_priv
);
723 /* check if specified port is in use */
724 static inline int port_in_use(unsigned int port
)
726 return ports
& (1 << port
);
729 /* number of ports in use */
/* Counts the bits of "ports" set among the USB_ROOT_HUB_PORTS positions.
   NOTE(review): the loop body and return statement are missing from this
   copy of the source. */
730 static inline unsigned int num_ports(void)
732 unsigned int i
, num
= 0;
733 for (i
= 0; i
< USB_ROOT_HUB_PORTS
; i
++)
739 /* map hub port number to the port number used internally by the HC */
/* NOTE(review): the loop body and return statement are missing from this
   copy of the source. */
740 static inline unsigned int map_port(unsigned int port
)
742 unsigned int i
, num
= 0;
743 for (i
= 0; i
< USB_ROOT_HUB_PORTS
; i
++)
750 /* size of descriptors in slab cache */
/* NOTE(review): MAX evaluates both arguments twice -- do not pass
   expressions with side effects. */
752 #define MAX(x, y) ((x) > (y) ? (x) : (y))
756 /******************************************************************/
757 /* Hardware Interrupt functions */
758 /******************************************************************/
760 /* Fast interrupt handler for HC */
761 static irqreturn_t
crisv10_hcd_top_irq(int irq
, void *vcd
)
763 struct usb_hcd
*hcd
= vcd
;
764 struct crisv10_irq_reg reg
;
773 /* Turn of other interrupts while handling these sensitive cases */
774 local_irq_save(flags
);
776 /* Read out which interrupts that are flaged */
777 irq_mask
= *R_USB_IRQ_MASK_READ
;
778 reg
.r_usb_irq_mask_read
= irq_mask
;
780 /* Reading R_USB_STATUS clears the ctl_status interrupt. Note that
781 R_USB_STATUS must be read before R_USB_EPID_ATTN since reading the latter
782 clears the ourun and perror fields of R_USB_STATUS. */
783 reg
.r_usb_status
= *R_USB_STATUS
;
785 /* Reading R_USB_EPID_ATTN clears the iso_eof, bulk_eot and epid_attn
787 reg
.r_usb_epid_attn
= *R_USB_EPID_ATTN
;
789 /* Reading R_USB_RH_PORT_STATUS_1 and R_USB_RH_PORT_STATUS_2 clears the
790 port_status interrupt. */
791 reg
.r_usb_rh_port_status_1
= *R_USB_RH_PORT_STATUS_1
;
792 reg
.r_usb_rh_port_status_2
= *R_USB_RH_PORT_STATUS_2
;
794 /* Reading R_USB_FM_NUMBER clears the sof interrupt. */
795 /* Note: the lower 11 bits contain the actual frame number, sent with each
797 reg
.r_usb_fm_number
= *R_USB_FM_NUMBER
;
799 /* Interrupts are handled in order of priority. */
800 if (irq_mask
& IO_MASK(R_USB_IRQ_MASK_READ
, port_status
)) {
801 crisv10_hcd_port_status_irq(®
);
803 if (irq_mask
& IO_MASK(R_USB_IRQ_MASK_READ
, epid_attn
)) {
804 crisv10_hcd_epid_attn_irq(®
);
806 if (irq_mask
& IO_MASK(R_USB_IRQ_MASK_READ
, ctl_status
)) {
807 crisv10_hcd_ctl_status_irq(®
);
809 if (irq_mask
& IO_MASK(R_USB_IRQ_MASK_READ
, iso_eof
)) {
810 crisv10_hcd_isoc_eof_irq(®
);
812 if (irq_mask
& IO_MASK(R_USB_IRQ_MASK_READ
, bulk_eot
)) {
813 /* Update/restart the bulk start timer since obviously the channel is
815 mod_timer(&bulk_start_timer
, jiffies
+ BULK_START_TIMER_INTERVAL
);
816 /* Update/restart the bulk eot timer since we just received an bulk eot
818 mod_timer(&bulk_eot_timer
, jiffies
+ BULK_EOT_TIMER_INTERVAL
);
820 /* Check for finished bulk transfers on epids */
821 check_finished_bulk_tx_epids(hcd
, 0);
823 local_irq_restore(flags
);
/* Handle an epid-attention interrupt: for every epid flagged in the
   latched R_USB_EPID_ATTN snapshot, read the endpoint's error_code from
   the hardware EPT data and either finish the active URB with the
   matching errno (-EPROTO on 3rd error or buffer error, -EPIPE on
   stall) or panic on conditions that indicate driver bugs.
   NOTE(review): this copy has extraction gaps (several closing braces,
   declarations of epid/urb/ept_data/error_code, and some continuation
   lines are missing); '®' is mojibake for '&reg'. */
830 void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg
*reg
) {
831 struct usb_hcd
*hcd
= reg
->hcd
;
832 struct crisv10_urb_priv
*urb_priv
;
836 for (epid
= 0; epid
< NBR_OF_EPIDS
; epid
++) {
837 if (test_bit(epid
, (void *)®
->r_usb_epid_attn
)) {
842 if (epid
== DUMMY_EPID
|| epid
== INVALID_EPID
) {
843 /* We definitely don't care about these ones. Besides, they are
844 always disabled, so any possible disabling caused by the
845 epid attention interrupt is irrelevant. */
846 warn("Got epid_attn for INVALID_EPID or DUMMY_EPID (%d).", epid
);
850 if(!epid_inuse(epid
)) {
851 irq_err("Epid attention on epid:%d that isn't in use\n", epid
);
852 printk("R_USB_STATUS: 0x%x\n", reg
->r_usb_status
);
857 /* Note that although there are separate R_USB_EPT_DATA and
858 R_USB_EPT_DATA_ISO registers, they are located at the same address and
859 are of the same size. In other words, this read should be ok for isoc
861 ept_data
= etrax_epid_get(epid
);
862 error_code
= IO_EXTRACT(R_USB_EPT_DATA
, error_code
, ept_data
);
864 /* Get the active URB for this epid. We blatantly assume
865 that only this URB could have caused the epid attention. */
866 urb
= activeUrbList
[epid
];
868 irq_err("Attention on epid:%d error:%d with no active URB.\n",
870 printk("R_USB_STATUS: 0x%x\n", reg
->r_usb_status
);
875 urb_priv
= (struct crisv10_urb_priv
*)urb
->hcpriv
;
878 /* Using IO_STATE_VALUE on R_USB_EPT_DATA should be ok for isoc also. */
879 if (error_code
== IO_STATE_VALUE(R_USB_EPT_DATA
, error_code
, no_error
)) {
881 /* Isoc traffic doesn't have error_count_in/error_count_out. */
882 if ((usb_pipetype(urb
->pipe
) != PIPE_ISOCHRONOUS
) &&
883 (IO_EXTRACT(R_USB_EPT_DATA
, error_count_in
, ept_data
) == 3 ||
884 IO_EXTRACT(R_USB_EPT_DATA
, error_count_out
, ept_data
) == 3)) {
885 /* Check if URB allready is marked for late-finish, we can get
886 several 3rd error for Intr traffic when a device is unplugged */
887 if(urb_priv
->later_data
== NULL
) {
889 irq_warn("3rd error for epid:%d (%s %s) URB:0x%x[%d]\n", epid
,
890 str_dir(urb
->pipe
), str_type(urb
->pipe
),
891 (unsigned int)urb
, urb_priv
->urb_num
);
893 tc_finish_urb_later(hcd
, urb
, -EPROTO
);
896 } else if (reg
->r_usb_status
& IO_MASK(R_USB_STATUS
, perror
)) {
897 irq_warn("Perror for epid:%d\n", epid
);
898 printk("FM_NUMBER: %d\n", reg
->r_usb_fm_number
& 0x7ff);
899 printk("R_USB_STATUS: 0x%x\n", reg
->r_usb_status
);
903 if (!(ept_data
& IO_MASK(R_USB_EPT_DATA
, valid
))) {
905 panic("Perror because of invalid epid."
906 " Deconfigured too early?");
908 /* past eof1, near eof, zout transfer, setup transfer */
909 /* Dump the urb and the relevant EP descriptor. */
910 panic("Something wrong with DMA descriptor contents."
911 " Too much traffic inserted?");
913 } else if (reg
->r_usb_status
& IO_MASK(R_USB_STATUS
, ourun
)) {
915 printk("FM_NUMBER: %d\n", reg
->r_usb_fm_number
& 0x7ff);
916 printk("R_USB_STATUS: 0x%x\n", reg
->r_usb_status
);
920 panic("Buffer overrun/underrun for epid:%d. DMA too busy?", epid
);
922 irq_warn("Attention on epid:%d (%s %s) with no error code\n", epid
,
923 str_dir(urb
->pipe
), str_type(urb
->pipe
));
924 printk("R_USB_STATUS: 0x%x\n", reg
->r_usb_status
);
929 } else if (error_code
== IO_STATE_VALUE(R_USB_EPT_DATA
, error_code
,
931 /* Not really a protocol error, just says that the endpoint gave
932 a stall response. Note that error_code cannot be stall for isoc. */
933 if (usb_pipetype(urb
->pipe
) == PIPE_ISOCHRONOUS
) {
934 panic("Isoc traffic cannot stall");
937 tc_dbg("Stall for epid:%d (%s %s) URB:0x%x\n", epid
,
938 str_dir(urb
->pipe
), str_type(urb
->pipe
), (unsigned int)urb
);
939 tc_finish_urb(hcd
, urb
, -EPIPE
);
941 } else if (error_code
== IO_STATE_VALUE(R_USB_EPT_DATA
, error_code
,
943 /* Two devices responded to a transaction request. Must be resolved
944 by software. FIXME: Reset ports? */
945 panic("Bus error for epid %d."
946 " Two devices responded to transaction request\n",
949 } else if (error_code
== IO_STATE_VALUE(R_USB_EPT_DATA
, error_code
,
951 /* DMA overrun or underrun. */
952 irq_warn("Buffer overrun/underrun for epid:%d (%s %s)\n", epid
,
953 str_dir(urb
->pipe
), str_type(urb
->pipe
));
955 /* It seems that error_code = buffer_error in
956 R_USB_EPT_DATA/R_USB_EPT_DATA_ISO and ourun = yes in R_USB_STATUS
957 are the same error. */
958 tc_finish_urb(hcd
, urb
, -EPROTO
);
960 irq_warn("Unknown attention on epid:%d (%s %s)\n", epid
,
961 str_dir(urb
->pipe
), str_type(urb
->pipe
));
969 void crisv10_hcd_port_status_irq(struct crisv10_irq_reg
*reg
)
971 __u16 port_reg
[USB_ROOT_HUB_PORTS
];
973 port_reg
[0] = reg
->r_usb_rh_port_status_1
;
974 port_reg
[1] = reg
->r_usb_rh_port_status_2
;
975 rh_port_status_change(port_reg
);
/* Handle the isochronous end-of-frame interrupt: for each in-use isoc
   epid with a non-empty SB list, start any NOT_STARTED IN URB whose
   requested start_frame has arrived (non-ASAP case) by enabling its EP
   descriptor and marking the URB STARTED.
   NOTE(review): extraction gaps -- epid/urb/flags declarations, several
   braces and continue statements are missing from this copy. */
979 void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg
*reg
)
983 struct crisv10_urb_priv
*urb_priv
;
987 for (epid
= 0; epid
< NBR_OF_EPIDS
- 1; epid
++) {
989 /* Only check epids that are in use, is valid and has SB list */
990 if (!epid_inuse(epid
) || epid
== INVALID_EPID
||
991 TxIsocEPList
[epid
].sub
== 0 || epid
== DUMMY_EPID
) {
992 /* Nothing here to see. */
995 ASSERT(epid_isoc(epid
));
997 /* Get the active URB for this epid (if any). */
998 urb
= activeUrbList
[epid
];
1000 isoc_warn("Ignoring NULL urb for epid:%d\n", epid
);
1003 if(!epid_out_traffic(epid
)) {
1005 ASSERT(usb_pipetype(urb
->pipe
) == PIPE_ISOCHRONOUS
);
1007 urb_priv
= (struct crisv10_urb_priv
*)urb
->hcpriv
;
1010 if (urb_priv
->urb_state
== NOT_STARTED
) {
1011 /* If ASAP is not set and urb->start_frame is the current frame,
1012 start the transfer. */
1013 if (!(urb
->transfer_flags
& URB_ISO_ASAP
) &&
1014 (urb
->start_frame
== (*R_USB_FM_NUMBER
& 0x7ff))) {
1015 /* EP should not be enabled if we're waiting for start_frame */
1016 ASSERT((TxIsocEPList
[epid
].command
&
1017 IO_STATE(USB_EP_command
, enable
, yes
)) == 0);
1019 isoc_warn("Enabling isoc IN EP descr for epid %d\n", epid
);
1020 TxIsocEPList
[epid
].command
|= IO_STATE(USB_EP_command
, enable
, yes
);
1022 /* This urb is now active. */
1023 urb_priv
->urb_state
= STARTED
;
1033 void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg
*reg
)
1035 struct crisv10_hcd
* crisv10_hcd
= hcd_to_crisv10_hcd(reg
->hcd
);
1038 ASSERT(crisv10_hcd
);
1040 irq_dbg("ctr_status_irq, controller status: %s\n",
1041 hcd_status_to_str(reg
->r_usb_status
));
1043 /* FIXME: What should we do if we get ourun or perror? Dump the EP and SB
1044 list for the corresponding epid? */
1045 if (reg
->r_usb_status
& IO_MASK(R_USB_STATUS
, ourun
)) {
1046 panic("USB controller got ourun.");
1048 if (reg
->r_usb_status
& IO_MASK(R_USB_STATUS
, perror
)) {
1050 /* Before, etrax_usb_do_intr_recover was called on this epid if it was
1051 an interrupt pipe. I don't see how re-enabling all EP descriptors
1052 will help if there was a programming error. */
1053 panic("USB controller got perror.");
1056 /* Keep track of USB Controller, if it's running or not */
1057 if(reg
->r_usb_status
& IO_STATE(R_USB_STATUS
, running
, yes
)) {
1058 crisv10_hcd
->running
= 1;
1060 crisv10_hcd
->running
= 0;
1063 if (reg
->r_usb_status
& IO_MASK(R_USB_STATUS
, device_mode
)) {
1064 /* We should never operate in device mode. */
1065 panic("USB controller in device mode.");
1068 /* Set the flag to avoid getting "Unlink after no-IRQ? Controller is probably
1069 using the wrong IRQ" from hcd_unlink_urb() in drivers/usb/core/hcd.c */
1070 set_bit(HCD_FLAG_SAW_IRQ
, ®
->hcd
->flags
);
1076 /******************************************************************/
1077 /* Host Controller interface functions */
1078 /******************************************************************/
1080 static inline void crisv10_ready_wait(void) {
1081 volatile int timeout
= 10000;
1082 /* Check the busy bit of USB controller in Etrax */
1083 while((*R_USB_COMMAND
& IO_MASK(R_USB_COMMAND
, busy
)) &&
1086 warn("Timeout while waiting for USB controller to be idle\n");
/* reset host controller (hc_driver .reset callback).
   NOTE(review): the register write below appears commented out in the
   pristine source (those lines survived extraction un-mangled, which only
   happens inside comments) — kept commented out to preserve behavior. */
static int crisv10_hcd_reset(struct usb_hcd *hcd)
{
  hcd_dbg(hcd, "reset\n");

  /* Reset the USB interface. */
  /*
  *R_USB_COMMAND =
    IO_STATE(R_USB_COMMAND, port_sel, nop) |
    IO_STATE(R_USB_COMMAND, port_cmd, reset) |
    IO_STATE(R_USB_COMMAND, ctrl_cmd, reset);
  */
  return 0;
}
1109 /* start host controller */
1110 static int crisv10_hcd_start(struct usb_hcd
*hcd
)
1113 hcd_dbg(hcd
, "start\n");
1115 crisv10_ready_wait();
1117 /* Start processing of USB traffic. */
1119 IO_STATE(R_USB_COMMAND
, port_sel
, nop
) |
1120 IO_STATE(R_USB_COMMAND
, port_cmd
, reset
) |
1121 IO_STATE(R_USB_COMMAND
, ctrl_cmd
, host_run
);
1125 hcd
->state
= HC_STATE_RUNNING
;
/* stop host controller (hc_driver .stop callback) — implemented as a
   controller reset. */
static void crisv10_hcd_stop(struct usb_hcd *hcd)
{
  hcd_dbg(hcd, "stop\n");
  crisv10_hcd_reset(hcd);
}
1140 /* return the current frame number */
1141 static int crisv10_hcd_get_frame(struct usb_hcd
*hcd
)
1145 return (*R_USB_FM_NUMBER
& 0x7ff);
#ifdef CONFIG_USB_OTG
/* OTG port-reset hook — not supported on this controller. */
static int crisv10_hcd_start_port_reset(struct usb_hcd *hcd, unsigned port)
{
  return 0; /* no-op for now */
}
#endif /* CONFIG_USB_OTG */
1158 /******************************************************************/
1159 /* Root Hub functions */
1160 /******************************************************************/
1162 /* root hub status */
1163 static const struct usb_hub_status rh_hub_status
=
1169 /* root hub descriptor */
1170 static const u8 rh_hub_descr
[] =
1172 0x09, /* bDescLength */
1173 0x29, /* bDescriptorType */
1174 USB_ROOT_HUB_PORTS
, /* bNbrPorts */
1175 0x00, /* wHubCharacteristics */
1177 0x01, /* bPwrOn2pwrGood */
1178 0x00, /* bHubContrCurrent */
1179 0x00, /* DeviceRemovable */
1180 0xff /* PortPwrCtrlMask */
1183 /* Actual holder of root hub status*/
1184 struct crisv10_rh rh
;
1186 /* Initialize root hub data structures (called from dvdrv_hcd_probe()) */
1189 /* Reset port status flags */
1190 for (i
= 0; i
< USB_ROOT_HUB_PORTS
; i
++) {
1191 rh
.wPortChange
[i
] = 0;
1192 rh
.wPortStatusPrev
[i
] = 0;
1197 #define RH_FEAT_MASK ((1<<USB_PORT_FEAT_CONNECTION)|\
1198 (1<<USB_PORT_FEAT_ENABLE)|\
1199 (1<<USB_PORT_FEAT_SUSPEND)|\
1200 (1<<USB_PORT_FEAT_RESET))
1202 /* Handle port status change interrupt (called from bottom part interrupt) */
1203 void rh_port_status_change(__u16 port_reg
[]) {
1207 for(i
= 0; i
< USB_ROOT_HUB_PORTS
; i
++) {
1208 /* Xor out changes since last read, masked for important flags */
1209 wChange
= (port_reg
[i
] & RH_FEAT_MASK
) ^ rh
.wPortStatusPrev
[i
];
1210 /* Or changes together with (if any) saved changes */
1211 rh
.wPortChange
[i
] |= wChange
;
1212 /* Save new status */
1213 rh
.wPortStatusPrev
[i
] = port_reg
[i
];
1216 rh_dbg("Interrupt port_status change port%d: %s Current-status:%s\n", i
+1,
1217 port_status_to_str(wChange
),
1218 port_status_to_str(port_reg
[i
]));
1223 /* Construct port status change bitmap for the root hub */
1224 static int rh_status_data_request(struct usb_hcd
*hcd
, char *buf
)
1226 struct crisv10_hcd
* crisv10_hcd
= hcd_to_crisv10_hcd(hcd
);
1231 * corresponds to hub status change EP (USB 2.0 spec section 11.13.4)
1232 * return bitmap indicating ports with status change
1235 spin_lock(&crisv10_hcd
->lock
);
1236 for (i
= 1; i
<= crisv10_hcd
->num_ports
; i
++) {
1237 if (rh
.wPortChange
[map_port(i
)]) {
1239 rh_dbg("rh_status_data_request, change on port %d: %s Current Status: %s\n", i
,
1240 port_status_to_str(rh
.wPortChange
[map_port(i
)]),
1241 port_status_to_str(rh
.wPortStatusPrev
[map_port(i
)]));
1244 spin_unlock(&crisv10_hcd
->lock
);
1246 return *buf
== 0 ? 0 : 1;
1249 /* Handle a control request for the root hub (called from hcd_driver) */
1250 static int rh_control_request(struct usb_hcd
*hcd
,
1257 struct crisv10_hcd
*crisv10_hcd
= hcd_to_crisv10_hcd(hcd
);
1263 case GetHubDescriptor
:
1264 rh_dbg("GetHubDescriptor\n");
1265 len
= min_t(unsigned int, sizeof rh_hub_descr
, wLength
);
1266 memcpy(buf
, rh_hub_descr
, len
);
1267 buf
[2] = crisv10_hcd
->num_ports
;
1270 rh_dbg("GetHubStatus\n");
1271 len
= min_t(unsigned int, sizeof rh_hub_status
, wLength
);
1272 memcpy(buf
, &rh_hub_status
, len
);
1275 if (!wIndex
|| wIndex
> crisv10_hcd
->num_ports
)
1277 rh_dbg("GetportStatus, port:%d change:%s status:%s\n", wIndex
,
1278 port_status_to_str(rh
.wPortChange
[map_port(wIndex
)]),
1279 port_status_to_str(rh
.wPortStatusPrev
[map_port(wIndex
)]));
1280 *(u16
*) buf
= cpu_to_le16(rh
.wPortStatusPrev
[map_port(wIndex
)]);
1281 *(u16
*) (buf
+ 2) = cpu_to_le16(rh
.wPortChange
[map_port(wIndex
)]);
1284 rh_dbg("SetHubFeature\n");
1285 case ClearHubFeature
:
1286 rh_dbg("ClearHubFeature\n");
1288 case C_HUB_OVER_CURRENT
:
1289 case C_HUB_LOCAL_POWER
:
1290 rh_warn("Not implemented hub request:%d \n", typeReq
);
1291 /* not implemented */
1297 case SetPortFeature
:
1298 if (!wIndex
|| wIndex
> crisv10_hcd
->num_ports
)
1300 if(rh_set_port_feature(map_port(wIndex
), wValue
))
1303 case ClearPortFeature
:
1304 if (!wIndex
|| wIndex
> crisv10_hcd
->num_ports
)
1306 if(rh_clear_port_feature(map_port(wIndex
), wValue
))
1310 rh_warn("Unknown hub request: %d\n", typeReq
);
1318 int rh_set_port_feature(__u8 bPort
, __u16 wFeature
) {
1319 __u8 bUsbCommand
= 0;
1322 case USB_PORT_FEAT_RESET
:
1323 rh_dbg("SetPortFeature: reset\n");
1325 if (rh
.wPortStatusPrev
[bPort
] &
1326 IO_STATE(R_USB_RH_PORT_STATUS_1
, enabled
, yes
))
1328 __u8 restart_controller
= 0;
1330 if ( (rh
.wPortStatusPrev
[0] &
1331 IO_STATE(R_USB_RH_PORT_STATUS_1
, enabled
, yes
)) &&
1332 (rh
.wPortStatusPrev
[1] &
1333 IO_STATE(R_USB_RH_PORT_STATUS_2
, enabled
, yes
)) )
1335 /* Both ports is enabled. The USB controller will not change state. */
1336 restart_controller
= 0;
1340 /* Only ports is enabled. The USB controller will change state and
1341 must be restarted. */
1342 restart_controller
= 1;
1345 In ETRAX 100LX it's not possible to reset an enabled root hub port.
1346 The workaround is to disable and enable the port before resetting it.
1347 Disabling the port can, if both ports are disabled at once, cause the
1348 USB controller to change state to HOST_MODE state.
1349 The USB controller state transition causes a lot of unwanted
1350 interrupts that must be avoided.
1351 Disabling the USB controller status and port status interrupts before
1352 disabling/resetting the port stops these interrupts.
1354 These actions are performed:
1355 1. Disable USB controller status and port status interrupts.
1357 3. Wait for the port to be disabled.
1359 5. Wait for the port to be enabled.
1361 7. Wait for for the reset to end.
1362 8. Wait for the USB controller entering started state.
1363 9. Order the USB controller to running state.
1364 10. Wait for the USB controller reaching running state.
1365 11. Clear all interrupts generated during the disable/enable/reset
1367 12. Enable the USB controller status and port status interrupts.
1370 /* 1. Disable USB controller status and USB port status interrupts. */
1371 *R_USB_IRQ_MASK_CLR
= IO_STATE(R_USB_IRQ_MASK_CLR
, ctl_status
, clr
);
1372 __asm__
__volatile__ (" nop");
1373 *R_USB_IRQ_MASK_CLR
= IO_STATE(R_USB_IRQ_MASK_CLR
, port_status
, clr
);
1374 __asm__
__volatile__ (" nop");
1378 /* Since an root hub port reset shall be 50 ms and the ETRAX 100LX
1379 root hub port reset is 10 ms we must perform 5 port resets to
1380 achieve a proper root hub port reset. */
1381 for (reset_cnt
= 0; reset_cnt
< 5; reset_cnt
++)
1383 rh_dbg("Disable Port %d\n", bPort
+ 1);
1385 /* 2. Disable the port*/
1388 *R_USB_PORT1_DISABLE
= IO_STATE(R_USB_PORT1_DISABLE
, disable
, yes
);
1392 *R_USB_PORT2_DISABLE
= IO_STATE(R_USB_PORT2_DISABLE
, disable
, yes
);
1395 /* 3. Wait for the port to be disabled. */
1396 while ( (bPort
== 0) ?
1397 *R_USB_RH_PORT_STATUS_1
&
1398 IO_STATE(R_USB_RH_PORT_STATUS_1
, enabled
, yes
) :
1399 *R_USB_RH_PORT_STATUS_2
&
1400 IO_STATE(R_USB_RH_PORT_STATUS_2
, enabled
, yes
) ) {}
1402 rh_dbg("Port %d is disabled. Enable it!\n", bPort
+ 1);
1404 /* 4. Enable the port. */
1407 *R_USB_PORT1_DISABLE
= IO_STATE(R_USB_PORT1_DISABLE
, disable
, no
);
1411 *R_USB_PORT2_DISABLE
= IO_STATE(R_USB_PORT2_DISABLE
, disable
, no
);
1414 /* 5. Wait for the port to be enabled again. */
1415 while (!( (bPort
== 0) ?
1416 *R_USB_RH_PORT_STATUS_1
&
1417 IO_STATE(R_USB_RH_PORT_STATUS_1
, connected
, yes
) :
1418 *R_USB_RH_PORT_STATUS_2
&
1419 IO_STATE(R_USB_RH_PORT_STATUS_2
, connected
, yes
) ) ) {}
1421 rh_dbg("Port %d is enabled.\n", bPort
+ 1);
1423 /* 6. Reset the port */
1424 crisv10_ready_wait();
1427 IO_STATE(R_USB_COMMAND
, port_sel
, port1
):
1428 IO_STATE(R_USB_COMMAND
, port_sel
, port2
) ) |
1429 IO_STATE(R_USB_COMMAND
, port_cmd
, reset
) |
1430 IO_STATE(R_USB_COMMAND
, busy
, no
) |
1431 IO_STATE(R_USB_COMMAND
, ctrl_cmd
, nop
);
1432 rh_dbg("Port %d is resetting.\n", bPort
+ 1);
1434 /* 7. The USB specification says that we should wait for at least
1435 10ms for device recover */
1436 udelay(10500); /* 10,5ms blocking wait */
1438 crisv10_ready_wait();
1443 /* Check if the USB controller needs to be restarted. */
1444 if (restart_controller
)
1446 /* 8. Wait for the USB controller entering started state. */
1447 while (!(*R_USB_STATUS
& IO_STATE(R_USB_STATUS
, started
, yes
))) {}
1449 /* 9. Order the USB controller to running state. */
1450 crisv10_ready_wait();
1452 IO_STATE(R_USB_COMMAND
, port_sel
, nop
) |
1453 IO_STATE(R_USB_COMMAND
, port_cmd
, reset
) |
1454 IO_STATE(R_USB_COMMAND
, busy
, no
) |
1455 IO_STATE(R_USB_COMMAND
, ctrl_cmd
, host_run
);
1457 /* 10. Wait for the USB controller reaching running state. */
1458 while (!(*R_USB_STATUS
& IO_STATE(R_USB_STATUS
, running
, yes
))) {}
1461 /* 11. Clear any controller or port satus interrupts before enabling
1466 /* Clear the port status interrupt of the reset port. */
1469 rh_dbg("Clearing port 1 interrupts\n");
1470 dummy
= *R_USB_RH_PORT_STATUS_1
;
1474 rh_dbg("Clearing port 2 interrupts\n");
1475 dummy
= *R_USB_RH_PORT_STATUS_2
;
1478 if (restart_controller
)
1480 /* The USB controller is restarted. Clear all interupts. */
1481 rh_dbg("Clearing all interrupts\n");
1482 dummy
= *R_USB_STATUS
;
1483 dummy
= *R_USB_RH_PORT_STATUS_1
;
1484 dummy
= *R_USB_RH_PORT_STATUS_2
;
1488 /* 12. Enable USB controller status and USB port status interrupts. */
1489 *R_USB_IRQ_MASK_SET
= IO_STATE(R_USB_IRQ_MASK_SET
, ctl_status
, set
);
1490 __asm__
__volatile__ (" nop");
1491 *R_USB_IRQ_MASK_SET
= IO_STATE(R_USB_IRQ_MASK_SET
, port_status
, set
);
1492 __asm__
__volatile__ (" nop");
1498 bUsbCommand
|= IO_STATE(R_USB_COMMAND
, port_cmd
, reset
);
1499 /* Select which port via the port_sel field */
1500 bUsbCommand
|= IO_FIELD(R_USB_COMMAND
, port_sel
, bPort
+1);
1502 /* Make sure the controller isn't busy. */
1503 crisv10_ready_wait();
1504 /* Send out the actual command to the USB controller */
1505 *R_USB_COMMAND
= bUsbCommand
;
1507 /* Wait a while for controller to first become started after port reset */
1508 udelay(12000); /* 12ms blocking wait */
1510 /* Make sure the controller isn't busy. */
1511 crisv10_ready_wait();
1513 /* If all enabled ports were disabled the host controller goes down into
1514 started mode, so we need to bring it back into the running state.
1515 (This is safe even if it's already in the running state.) */
1517 IO_STATE(R_USB_COMMAND
, port_sel
, nop
) |
1518 IO_STATE(R_USB_COMMAND
, port_cmd
, reset
) |
1519 IO_STATE(R_USB_COMMAND
, ctrl_cmd
, host_run
);
1523 case USB_PORT_FEAT_SUSPEND
:
1524 rh_dbg("SetPortFeature: suspend\n");
1525 bUsbCommand
|= IO_STATE(R_USB_COMMAND
, port_cmd
, suspend
);
1528 case USB_PORT_FEAT_POWER
:
1529 rh_dbg("SetPortFeature: power\n");
1531 case USB_PORT_FEAT_C_CONNECTION
:
1532 rh_dbg("SetPortFeature: c_connection\n");
1534 case USB_PORT_FEAT_C_RESET
:
1535 rh_dbg("SetPortFeature: c_reset\n");
1537 case USB_PORT_FEAT_C_OVER_CURRENT
:
1538 rh_dbg("SetPortFeature: c_over_current\n");
1542 /* Select which port via the port_sel field */
1543 bUsbCommand
|= IO_FIELD(R_USB_COMMAND
, port_sel
, bPort
+1);
1545 /* Make sure the controller isn't busy. */
1546 crisv10_ready_wait();
1547 /* Send out the actual command to the USB controller */
1548 *R_USB_COMMAND
= bUsbCommand
;
1551 rh_dbg("SetPortFeature: unknown feature\n");
1557 int rh_clear_port_feature(__u8 bPort
, __u16 wFeature
) {
1559 case USB_PORT_FEAT_ENABLE
:
1560 rh_dbg("ClearPortFeature: enable\n");
1561 rh_disable_port(bPort
);
1563 case USB_PORT_FEAT_SUSPEND
:
1564 rh_dbg("ClearPortFeature: suspend\n");
1566 case USB_PORT_FEAT_POWER
:
1567 rh_dbg("ClearPortFeature: power\n");
1570 case USB_PORT_FEAT_C_ENABLE
:
1571 rh_dbg("ClearPortFeature: c_enable\n");
1573 case USB_PORT_FEAT_C_SUSPEND
:
1574 rh_dbg("ClearPortFeature: c_suspend\n");
1576 case USB_PORT_FEAT_C_CONNECTION
:
1577 rh_dbg("ClearPortFeature: c_connection\n");
1579 case USB_PORT_FEAT_C_OVER_CURRENT
:
1580 rh_dbg("ClearPortFeature: c_over_current\n");
1582 case USB_PORT_FEAT_C_RESET
:
1583 rh_dbg("ClearPortFeature: c_reset\n");
1586 rh
.wPortChange
[bPort
] &= ~(1 << (wFeature
- 16));
1589 rh_dbg("ClearPortFeature: unknown feature\n");
1597 /* Handle a suspend request for the root hub (called from hcd_driver) */
/* Handle a suspend request for the root hub (called from hcd_driver) */
static int rh_suspend_request(struct usb_hcd *hcd)
{
  return 0; /* no-op for now */
}
/* Handle a resume request for the root hub (called from hcd_driver) */
static int rh_resume_request(struct usb_hcd *hcd)
{
  return 0; /* no-op for now */
}
1608 #endif /* CONFIG_PM */
1612 /* Wrapper function for workaround port disable registers in USB controller */
1613 static void rh_disable_port(unsigned int port
) {
1614 volatile int timeout
= 10000;
1615 volatile char* usb_portx_disable
;
1618 usb_portx_disable
= R_USB_PORT1_DISABLE
;
1621 usb_portx_disable
= R_USB_PORT2_DISABLE
;
1624 /* Invalid port index */
1627 /* Set disable flag in special register */
1628 *usb_portx_disable
= IO_STATE(R_USB_PORT1_DISABLE
, disable
, yes
);
1629 /* Wait until not enabled anymore */
1630 while((rh
.wPortStatusPrev
[port
] &
1631 IO_STATE(R_USB_RH_PORT_STATUS_1
, enabled
, yes
)) &&
1634 warn("Timeout while waiting for port %d to become disabled\n", port
);
1636 /* clear disable flag in special register */
1637 *usb_portx_disable
= IO_STATE(R_USB_PORT1_DISABLE
, disable
, no
);
1638 rh_info("Physical port %d disabled\n", port
+1);
1642 /******************************************************************/
1643 /* Transfer Controller (TC) functions */
1644 /******************************************************************/
1646 /* FIXME: Should RX_BUF_SIZE be a config option, or maybe we should adjust it
1648 To adjust it dynamically we would have to get an interrupt when we reach
1649 the end of the rx descriptor list, or when we get close to the end, and
1650 then allocate more descriptors. */
1651 #define NBR_OF_RX_DESC 512
1652 #define RX_DESC_BUF_SIZE 1024
1653 #define RX_BUF_SIZE (NBR_OF_RX_DESC * RX_DESC_BUF_SIZE)
1656 /* Local variables for Transfer Controller */
1657 /* --------------------------------------- */
1659 /* This is a circular (double-linked) list of the active urbs for each epid.
1660 The head is never removed, and new urbs are linked onto the list as
1661 urb_entry_t elements. Don't reference urb_list directly; use the wrapper
1662 functions instead (which includes spin_locks) */
1663 static struct list_head urb_list
[NBR_OF_EPIDS
];
1665 /* Read about the need and usage of this lock in submit_ctrl_urb. */
1666 /* Lock for URB lists for each EPID */
1667 static spinlock_t urb_list_lock
;
1669 /* Lock for EPID array register (R_USB_EPT_x) in Etrax */
1670 static spinlock_t etrax_epid_lock
;
1672 /* Lock for dma8 sub0 handling */
1673 static spinlock_t etrax_dma8_sub0_lock
;
1675 /* DMA IN cache bug. Align the DMA IN buffers to 32 bytes, i.e. a cache line.
1676 Since RX_DESC_BUF_SIZE is 1024 is a multiple of 32, all rx buffers will be
1678 static volatile unsigned char RxBuf
[RX_BUF_SIZE
] __attribute__ ((aligned (32)));
1679 static volatile struct USB_IN_Desc RxDescList
[NBR_OF_RX_DESC
] __attribute__ ((aligned (4)));
1681 /* Pointers into RxDescList. */
1682 static volatile struct USB_IN_Desc
*myNextRxDesc
;
1683 static volatile struct USB_IN_Desc
*myLastRxDesc
;
1685 /* A zout transfer makes a memory access at the address of its buf pointer,
1686 which means that setting this buf pointer to 0 will cause an access to the
1687 flash. In addition to this, setting sw_len to 0 results in a 16/32 bytes
1688 (depending on DMA burst size) transfer.
1689 Instead, we set it to 1, and point it to this buffer. */
1690 static int zout_buffer
[4] __attribute__ ((aligned (4)));
1692 /* Cache for allocating new EP and SB descriptors. */
1693 static struct kmem_cache
*usb_desc_cache
;
1695 /* Cache for the data allocated in the isoc descr top half. */
1696 static struct kmem_cache
*isoc_compl_cache
;
1698 /* Cache for the data allocated when delayed finishing of URBs */
1699 static struct kmem_cache
*later_data_cache
;
1702 /* Counter to keep track of how many Isoc EP we have sat up. Used to enable
1703 and disable iso_eof interrupt. We only need these interrupts when we have
1704 Isoc data endpoints (consumes CPU cycles).
1705 FIXME: This could be more fine granular, so this interrupt is only enabled
1706 when we have a In Isoc URB not URB_ISO_ASAP flaged queued. */
1707 static int isoc_epid_counter
;
1709 /* Protecting wrapper functions for R_USB_EPT_x */
1710 /* -------------------------------------------- */
1711 static inline void etrax_epid_set(__u8 index
, __u32 data
) {
1712 unsigned long flags
;
1713 spin_lock_irqsave(&etrax_epid_lock
, flags
);
1714 *R_USB_EPT_INDEX
= IO_FIELD(R_USB_EPT_INDEX
, value
, index
);
1716 *R_USB_EPT_DATA
= data
;
1717 spin_unlock_irqrestore(&etrax_epid_lock
, flags
);
1720 static inline void etrax_epid_clear_error(__u8 index
) {
1721 unsigned long flags
;
1722 spin_lock_irqsave(&etrax_epid_lock
, flags
);
1723 *R_USB_EPT_INDEX
= IO_FIELD(R_USB_EPT_INDEX
, value
, index
);
1726 ~(IO_MASK(R_USB_EPT_DATA
, error_count_in
) |
1727 IO_MASK(R_USB_EPT_DATA
, error_count_out
) |
1728 IO_MASK(R_USB_EPT_DATA
, error_code
));
1729 spin_unlock_irqrestore(&etrax_epid_lock
, flags
);
1732 static inline void etrax_epid_set_toggle(__u8 index
, __u8 dirout
,
1734 unsigned long flags
;
1735 spin_lock_irqsave(&etrax_epid_lock
, flags
);
1736 *R_USB_EPT_INDEX
= IO_FIELD(R_USB_EPT_INDEX
, value
, index
);
1739 *R_USB_EPT_DATA
&= ~IO_MASK(R_USB_EPT_DATA
, t_out
);
1740 *R_USB_EPT_DATA
|= IO_FIELD(R_USB_EPT_DATA
, t_out
, toggle
);
1742 *R_USB_EPT_DATA
&= ~IO_MASK(R_USB_EPT_DATA
, t_in
);
1743 *R_USB_EPT_DATA
|= IO_FIELD(R_USB_EPT_DATA
, t_in
, toggle
);
1745 spin_unlock_irqrestore(&etrax_epid_lock
, flags
);
1748 static inline __u8
etrax_epid_get_toggle(__u8 index
, __u8 dirout
) {
1749 unsigned long flags
;
1751 spin_lock_irqsave(&etrax_epid_lock
, flags
);
1752 *R_USB_EPT_INDEX
= IO_FIELD(R_USB_EPT_INDEX
, value
, index
);
1755 toggle
= IO_EXTRACT(R_USB_EPT_DATA
, t_out
, *R_USB_EPT_DATA
);
1757 toggle
= IO_EXTRACT(R_USB_EPT_DATA
, t_in
, *R_USB_EPT_DATA
);
1759 spin_unlock_irqrestore(&etrax_epid_lock
, flags
);
1764 static inline __u32
etrax_epid_get(__u8 index
) {
1765 unsigned long flags
;
1767 spin_lock_irqsave(&etrax_epid_lock
, flags
);
1768 *R_USB_EPT_INDEX
= IO_FIELD(R_USB_EPT_INDEX
, value
, index
);
1770 data
= *R_USB_EPT_DATA
;
1771 spin_unlock_irqrestore(&etrax_epid_lock
, flags
);
1778 /* Main functions for Transfer Controller */
1779 /* -------------------------------------- */
1781 /* Init structs, memories and lists used by Transfer Controller */
1782 int tc_init(struct usb_hcd
*hcd
) {
1784 /* Clear software state info for all epids */
1785 memset(epid_state
, 0, sizeof(struct etrax_epid
) * NBR_OF_EPIDS
);
1787 /* Set Invalid and Dummy as being in use and disabled */
1788 epid_state
[INVALID_EPID
].inuse
= 1;
1789 epid_state
[DUMMY_EPID
].inuse
= 1;
1790 epid_state
[INVALID_EPID
].disabled
= 1;
1791 epid_state
[DUMMY_EPID
].disabled
= 1;
1793 /* Clear counter for how many Isoc epids we have sat up */
1794 isoc_epid_counter
= 0;
1796 /* Initialize the urb list by initiating a head for each list.
1797 Also reset list hodling active URB for each epid */
1798 for (i
= 0; i
< NBR_OF_EPIDS
; i
++) {
1799 INIT_LIST_HEAD(&urb_list
[i
]);
1800 activeUrbList
[i
] = NULL
;
1803 /* Init lock for URB lists */
1804 spin_lock_init(&urb_list_lock
);
1805 /* Init lock for Etrax R_USB_EPT register */
1806 spin_lock_init(&etrax_epid_lock
);
1807 /* Init lock for Etrax dma8 sub0 handling */
1808 spin_lock_init(&etrax_dma8_sub0_lock
);
1810 /* We use kmem_cache_* to make sure that all DMA desc. are dword aligned */
1812 /* Note that we specify sizeof(struct USB_EP_Desc) as the size, but also
1813 allocate SB descriptors from this cache. This is ok since
1814 sizeof(struct USB_EP_Desc) == sizeof(struct USB_SB_Desc). */
1815 usb_desc_cache
= kmem_cache_create("usb_desc_cache",
1816 sizeof(struct USB_EP_Desc
), 0,
1817 SLAB_HWCACHE_ALIGN
, 0);
1818 if(usb_desc_cache
== NULL
) {
1822 /* Create slab cache for speedy allocation of memory for isoc bottom-half
1823 interrupt handling */
1825 kmem_cache_create("isoc_compl_cache",
1826 sizeof(struct crisv10_isoc_complete_data
),
1827 0, SLAB_HWCACHE_ALIGN
, 0);
1828 if(isoc_compl_cache
== NULL
) {
1832 /* Create slab cache for speedy allocation of memory for later URB finish
1835 kmem_cache_create("later_data_cache",
1836 sizeof(struct urb_later_data
),
1837 0, SLAB_HWCACHE_ALIGN
, 0);
1838 if(later_data_cache
== NULL
) {
1843 /* Initiate the bulk start timer. */
1844 init_timer(&bulk_start_timer
);
1845 bulk_start_timer
.expires
= jiffies
+ BULK_START_TIMER_INTERVAL
;
1846 bulk_start_timer
.function
= tc_bulk_start_timer_func
;
1847 add_timer(&bulk_start_timer
);
1850 /* Initiate the bulk eot timer. */
1851 init_timer(&bulk_eot_timer
);
1852 bulk_eot_timer
.expires
= jiffies
+ BULK_EOT_TIMER_INTERVAL
;
1853 bulk_eot_timer
.function
= tc_bulk_eot_timer_func
;
1854 bulk_eot_timer
.data
= (unsigned long)hcd
;
1855 add_timer(&bulk_eot_timer
);
1860 /* Uninitialize all resources used by Transfer Controller */
1861 void tc_destroy(void) {
1863 /* Destroy all slab cache */
1864 kmem_cache_destroy(usb_desc_cache
);
1865 kmem_cache_destroy(isoc_compl_cache
);
1866 kmem_cache_destroy(later_data_cache
);
1869 del_timer(&bulk_start_timer
);
1870 del_timer(&bulk_eot_timer
);
1873 static void restart_dma8_sub0(void) {
1874 unsigned long flags
;
1875 spin_lock_irqsave(&etrax_dma8_sub0_lock
, flags
);
1876 /* Verify that the dma is not running */
1877 if ((*R_DMA_CH8_SUB0_CMD
& IO_MASK(R_DMA_CH8_SUB0_CMD
, cmd
)) == 0) {
1878 struct USB_EP_Desc
*ep
= (struct USB_EP_Desc
*)phys_to_virt(*R_DMA_CH8_SUB0_EP
);
1879 while (DUMMY_EPID
== IO_EXTRACT(USB_EP_command
, epid
, ep
->command
)) {
1880 ep
= (struct USB_EP_Desc
*)phys_to_virt(ep
->next
);
1882 /* Advance the DMA to the next EP descriptor that is not a DUMMY_EPID. */
1883 *R_DMA_CH8_SUB0_EP
= virt_to_phys(ep
);
1884 /* Restart the DMA */
1885 *R_DMA_CH8_SUB0_CMD
= IO_STATE(R_DMA_CH8_SUB0_CMD
, cmd
, start
);
1887 spin_unlock_irqrestore(&etrax_dma8_sub0_lock
, flags
);
1890 /* queue an URB with the transfer controller (called from hcd_driver) */
1891 static int tc_urb_enqueue(struct usb_hcd
*hcd
,
1898 unsigned long flags
;
1899 struct crisv10_urb_priv
*urb_priv
;
1900 struct crisv10_hcd
* crisv10_hcd
= hcd_to_crisv10_hcd(hcd
);
1903 if(!(crisv10_hcd
->running
)) {
1904 /* The USB Controller is not running, probably because no device is
1905 attached. No idea to enqueue URBs then */
1906 tc_warn("Rejected enqueueing of URB:0x%x because no dev attached\n",
1911 maxpacket
= usb_maxpacket(urb
->dev
, urb
->pipe
, usb_pipeout(urb
->pipe
));
1912 /* Special case check for In Isoc transfers. Specification states that each
1913 In Isoc transfer consists of one packet and therefore it should fit into
1914 the transfer-buffer of an URB.
1915 We do the check here to be sure (an invalid scenario can be produced with
1916 parameters to the usbtest suite) */
1917 if(usb_pipeisoc(urb
->pipe
) && usb_pipein(urb
->pipe
) &&
1918 (urb
->transfer_buffer_length
< maxpacket
)) {
1919 tc_err("Submit In Isoc URB with buffer length:%d to pipe with maxpacketlen: %d\n", urb
->transfer_buffer_length
, maxpacket
);
1923 /* Check if there is a epid for URBs destination, if not this function
1925 epid
= tc_setup_epid(urb
->ep
, urb
, mem_flags
);
1927 tc_err("Failed setup epid:%d for URB:0x%x\n", epid
, (unsigned int)urb
);
1932 if(urb
== activeUrbList
[epid
]) {
1933 tc_err("Resubmition of allready active URB:0x%x\n", (unsigned int)urb
);
1937 if(urb_list_entry(urb
, epid
)) {
1938 tc_err("Resubmition of allready queued URB:0x%x\n", (unsigned int)urb
);
1942 /* If we actively have flaged endpoint as disabled then refuse submition */
1943 if(epid_state
[epid
].disabled
) {
1947 /* Allocate and init HC-private data for URB */
1948 if(urb_priv_create(hcd
, urb
, epid
, mem_flags
) != 0) {
1952 urb_priv
= urb
->hcpriv
;
1954 /* Check if there is enough bandwidth for periodic transfer */
1955 if(usb_pipeint(urb
->pipe
) || usb_pipeisoc(urb
->pipe
)) {
1956 /* only check (and later claim) if not already claimed */
1957 if (urb_priv
->bandwidth
== 0) {
1958 bustime
= crisv10_usb_check_bandwidth(urb
->dev
, urb
);
1960 tc_err("Not enough periodic bandwidth\n");
1961 urb_priv_free(hcd
, urb
);
1968 tc_dbg("Enqueue URB:0x%x[%d] epid:%d (%s) bufflen:%d\n",
1969 (unsigned int)urb
, urb_priv
->urb_num
, epid
,
1970 pipe_to_str(urb
->pipe
), urb
->transfer_buffer_length
);
1972 /* Create and link SBs required for this URB */
1973 retval
= create_sb_for_urb(urb
, mem_flags
);
1975 tc_err("Failed to create SBs for URB:0x%x[%d]\n", (unsigned int)urb
,
1977 urb_priv_free(hcd
, urb
);
1982 /* Init intr EP pool if this URB is a INTR transfer. This pool is later
1983 used when inserting EPs in the TxIntrEPList. We do the alloc here
1984 so we can't run out of memory later */
1985 if(usb_pipeint(urb
->pipe
)) {
1986 retval
= init_intr_urb(urb
, mem_flags
);
1988 tc_warn("Failed to init Intr URB\n");
1989 urb_priv_free(hcd
, urb
);
1995 /* Disable other access when inserting USB */
1996 local_irq_save(flags
);
1998 /* Claim bandwidth, if needed */
2000 crisv10_usb_claim_bandwidth(urb
->dev
,
2003 (usb_pipetype(urb
->pipe
) == PIPE_ISOCHRONOUS
));
2006 /* Add URB to EP queue */
2007 urb_list_add(urb
, epid
, mem_flags
);
2009 if(usb_pipeisoc(urb
->pipe
)) {
2010 /* Special processing of Isoc URBs. */
2011 tc_dma_process_isoc_urb(urb
);
2013 /* Process EP queue for rest of the URB types (Bulk, Ctrl, Intr) */
2014 tc_dma_process_queue(epid
);
2017 local_irq_restore(flags
);
2023 /* remove an URB from the transfer controller queues (called from hcd_driver)*/
2024 static int tc_urb_dequeue(struct usb_hcd
*hcd
, struct urb
*urb
, int status
) {
2025 struct crisv10_urb_priv
*urb_priv
;
2026 unsigned long flags
;
2030 /* Disable interrupts here since a descriptor interrupt for the isoc epid
2031 will modify the sb list. This could possibly be done more granular, but
2032 urb_dequeue should not be used frequently anyway.
2034 local_irq_save(flags
);
2036 urb
->status
= status
;
2037 urb_priv
= urb
->hcpriv
;
2040 /* This happens if a device driver calls unlink on an urb that
2041 was never submitted (lazy driver) or if the urb was completed
2042 while dequeue was being called. */
2043 tc_warn("Dequeing of not enqueued URB:0x%x\n", (unsigned int)urb
);
2044 local_irq_restore(flags
);
2047 epid
= urb_priv
->epid
;
2049 tc_warn("Dequeing %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
2050 (urb
== activeUrbList
[epid
]) ? "active" : "queued",
2051 (unsigned int)urb
, urb_priv
->urb_num
, str_dir(urb
->pipe
),
2052 str_type(urb
->pipe
), epid
, urb
->status
,
2053 (urb_priv
->later_data
) ? "later-sched" : "");
2055 /* For Bulk, Ctrl and Intr are only one URB active at a time. So any URB
2056 that isn't active can be dequeued by just removing it from the queue */
2057 if(usb_pipebulk(urb
->pipe
) || usb_pipecontrol(urb
->pipe
) ||
2058 usb_pipeint(urb
->pipe
)) {
2060 /* Check if URB haven't gone further than the queue */
2061 if(urb
!= activeUrbList
[epid
]) {
2062 ASSERT(urb_priv
->later_data
== NULL
);
2063 tc_warn("Dequeing URB:0x%x[%d] (%s %s epid:%d) from queue"
2064 " (not active)\n", (unsigned int)urb
, urb_priv
->urb_num
,
2065 str_dir(urb
->pipe
), str_type(urb
->pipe
), epid
);
2067 /* Finish the URB with error status from USB core */
2068 tc_finish_urb(hcd
, urb
, urb
->status
);
2069 local_irq_restore(flags
);
2074 /* Set URB status to Unlink for handling when interrupt comes. */
2075 urb_priv
->urb_state
= UNLINK
;
2077 /* Differentiate dequeing of Bulk and Ctrl from Isoc and Intr */
2078 switch(usb_pipetype(urb
->pipe
)) {
2080 /* Check if EP still is enabled */
2081 if (TxBulkEPList
[epid
].command
& IO_MASK(USB_EP_command
, enable
)) {
2082 /* The EP was enabled, disable it. */
2083 TxBulkEPList
[epid
].command
&= ~IO_MASK(USB_EP_command
, enable
);
2085 /* Kicking dummy list out of the party. */
2086 TxBulkEPList
[epid
].next
= virt_to_phys(&TxBulkEPList
[(epid
+ 1) % NBR_OF_EPIDS
]);
2089 /* Check if EP still is enabled */
2090 if (TxCtrlEPList
[epid
].command
& IO_MASK(USB_EP_command
, enable
)) {
2091 /* The EP was enabled, disable it. */
2092 TxCtrlEPList
[epid
].command
&= ~IO_MASK(USB_EP_command
, enable
);
2095 case PIPE_ISOCHRONOUS
:
2096 /* Disabling, busy-wait and unlinking of Isoc SBs will be done in
2097 finish_isoc_urb(). Because there might the case when URB is dequeued
2098 but there are other valid URBs waiting */
2100 /* Check if In Isoc EP still is enabled */
2101 if (TxIsocEPList
[epid
].command
& IO_MASK(USB_EP_command
, enable
)) {
2102 /* The EP was enabled, disable it. */
2103 TxIsocEPList
[epid
].command
&= ~IO_MASK(USB_EP_command
, enable
);
2106 case PIPE_INTERRUPT
:
2107 /* Special care is taken for interrupt URBs. EPs are unlinked in
2114 /* Asynchronous unlink, finish the URB later from scheduled or other
2115 event (data finished, error) */
2116 tc_finish_urb_later(hcd
, urb
, urb
->status
);
2118 local_irq_restore(flags
);
/* Synchronously shut down one epid: disable its hardware EP descriptor(s),
   busy-wait until the DMA channel has moved off the descriptor, and then
   finish (with -ENOENT) the active URB plus every queued URB for the epid.
   Must be callable from process context; takes/releases IRQs itself.
   NOTE(review): several original lines (case labels, braces) are missing in
   this extraction and were reconstructed from the visible structure. */
static void tc_sync_finish_epid(struct usb_hcd *hcd, int epid) {
  volatile int timeout = 10000;          /* bounded busy-wait for DMA to leave EP */
  struct urb *urb;
  struct crisv10_urb_priv *urb_priv;
  unsigned long flags;

  volatile struct USB_EP_Desc *first_ep; /* First EP in the list. */
  volatile struct USB_EP_Desc *curr_ep;  /* Current EP, the iterator. */
  volatile struct USB_EP_Desc *next_ep;  /* The EP after current. */

  int type = epid_state[epid].type;

  /* Setting this flag will cause enqueue() to return -ENOENT for new
     submissions on this endpoint and finish_urb() won't process the queue
     further */
  epid_state[epid].disabled = 1;

  switch(type) {
  case PIPE_BULK:
    /* Check if EP still is enabled */
    if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
      /* The EP was enabled, disable it. */
      TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
      tc_warn("sync_finish: Disabling EP for epid:%d\n", epid);

      /* Do busy-wait until DMA not using this EP descriptor anymore */
      while((*R_DMA_CH8_SUB0_EP ==
             virt_to_phys(&TxBulkEPList[epid])) &&
            (timeout-- > 0));
      if(timeout == 0) {
        warn("Timeout while waiting for DMA-TX-Bulk to leave EP for"
             " epid:%d\n", epid);
      }
    }
    break;

  case PIPE_CONTROL:
    /* Check if EP still is enabled */
    if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
      /* The EP was enabled, disable it. */
      TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
      tc_warn("sync_finish: Disabling EP for epid:%d\n", epid);

      /* Do busy-wait until DMA not using this EP descriptor anymore */
      while((*R_DMA_CH8_SUB1_EP ==
             virt_to_phys(&TxCtrlEPList[epid])) &&
            (timeout-- > 0));
      if(timeout == 0) {
        warn("Timeout while waiting for DMA-TX-Ctrl to leave EP for"
             " epid:%d\n", epid);
      }
    }
    break;

  case PIPE_INTERRUPT:
    local_irq_save(flags);
    /* Disable all Intr EPs belonging to epid.  The Intr list is circular,
       so walk until we are back at the first EP. */
    first_ep = &TxIntrEPList[0];
    curr_ep = first_ep;
    do {
      next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
      if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
        /* Disable EP belonging to this epid */
        next_ep->command &= ~IO_MASK(USB_EP_command, enable);
      }
      curr_ep = phys_to_virt(curr_ep->next);
    } while (curr_ep != first_ep);
    local_irq_restore(flags);
    break;

  case PIPE_ISOCHRONOUS:
    /* Check if EP still is enabled */
    if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
      tc_warn("sync_finish: Disabling Isoc EP for epid:%d\n", epid);
      /* The EP was enabled, disable it. */
      TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);

      while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) &&
            (timeout-- > 0));
      if(timeout == 0) {
        warn("Timeout while waiting for DMA-TX-Isoc to leave EP for"
             " epid:%d\n", epid);
      }
    }
    break;
  }

  local_irq_save(flags);

  /* Finish if there is active URB for this endpoint */
  if(activeUrbList[epid] != NULL) {
    urb = activeUrbList[epid];
    urb_priv = urb->hcpriv;
    ASSERT(urb_priv);
    tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
            (urb == activeUrbList[epid]) ? "active" : "queued",
            (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
            str_type(urb->pipe), epid, urb->status,
            (urb_priv->later_data) ? "later-sched" : "");

    /* tc_finish_urb() is expected to clear activeUrbList[epid] */
    tc_finish_urb(hcd, activeUrbList[epid], -ENOENT);
    ASSERT(activeUrbList[epid] == NULL);
  }

  /* Finish any queued URBs for this endpoint. There won't be any
     resubmissions because epid_disabled causes enqueue() to fail for this
     endpoint */
  while((urb = urb_list_first(epid)) != NULL) {
    urb_priv = urb->hcpriv;
    ASSERT(urb_priv);

    tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
            (urb == activeUrbList[epid]) ? "active" : "queued",
            (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
            str_type(urb->pipe), epid, urb->status,
            (urb_priv->later_data) ? "later-sched" : "");

    tc_finish_urb(hcd, urb, -ENOENT);
  }
  /* Re-enable enqueue() for this epid */
  epid_state[epid].disabled = 0;
  local_irq_restore(flags);
}
/* free resources associated with an endpoint (called from hcd_driver) */
static void tc_endpoint_disable(struct usb_hcd *hcd,
                                struct usb_host_endpoint *ep) {
  /* Only free epid if it has been allocated. We get two endpoint_disable
     requests for ctrl endpoints so ignore the second one */
  if(ep->hcpriv != NULL) {
    struct crisv10_ep_priv *ep_priv = ep->hcpriv;
    int epid = ep_priv->epid;
    tc_warn("endpoint_disable ep:0x%x ep-priv:0x%x (%s) (epid:%d freed)\n",
           (unsigned int)ep, (unsigned int)ep->hcpriv,
           endpoint_to_str(&(ep->desc)), epid);

    /* Shut the epid down synchronously; finishes active + queued URBs */
    tc_sync_finish_epid(hcd, epid);

    /* After sync-finish there must be nothing left on this epid */
    ASSERT(activeUrbList[epid] == NULL);
    ASSERT(list_empty(&urb_list[epid]));

    tc_free_epid(ep);
  } else {
    tc_dbg("endpoint_disable ep:0x%x ep-priv:0x%x (%s)\n", (unsigned int)ep,
           (unsigned int)ep->hcpriv, endpoint_to_str(&(ep->desc)));
  }
}
/* Work-queue callback scheduled by tc_finish_urb_later().  Finishes the URB
   recorded in the urb_later_data item, unless the URB has already been
   finished (uld->urb == NULL) or recycled (urb_num mismatch). */
static void tc_finish_urb_later_proc(struct work_struct* work) {
  unsigned long flags;
  struct urb_later_data* uld;

  local_irq_save(flags);
  uld = container_of(work, struct urb_later_data, dws.work);
  if(uld->urb == NULL) {
    /* tc_finish_urb() clears uld->urb when it finishes the URB first */
    late_dbg("Later finish of URB = NULL (allready finished)\n");
  } else {
    struct crisv10_urb_priv* urb_priv = uld->urb->hcpriv;
    ASSERT(urb_priv);
    if(urb_priv->urb_num == uld->urb_num) {
      late_dbg("Later finish of URB:0x%x[%d]\n", (unsigned int)(uld->urb),
               urb_priv->urb_num);
      if(uld->status != uld->urb->status) {
        errno_dbg("Later-finish URB with status:%d, later-status:%d\n",
                  uld->urb->status, uld->status);
      }
      if(uld != urb_priv->later_data) {
        panic("Scheduled uld not same as URBs uld\n");
      }
      tc_finish_urb(uld->hcd, uld->urb, uld->status);
    } else {
      /* URB pointer was reused for a newer submission; ignore stale work */
      late_warn("Ignoring later finish of URB:0x%x[%d]"
                ", urb_num doesn't match current URB:0x%x[%d]",
                (unsigned int)(uld->urb), uld->urb_num,
                (unsigned int)(uld->urb), urb_priv->urb_num);
    }
  }
  local_irq_restore(flags);
  kmem_cache_free(later_data_cache, uld);
}
/* Schedule the finishing of a URB to happen later, from work-queue context.
   If a later-finish is already pending for this URB, only the status to
   report is updated. */
static void tc_finish_urb_later(struct usb_hcd *hcd, struct urb *urb,
                                int status) {
  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  struct urb_later_data* uld;

  ASSERT(urb_priv);

  if(urb_priv->later_data != NULL) {
    /* Later-finish already scheduled for this URB, just update status to
       return when finishing later */
    errno_dbg("Later-finish schedule change URB status:%d with new"
              " status:%d\n", urb_priv->later_data->status, status);

    urb_priv->later_data->status = status;
    return;
  }

  /* GFP_ATOMIC: may be called from interrupt context */
  uld = kmem_cache_alloc(later_data_cache, GFP_ATOMIC);
  ASSERT(uld);

  uld->hcd = hcd;
  uld->urb = urb;
  uld->urb_num = urb_priv->urb_num; /* guards against URB reuse, see proc() */
  uld->status = status;

  INIT_DELAYED_WORK(&uld->dws, tc_finish_urb_later_proc);
  urb_priv->later_data = uld;

  /* Schedule the finishing of the URB to happen later */
  schedule_delayed_work(&uld->dws, LATER_TIMER_DELAY);
}
2337 static void tc_finish_isoc_urb(struct usb_hcd
*hcd
, struct urb
*urb
,
/* Finish a non-Isoc URB: fix up actual_length and status, unlink it from the
   hardware EP lists, free the HC-private data and give the URB back to the
   USB core.  Isoc URBs are delegated to tc_finish_isoc_urb().
   NOTE(review): some original lines (declarations, case labels, braces) are
   missing from this extraction and were reconstructed from context. */
static void tc_finish_urb(struct usb_hcd *hcd, struct urb *urb, int status) {
  struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  int epid;
  char toggle;
  int urb_num;

  ASSERT(urb_priv != NULL);
  epid = urb_priv->epid;
  urb_num = urb_priv->urb_num;

  if(urb != activeUrbList[epid]) {
    if(urb_list_entry(urb, epid)) {
      /* Remove this URB from the list. Only happens when URB are finished
         before having been processed (dequeuing) */
      urb_list_del(urb, epid);
    } else {
      tc_warn("Finishing of URB:0x%x[%d] neither active or in queue for"
              " epid:%d\n", (unsigned int)urb, urb_num, epid);
    }
  }

  /* Cancel any pending later-finish of this URB */
  if(urb_priv->later_data) {
    urb_priv->later_data->urb = NULL;
  }

  /* For an IN pipe, we always set the actual length, regardless of whether
     there was an error or not (which means the device driver can use the
     data if it wants to). */
  if(usb_pipein(urb->pipe)) {
    urb->actual_length = urb_priv->rx_offset;
  } else {
    /* Set actual_length for OUT urbs also; the USB mass storage driver seems
       to want that. */
    if (status == 0 && urb->status == -EINPROGRESS) {
      urb->actual_length = urb->transfer_buffer_length;
    } else {
      /* We wouldn't know of any partial writes if there was an error. */
      urb->actual_length = 0;
    }
  }

  /* URB status mangling */
  if(urb->status == -EINPROGRESS) {
    /* The USB core hasn't changed the status, let's set our finish status */
    urb->status = status;

    if ((status == 0) && (urb->transfer_flags & URB_SHORT_NOT_OK) &&
        usb_pipein(urb->pipe) &&
        (urb->actual_length != urb->transfer_buffer_length)) {
      /* URB_SHORT_NOT_OK means that short reads (shorter than the endpoint's
         max length) is to be treated as an error. */
      errno_dbg("Finishing URB:0x%x[%d] with SHORT_NOT_OK flag and short"
                " data:%d\n", (unsigned int)urb, urb_num,
                urb->actual_length);
      urb->status = -EREMOTEIO;
    }

    if(urb_priv->urb_state == UNLINK) {
      /* URB has been requested to be unlinked asynchronously */
      urb->status = -ECONNRESET;
      errno_dbg("Fixing unlink status of URB:0x%x[%d] to:%d\n",
                (unsigned int)urb, urb_num, urb->status);
    }
  } else {
    /* The USB Core wants to signal some error via the URB, pass it through */
  }

  /* use completely different finish function for Isoc URBs */
  if(usb_pipeisoc(urb->pipe)) {
    tc_finish_isoc_urb(hcd, urb, status);
    return;
  }

  /* Do special unlinking of EPs for Intr traffic */
  if(usb_pipeint(urb->pipe)) {
    tc_dma_unlink_intr_urb(urb);
  }

  /* Release allocated bandwidth for periodic transfers */
  if(usb_pipeint(urb->pipe) || usb_pipeisoc(urb->pipe))
    crisv10_usb_release_bandwidth(hcd,
                                  usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS,
                                  urb_priv->bandwidth);

  /* This URB is active on EP */
  if(urb == activeUrbList[epid]) {
    /* We need to fiddle with the toggle bits because the hardware doesn't do
       it for us. */
    toggle = etrax_epid_get_toggle(epid, usb_pipeout(urb->pipe));
    usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                  usb_pipeout(urb->pipe), toggle);

    /* Checks for Ctrl and Bulk EPs */
    switch(usb_pipetype(urb->pipe)) {
    case PIPE_BULK:
      /* Check so Bulk EP really is disabled before finishing active URB */
      ASSERT((TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) ==
             IO_STATE(USB_EP_command, enable, no));
      /* Disable sub-pointer for EP to avoid next tx_interrupt() to
         process this EP again */
      TxBulkEPList[epid].sub = 0;
      /* No need to wait for the DMA before changing the next pointer.
         The modulo NBR_OF_EPIDS isn't actually necessary, since we will never
         use the last one (INVALID_EPID) for actual traffic. */
      TxBulkEPList[epid].next =
        virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
      break;
    case PIPE_CONTROL:
      /* Check so Ctrl EP really is disabled before finishing active URB */
      ASSERT((TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) ==
             IO_STATE(USB_EP_command, enable, no));
      /* Disable sub-pointer for EP to avoid next tx_interrupt() to
         process this EP again */
      TxCtrlEPList[epid].sub = 0;
      break;
    }
  }

  /* Free HC-private URB data*/
  urb_priv_free(hcd, urb);

  if(urb->status) {
    errno_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
              (unsigned int)urb, urb_num, str_dir(urb->pipe),
              str_type(urb->pipe), urb->actual_length, urb->status);
  } else {
    tc_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
           (unsigned int)urb, urb_num, str_dir(urb->pipe),
           str_type(urb->pipe), urb->actual_length, urb->status);
  }

  /* If we just finished an active URB, clear active pointer. */
  if (urb == activeUrbList[epid]) {
    /* Make URB not active on EP anymore */
    activeUrbList[epid] = NULL;

    if(urb->status == 0) {
      /* URB finished successfully, process queue to see if there are any
         more URBs waiting before we call completion function.*/
      if(crisv10_hcd->running) {
        /* Only process queue if USB controller is running */
        tc_dma_process_queue(epid);
      } else {
        tc_warn("No processing of queue for epid:%d, USB Controller not"
                " running\n", epid);
      }
    }
  }

  /* Hand the URB from HCD to its USB device driver, using its completion
     functions */
  usb_hcd_giveback_urb (hcd, urb, status);

  /* Check the queue once more if the URB returned with error, because we
     didn't do it before the completion function because the specification
     states that the queue should not restart until all it's unlinked
     URBs have been fully retired, with the completion functions run */
  if(crisv10_hcd->running) {
    /* Only process queue if USB controller is running */
    tc_dma_process_queue(epid);
  } else {
    tc_warn("No processing of queue for epid:%d, USB Controller not running\n",
            epid);
  }
}
/* Finish an Isoc URB: mark unprocessed iso packets as -EPROTO, rotate the
   next queued Isoc URB into activeUrbList (or shut the Isoc EP down when the
   queue is empty), unlink any not-yet-transmitted Out SBs, release bandwidth
   and give the URB back to the USB core.
   NOTE(review): several original lines (declarations, braces, if-guards) are
   missing from this extraction and were reconstructed from context. */
static void tc_finish_isoc_urb(struct usb_hcd *hcd, struct urb *urb,
                               int status) {
  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  int epid, i;
  volatile int timeout = 10000;
  int bandwidth = 0;

  ASSERT(urb_priv);
  epid = urb_priv->epid;

  ASSERT(usb_pipeisoc(urb->pipe));

  /* Set that all isoc packets have status and length set before
     completing the urb. */
  for (i = urb_priv->isoc_packet_counter; i < urb->number_of_packets; i++){
    urb->iso_frame_desc[i].actual_length = 0;
    urb->iso_frame_desc[i].status = -EPROTO;
  }

  /* Check if the URB is currently active (done or error) */
  if(urb == activeUrbList[epid]) {
    /* Check if there are another In Isoc URB queued for this epid */
    if (!list_empty(&urb_list[epid])&& !epid_state[epid].disabled) {
      /* Move it from queue to active and mark it started so Isoc transfers
         won't be interrupted.
         All Isoc URBs data transfers are already added to DMA lists so we
         don't have to insert anything in DMA lists here. */
      activeUrbList[epid] = urb_list_first(epid);
      ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_state =
        STARTED;
      urb_list_del(activeUrbList[epid], epid);

      if(urb->status) {
        errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
                  " status:%d, new waiting URB:0x%x[%d]\n",
                  (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
                  str_type(urb->pipe), urb_priv->isoc_packet_counter,
                  urb->number_of_packets, urb->status,
                  (unsigned int)activeUrbList[epid],
                  ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_num);
      }

    } else { /* No other URB queued for this epid */
      if(urb->status) {
        errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
                  " status:%d, no new URB waiting\n",
                  (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
                  str_type(urb->pipe), urb_priv->isoc_packet_counter,
                  urb->number_of_packets, urb->status);
      }

      /* Check if EP is still enabled, then shut it down. */
      if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
        isoc_dbg("Isoc EP enabled for epid:%d, disabling it\n", epid);

        /* Should only occur for In Isoc EPs where SB isn't consumed. */
        ASSERT(usb_pipein(urb->pipe));

        /* Disable it and wait for it to stop */
        TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);

        /* Ah, the luxury of busy-wait. */
        while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) &&
              (timeout-- > 0));
        if(timeout == 0) {
          warn("Timeout while waiting for DMA-TX-Isoc to leave EP for epid:%d\n", epid);
        }
      }

      /* Unlink SB to say that epid is finished. */
      TxIsocEPList[epid].sub = 0;
      TxIsocEPList[epid].hw_len = 0;

      /* No URB active for EP anymore */
      activeUrbList[epid] = NULL;
    }
  } else { /* Finishing of not active URB (queued up with SBs thought) */
    isoc_warn("finish_isoc_urb (URB:0x%x %s) (%d of %d packets) status:%d,"
              " SB queued but not active\n",
              (unsigned int)urb, str_dir(urb->pipe),
              urb_priv->isoc_packet_counter, urb->number_of_packets,
              urb->status);
    if(usb_pipeout(urb->pipe)) {
      /* Finishing of not yet active Out Isoc URB needs unlinking of SBs. */
      struct USB_SB_Desc *iter_sb, *prev_sb, *next_sb;

      iter_sb = TxIsocEPList[epid].sub ?
        phys_to_virt(TxIsocEPList[epid].sub) : 0;
      prev_sb = 0;

      /* SB that is linked before this URBs first SB */
      while (iter_sb && (iter_sb != urb_priv->first_sb)) {
        prev_sb = iter_sb;
        iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
      }

      if (iter_sb == 0) {
        /* Unlink of the URB currently being transmitted. */
        prev_sb = 0;
        iter_sb = TxIsocEPList[epid].sub ? phys_to_virt(TxIsocEPList[epid].sub) : 0;
      }

      while (iter_sb && (iter_sb != urb_priv->last_sb)) {
        iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
      }
      if (iter_sb) {
        next_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
      } else {
        /* This should only happen if the DMA has completed
           processing the SB list for this EP while interrupts
           are blocked. */
        isoc_dbg("Isoc urb not found, already sent?\n");
        next_sb = 0;
      }
      if (prev_sb) {
        prev_sb->next = next_sb ? virt_to_phys(next_sb) : 0;
      } else {
        TxIsocEPList[epid].sub = next_sb ? virt_to_phys(next_sb) : 0;
      }
    }
  }

  /* Free HC-private URB data*/
  bandwidth = urb_priv->bandwidth;
  urb_priv_free(hcd, urb);

  crisv10_usb_release_bandwidth(hcd, usb_pipeisoc(urb->pipe), bandwidth);

  /* Hand the URB from HCD to its USB device driver, using its completion
     functions */
  usb_hcd_giveback_urb (hcd, urb, status);
}
/* Monotonically increasing sequence number handed out to each submitted URB
   in urb_priv_create(); compared later (e.g. in tc_finish_urb_later_proc())
   to detect references to a recycled URB. */
static __u32 urb_num = 0;
/* allocate and initialize URB private data */
/* Returns 0 on success, -ENOMEM if the allocation fails.  On success
   urb->hcpriv points at the zero-initialized crisv10_urb_priv. */
static int urb_priv_create(struct usb_hcd *hcd, struct urb *urb, int epid,
                           int mem_flags) {
  struct crisv10_urb_priv *urb_priv;

  urb_priv = kmalloc(sizeof *urb_priv, mem_flags);
  if (!urb_priv)
    return -ENOMEM;
  memset(urb_priv, 0, sizeof *urb_priv);

  urb_priv->epid = epid;
  urb_priv->urb_state = NOT_STARTED;

  urb->hcpriv = urb_priv;
  /* Assign URB a sequence number, and increment counter */
  urb_priv->urb_num = urb_num;
  urb_num++;
  urb_priv->bandwidth = 0;
  return 0;
}
/* free URB private data */
/* Frees the SB chain (first_sb..last_sb), any interrupt EP pool entries, and
   the crisv10_urb_priv itself; clears urb->hcpriv. */
static void urb_priv_free(struct usb_hcd *hcd, struct urb *urb) {
  int i;
  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  ASSERT(urb_priv != 0);

  /* Check it has any SBs linked that needs to be freed*/
  if(urb_priv->first_sb != NULL) {
    struct USB_SB_Desc *next_sb, *first_sb, *last_sb;

    first_sb = urb_priv->first_sb;
    last_sb = urb_priv->last_sb;
    ASSERT(last_sb);
    /* Walk the physical-address-linked SB chain, freeing each SB */
    while(first_sb != last_sb) {
      next_sb = (struct USB_SB_Desc *)phys_to_virt(first_sb->next);
      kmem_cache_free(usb_desc_cache, first_sb);
      first_sb = next_sb;
    }
    kmem_cache_free(usb_desc_cache, last_sb);
  }

  /* Check if it has any EPs in its Intr pool that also needs to be freed */
  if(urb_priv->intr_ep_pool_length > 0) {
    for(i = 0; i < urb_priv->intr_ep_pool_length; i++) {
      kfree(urb_priv->intr_ep_pool[i]);
    }
    tc_dbg("Freed %d EPs from URB:0x%x EP pool\n",
           urb_priv->intr_ep_pool_length, (unsigned int)urb);
  }

  kfree(urb_priv);
  urb->hcpriv = NULL;
}
/* Allocate and zero-initialize HC-private endpoint data, attached to
   ep->hcpriv.  Returns 0 on success, -ENOMEM if the allocation fails. */
static int ep_priv_create(struct usb_host_endpoint *ep, int mem_flags) {
  struct crisv10_ep_priv *ep_priv;

  ep_priv = kmalloc(sizeof *ep_priv, mem_flags);
  if (!ep_priv)
    return -ENOMEM;
  memset(ep_priv, 0, sizeof *ep_priv);

  ep->hcpriv = ep_priv;
  return 0;
}
/* Free the HC-private endpoint data and clear ep->hcpriv.
   Counterpart of ep_priv_create(). */
static void ep_priv_free(struct usb_host_endpoint *ep) {
  struct crisv10_ep_priv *ep_priv = ep->hcpriv;
  ASSERT(ep_priv);
  kfree(ep_priv);
  ep->hcpriv = NULL;
}
2727 * usb_check_bandwidth():
2729 * old_alloc is from host_controller->bandwidth_allocated in microseconds;
2730 * bustime is from calc_bus_time(), but converted to microseconds.
2732 * returns <bustime in us> if successful,
2733 * or -ENOSPC if bandwidth request fails.
2736 * This initial implementation does not use Endpoint.bInterval
2737 * in managing bandwidth allocation.
2738 * It probably needs to be expanded to use Endpoint.bInterval.
2739 * This can be done as a later enhancement (correction).
2741 * This will also probably require some kind of
2742 * frame allocation tracking...meaning, for example,
2743 * that if multiple drivers request interrupts every 10 USB frames,
2744 * they don't all have to be allocated at
2745 * frame numbers N, N+10, N+20, etc. Some of them could be at
2746 * N+11, N+21, N+31, etc., and others at
2747 * N+12, N+22, N+32, etc.
2749 * Similarly for isochronous transfers...
2751 * Individual HCDs can schedule more directly ... this logic
2752 * is not correct for high speed transfers.
/* Check whether a periodic URB's bus time fits in the remaining frame
   bandwidth.  Returns the bus time in microseconds on success, or -ENOSPC
   when the new allocation would exceed FRAME_TIME_MAX_USECS_ALLOC. */
static int crisv10_usb_check_bandwidth(
        struct usb_device *dev,
        struct urb *urb)
{
  unsigned int pipe = urb->pipe;
  long bustime;
  int is_in = usb_pipein (pipe);
  int is_iso = usb_pipeisoc (pipe);
  int old_alloc = dev->bus->bandwidth_allocated;
  int new_alloc;

  bustime = NS_TO_US (usb_calc_bus_time (dev->speed, is_in, is_iso,
                                         usb_maxpacket (dev, pipe, !is_in)));
  if (is_iso)
    /* Spread the cost over the URB's packets (average per frame) */
    bustime /= urb->number_of_packets;

  new_alloc = old_alloc + (int) bustime;
  if (new_alloc > FRAME_TIME_MAX_USECS_ALLOC) {
    dev_dbg (&dev->dev, "usb_check_bandwidth FAILED: %d + %ld = %d usec\n",
             old_alloc, bustime, new_alloc);
    bustime = -ENOSPC;  /* report error */
  }

  return bustime;
}
2781 * usb_claim_bandwidth - records bandwidth for a periodic transfer
2782 * @dev: source/target of request
2783 * @urb: request (urb->dev == dev)
2784 * @bustime: bandwidth consumed, in (average) microseconds per frame
2785 * @isoc: true iff the request is isochronous
2787 * HCDs are expected not to overcommit periodic bandwidth, and to record such
2788 * reservations whenever endpoints are added to the periodic schedule.
2790 * FIXME averaging per-frame is suboptimal. Better to sum over the HCD's
2791 * entire periodic schedule ... 32 frames for OHCI, 1024 for UHCI, settable
2792 * for EHCI (256/512/1024 frames, default 1024) and have the bus expose how
2793 * large its periodic schedule is.
/* Record a successful periodic bandwidth reservation on the bus counters and
   remember the per-URB amount in urb_priv->bandwidth so it can be returned
   by crisv10_usb_release_bandwidth() later. */
static void crisv10_usb_claim_bandwidth(
        struct usb_device *dev,
        struct urb *urb, int bustime, int isoc)
{
  struct crisv10_urb_priv *urb_priv;

  dev->bus->bandwidth_allocated += bustime;
  if (isoc)
    dev->bus->bandwidth_isoc_reqs++;
  else
    dev->bus->bandwidth_int_reqs++;

  urb_priv = urb->hcpriv;
  urb_priv->bandwidth = bustime;
}
2810 * usb_release_bandwidth - reverses effect of usb_claim_bandwidth()
2811 * @hcd: host controller
2812 * @isoc: true iff the request is isochronous
2813 * @bandwidth: bandwidth returned
2815 * This records that previously allocated bandwidth has been released.
2816 * Bandwidth is released when endpoints are removed from the host controller's
2817 * periodic schedule.
/* Return previously claimed periodic bandwidth to the bus counters.
   @isoc selects which request counter to decrement. */
static void crisv10_usb_release_bandwidth(
        struct usb_hcd *hcd,
        int isoc,
        int bandwidth)
{
  hcd_to_bus(hcd)->bandwidth_allocated -= bandwidth;
  if (isoc)
    hcd_to_bus(hcd)->bandwidth_isoc_reqs--;
  else
    hcd_to_bus(hcd)->bandwidth_int_reqs--;
}
2832 /* EPID handling functions, managing EP-list in Etrax through wrappers */
2833 /* ------------------------------------------------------------------- */
2835 /* Sets up a new EPID for an endpoint or returns existing if found */
/* Sets up a new EPID for an endpoint or returns existing if found */
/* Returns the epid number on success, or a negative value when no epid is
   free or the private-data allocation fails.
   NOTE(review): some original lines (declarations, error-path returns) are
   missing from this extraction and were reconstructed from context. */
static int tc_setup_epid(struct usb_host_endpoint *ep, struct urb *urb,
                         int mem_flags) {
  int epid;
  char devnum, endpoint, out_traffic, slow;
  int maxlen;
  __u32 epid_data;
  struct crisv10_ep_priv *ep_priv = ep->hcpriv;

  /* Check if a valid epid already is setup for this endpoint */
  if(ep_priv != NULL) {
    return ep_priv->epid;
  }

  /* We must find and initiate a new epid for this urb. */
  epid = tc_allocate_epid();
  if (epid < 0) {
    /* Failed to allocate a new epid. */
    return epid;
  }

  /* We now have a new epid to use. Claim it. */
  epid_state[epid].inuse = 1;

  /* Init private data for new endpoint */
  if(ep_priv_create(ep, mem_flags) != 0) {
    return -ENOMEM;
  }
  ep_priv = ep->hcpriv;
  ep_priv->epid = epid;

  devnum = usb_pipedevice(urb->pipe);
  endpoint = usb_pipeendpoint(urb->pipe);
  slow = (urb->dev->speed == USB_SPEED_LOW);
  maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));

  if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
    /* We want both IN and OUT control traffic to be put on the same
       EP/SB list. */
    out_traffic = 1;
  } else {
    out_traffic = usb_pipeout(urb->pipe);
  }

  if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
    epid_data = IO_STATE(R_USB_EPT_DATA_ISO, valid, yes) |
      /* FIXME: Change any to the actual port? */
      IO_STATE(R_USB_EPT_DATA_ISO, port, any) |
      IO_FIELD(R_USB_EPT_DATA_ISO, max_len, maxlen) |
      IO_FIELD(R_USB_EPT_DATA_ISO, ep, endpoint) |
      IO_FIELD(R_USB_EPT_DATA_ISO, dev, devnum);
    etrax_epid_iso_set(epid, epid_data);
  } else {
    epid_data = IO_STATE(R_USB_EPT_DATA, valid, yes) |
      IO_FIELD(R_USB_EPT_DATA, low_speed, slow) |
      /* FIXME: Change any to the actual port? */
      IO_STATE(R_USB_EPT_DATA, port, any) |
      IO_FIELD(R_USB_EPT_DATA, max_len, maxlen) |
      IO_FIELD(R_USB_EPT_DATA, ep, endpoint) |
      IO_FIELD(R_USB_EPT_DATA, dev, devnum);
    etrax_epid_set(epid, epid_data);
  }

  epid_state[epid].out_traffic = out_traffic;
  epid_state[epid].type = usb_pipetype(urb->pipe);

  tc_warn("Setting up ep:0x%x epid:%d (addr:%d endp:%d max_len:%d %s %s %s)\n",
          (unsigned int)ep, epid, devnum, endpoint, maxlen,
          str_type(urb->pipe), out_traffic ? "out" : "in",
          slow ? "low" : "full");

  /* Enable Isoc eof interrupt if we set up the first Isoc epid */
  if(usb_pipeisoc(urb->pipe)) {
    isoc_epid_counter++;
    if(isoc_epid_counter == 1) {
      isoc_warn("Enabled Isoc eof interrupt\n");
      *R_USB_IRQ_MASK_SET = IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set);
    }
  }

  return epid;
}
/* Release the epid claimed by an endpoint: drop the Isoc eof interrupt when
   the last Isoc epid goes away, invalidate the hardware EP table entry
   (waiting for the "hold" bit to drop first), and free the software state.
   NOTE(review): a few original lines are missing from this extraction and
   were reconstructed from context. */
static void tc_free_epid(struct usb_host_endpoint *ep) {
  unsigned long flags;
  struct crisv10_ep_priv *ep_priv = ep->hcpriv;
  int epid;
  volatile int timeout = 10000;

  if (ep_priv == NULL) {
    tc_warn("Trying to free unused epid on ep:0x%x\n", (unsigned int)ep);
    return;
  }

  epid = ep_priv->epid;

  /* Disable Isoc eof interrupt if we free the last Isoc epid */
  if(epid_isoc(epid)) {
    ASSERT(isoc_epid_counter > 0);
    isoc_epid_counter--;
    if(isoc_epid_counter == 0) {
      *R_USB_IRQ_MASK_CLR = IO_STATE(R_USB_IRQ_MASK_CLR, iso_eof, clr);
      isoc_warn("Disabled Isoc eof interrupt\n");
    }
  }

  /* Take lock manually instead of in epid_x_x wrappers,
     because we need to be polling here */
  spin_lock_irqsave(&etrax_epid_lock, flags);

  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
  /* Busy-wait until the controller releases the EP table entry */
  while((*R_USB_EPT_DATA & IO_MASK(R_USB_EPT_DATA, hold)) &&
        (timeout-- > 0));
  if(timeout == 0) {
    warn("Timeout while waiting for epid:%d to drop hold\n", epid);
  }
  /* This will, among other things, set the valid field to 0. */
  *R_USB_EPT_DATA = 0;
  spin_unlock_irqrestore(&etrax_epid_lock, flags);

  /* Free resource in software state info list */
  epid_state[epid].inuse = 0;

  /* Free private endpoint data */
  ep_priv_free(ep);
}
/* Find the first epid not currently in use.  Returns the epid number, or -1
   when all NBR_OF_EPIDS entries are taken.  Does not mark it in-use; the
   caller (tc_setup_epid) claims it. */
static int tc_allocate_epid(void) {
  int i;

  for (i = 0; i < NBR_OF_EPIDS; i++) {
    if (!epid_inuse(i)) {
      return i;
    }
  }

  tc_warn("Found no free epids\n");
  return -1;
}
2989 /* Wrappers around the list functions (include/linux/list.h). */
2990 /* ---------------------------------------------------------- */
2991 static inline int __urb_list_empty(int epid
) {
2993 retval
= list_empty(&urb_list
[epid
]);
/* Returns first urb for this epid, or NULL if list is empty. */
/* Takes urb_list_lock; safe to call from any context that may take a
   spinlock with IRQs saved. */
static inline struct urb *urb_list_first(int epid) {
  unsigned long flags;
  struct urb *first_urb = 0;
  spin_lock_irqsave(&urb_list_lock, flags);
  if (!__urb_list_empty(epid)) {
    /* Get the first urb (i.e. head->next). */
    urb_entry_t *urb_entry = list_entry((&urb_list[epid])->next, urb_entry_t, list);
    first_urb = urb_entry->urb;
  }
  spin_unlock_irqrestore(&urb_list_lock, flags);
  return first_urb;
}
/* Adds an urb_entry last in the list for this epid. */
/* Allocates the wrapper entry with mem_flags; the entry is freed again in
   urb_list_del(). */
static inline void urb_list_add(struct urb *urb, int epid, int mem_flags) {
  unsigned long flags;
  urb_entry_t *urb_entry = (urb_entry_t *)kmalloc(sizeof(urb_entry_t), mem_flags);
  ASSERT(urb_entry);

  urb_entry->urb = urb;
  spin_lock_irqsave(&urb_list_lock, flags);
  list_add_tail(&urb_entry->list, &urb_list[epid]);
  spin_unlock_irqrestore(&urb_list_lock, flags);
}
/* Search through the list for an element that contains this urb. (The list
   is expected to be short and the one we are about to delete will often be
   the first in the list.)
   Should be protected by spin_locks in calling function */
/* Returns the matching urb_entry_t, or NULL when the urb is not queued. */
static inline urb_entry_t *__urb_list_entry(struct urb *urb, int epid) {
  struct list_head *entry;
  struct list_head *tmp;
  urb_entry_t *urb_entry;

  list_for_each_safe(entry, tmp, &urb_list[epid]) {
    urb_entry = list_entry(entry, urb_entry_t, list);
    ASSERT(urb_entry);
    ASSERT(urb_entry->urb);

    if (urb_entry->urb == urb) {
      return urb_entry;
    }
  }
  return 0;
}
/* Same function as above but for global use. Protects list by spinlock */
static inline urb_entry_t *urb_list_entry(struct urb *urb, int epid) {
  unsigned long flags;
  urb_entry_t *urb_entry;
  spin_lock_irqsave(&urb_list_lock, flags);
  urb_entry = __urb_list_entry(urb, epid);
  spin_unlock_irqrestore(&urb_list_lock, flags);
  return urb_entry;
}
/* Delete an urb from the list. */
/* The urb must be present in the list (asserted); the wrapper entry
   allocated by urb_list_add() is freed here. */
static inline void urb_list_del(struct urb *urb, int epid) {
  unsigned long flags;
  urb_entry_t *urb_entry;

  /* Delete entry and free. */
  spin_lock_irqsave(&urb_list_lock, flags);
  urb_entry = __urb_list_entry(urb, epid);
  ASSERT(urb_entry);

  list_del(&urb_entry->list);
  spin_unlock_irqrestore(&urb_list_lock, flags);
  kfree(urb_entry);
}
/* Move an urb to the end of the list. */
/* Implemented as delete + re-add-tail under one lock hold. */
static inline void urb_list_move_last(struct urb *urb, int epid) {
  unsigned long flags;
  urb_entry_t *urb_entry;

  spin_lock_irqsave(&urb_list_lock, flags);
  urb_entry = __urb_list_entry(urb, epid);
  ASSERT(urb_entry);

  list_del(&urb_entry->list);
  list_add_tail(&urb_entry->list, &urb_list[epid]);
  spin_unlock_irqrestore(&urb_list_lock, flags);
}
/* Get the next urb in the list. */
/* Returns the urb queued after @urb for this epid, or NULL when @urb is the
   last entry.  @urb must itself be in the list (asserted). */
static inline struct urb *urb_list_next(struct urb *urb, int epid) {
  unsigned long flags;
  urb_entry_t *urb_entry;

  spin_lock_irqsave(&urb_list_lock, flags);
  urb_entry = __urb_list_entry(urb, epid);
  ASSERT(urb_entry);

  if (urb_entry->list.next != &urb_list[epid]) {
    struct list_head *elem = urb_entry->list.next;
    urb_entry = list_entry(elem, urb_entry_t, list);
    spin_unlock_irqrestore(&urb_list_lock, flags);
    return urb_entry->urb;
  } else {
    /* @urb was the last entry; list head reached */
    spin_unlock_irqrestore(&urb_list_lock, flags);
    return NULL;
  }
}
/* Allocate and initialize an EP descriptor from the descriptor cache.
   The EP is created enabled, bound to @epid, with its sub pointer set to the
   (physical address of the) given SB chain, or 0 when @sb_desc is NULL.
   Returns NULL when the cache allocation fails. */
struct USB_EP_Desc* create_ep(int epid, struct USB_SB_Desc* sb_desc,
                              int mem_flags) {
  struct USB_EP_Desc *ep_desc;
  ep_desc = (struct USB_EP_Desc *) kmem_cache_alloc(usb_desc_cache, mem_flags);
  if(ep_desc == NULL)
    return NULL;
  memset(ep_desc, 0, sizeof(struct USB_EP_Desc));

  ep_desc->hw_len = 0;
  ep_desc->command = (IO_FIELD(USB_EP_command, epid, epid) |
                      IO_STATE(USB_EP_command, enable, yes));
  if(sb_desc == NULL) {
    ep_desc->sub = 0;
  } else {
    /* Hardware follows physical addresses, not kernel virtual ones */
    ep_desc->sub = virt_to_phys(sb_desc);
  }
  return ep_desc;
}
3127 #define CMD_EOL IO_STATE(USB_SB_command, eol, yes)
3128 #define CMD_INTR IO_STATE(USB_SB_command, intr, yes)
3129 #define CMD_FULL IO_STATE(USB_SB_command, full, yes)
3131 /* Allocation and setup of a generic SB. Used to create SETUP, OUT and ZOUT
3132 SBs. Also used by create_sb_in() to avoid same allocation procedure at two
3134 struct USB_SB_Desc
* create_sb(struct USB_SB_Desc
* sb_prev
, int tt
, void* data
,
3135 int datalen
, int mem_flags
) {
3136 struct USB_SB_Desc
*sb_desc
;
3137 sb_desc
= (struct USB_SB_Desc
*)kmem_cache_alloc(usb_desc_cache
, mem_flags
);
3140 memset(sb_desc
, 0, sizeof(struct USB_SB_Desc
));
3142 sb_desc
->command
= IO_FIELD(USB_SB_command
, tt
, tt
) |
3143 IO_STATE(USB_SB_command
, eot
, yes
);
3145 sb_desc
->sw_len
= datalen
;