[etrax] fix a bug inside usb driver, thanks to Jesper Nilsson
[openwrt/svn-archive/archive.git] / target / linux / etrax / files / drivers / usb / host / hc-crisv10.c
1 /*
2 *
3 * ETRAX 100LX USB Host Controller Driver
4 *
5 * Copyright (C) 2005 - 2008 Axis Communications AB
6 *
7 * Author: Konrad Eriksson <konrad.eriksson@axis.se>
8 *
9 */
10
11 #include <linux/module.h>
12 #include <linux/kernel.h>
13 #include <linux/init.h>
14 #include <linux/moduleparam.h>
15 #include <linux/spinlock.h>
16 #include <linux/usb.h>
17 #include <linux/platform_device.h>
18
19 #include <asm/io.h>
20 #include <asm/irq.h>
21 #include <asm/arch/dma.h>
22 #include <asm/arch/io_interface_mux.h>
23
24 #include "../core/hcd.h"
25 #include "../core/hub.h"
26 #include "hc-crisv10.h"
27 #include "hc-cris-dbg.h"
28
29
30 /***************************************************************************/
31 /***************************************************************************/
32 /* Host Controller settings */
33 /***************************************************************************/
34 /***************************************************************************/
35
#define VERSION "1.00-openwrt_diff"
#define COPYRIGHT "(c) 2005, 2006 Axis Communications AB"
#define DESCRIPTION "ETRAX 100LX USB Host Controller"

/* IRQ numbers come from the Etrax arch headers. */
#define ETRAX_USB_HC_IRQ USB_HC_IRQ_NBR
#define ETRAX_USB_RX_IRQ USB_DMA_RX_IRQ_NBR
#define ETRAX_USB_TX_IRQ USB_DMA_TX_IRQ_NBR

/* Number of physical ports in Etrax 100LX */
#define USB_ROOT_HUB_PORTS 2

const char hc_name[] = "hc-crisv10";
const char product_desc[] = DESCRIPTION;

/* The number of epids is, among other things, used for pre-allocating
   ctrl, bulk and isoc EP descriptors (one for each epid).
   Assumed to be > 1 when initiating the DMA lists. */
#define NBR_OF_EPIDS 32

/* Support interrupt traffic intervals up to 128 ms. */
#define MAX_INTR_INTERVAL 128

/* If periodic traffic (intr or isoc) is to be used, then one entry in the EP
   table must be "invalid". By this we mean that we shouldn't care about epid
   attentions for this epid, or at least handle them differently from epid
   attentions for "valid" epids. This define determines which one to use
   (don't change it). */
#define INVALID_EPID 31
/* A special epid for the bulk dummys. */
#define DUMMY_EPID 30

/* Module settings */

MODULE_DESCRIPTION(DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Konrad Eriksson <konrad.eriksson@axis.se>");


/* Module parameters */

/* 0 = No ports enabled
   1 = Only port 1 enabled (on board ethernet on devboard)
   2 = Only port 2 enabled (external connector on devboard)
   3 = Both ports enabled
*/
static unsigned int ports = 3;
module_param(ports, uint, S_IRUGO);
MODULE_PARM_DESC(ports, "Bitmask indicating USB ports to use");


/***************************************************************************/
/***************************************************************************/
/* Shared global variables for this module */
/***************************************************************************/
/***************************************************************************/

/* EP descriptor lists for non period transfers. Must be 32-bit aligned.
   (The 4-byte alignment is a DMA hardware requirement.) */
static volatile struct USB_EP_Desc TxBulkEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));

static volatile struct USB_EP_Desc TxCtrlEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));

/* EP descriptor lists for period transfers. Must be 32-bit aligned. */
static volatile struct USB_EP_Desc TxIntrEPList[MAX_INTR_INTERVAL] __attribute__ ((aligned (4)));
static volatile struct USB_SB_Desc TxIntrSB_zout __attribute__ ((aligned (4)));

static volatile struct USB_EP_Desc TxIsocEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
static volatile struct USB_SB_Desc TxIsocSB_zout __attribute__ ((aligned (4)));

static volatile struct USB_SB_Desc TxIsocSBList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));

/* After each enabled bulk EP IN we put two disabled EP descriptors with the eol flag set,
   causing the DMA to stop the DMA channel. The first of these two has the intr flag set, which
   gives us a dma8_sub0_descr interrupt. When we receive this, we advance the DMA one step in the
   EP list and then restart the bulk channel, thus forcing a switch between bulk EP descriptors
   in each frame. */
static volatile struct USB_EP_Desc TxBulkDummyEPList[NBR_OF_EPIDS][2] __attribute__ ((aligned (4)));

/* List of URB pointers, where each points to the active URB for a epid.
   For Bulk, Ctrl and Intr this means which URB that currently is added to
   DMA lists (Isoc URBs are all directly added to DMA lists). As soon as
   URB has completed is the queue examined and the first URB in queue is
   removed and moved to the activeUrbList while its state change to STARTED and
   its transfer(s) gets added to DMA list (exception Isoc where URBs enter
   state STARTED directly and added transfers added to DMA lists). */
static struct urb *activeUrbList[NBR_OF_EPIDS];

/* Additional software state info for each epid */
static struct etrax_epid epid_state[NBR_OF_EPIDS];

/* Timer handles for bulk traffic timer used to avoid DMA bug where DMA stops
   even if there is new data waiting to be processed */
static struct timer_list bulk_start_timer = TIMER_INITIALIZER(NULL, 0, 0);
static struct timer_list bulk_eot_timer = TIMER_INITIALIZER(NULL, 0, 0);

/* We want the start timer to expire before the eot timer, because the former
   might start traffic, thus making it unnecessary for the latter to time
   out. */
#define BULK_START_TIMER_INTERVAL (HZ/50) /* 20 ms */
#define BULK_EOT_TIMER_INTERVAL (HZ/16) /* 60 ms */

/* Delay before a URB completion happen when it's scheduled to be delayed */
#define LATER_TIMER_DELAY (HZ/50) /* 20 ms */

/* Simplifying macros for checking software state info of a epid */
/* ----------------------------------------------------------------------- */
#define epid_inuse(epid) epid_state[epid].inuse
#define epid_out_traffic(epid) epid_state[epid].out_traffic
#define epid_isoc(epid) (epid_state[epid].type == PIPE_ISOCHRONOUS ? 1 : 0)
#define epid_intr(epid) (epid_state[epid].type == PIPE_INTERRUPT ? 1 : 0)
145
146
147 /***************************************************************************/
148 /***************************************************************************/
149 /* DEBUG FUNCTIONS */
150 /***************************************************************************/
151 /***************************************************************************/
152 /* Note that these functions are always available in their "__" variants,
153 for use in error situations. The "__" missing variants are controlled by
154 the USB_DEBUG_DESC/USB_DEBUG_URB macros. */
/* Dump every field of an URB to the kernel log (debug aid).
   Tolerates an URB without driver-private data (prints urb_num as -1). */
static void __dump_urb(struct urb* purb)
{
  struct crisv10_urb_priv *urb_priv = purb->hcpriv;
  int urb_num = -1;  /* -1 marks "no driver-private data attached" */
  if(urb_priv) {
    urb_num = urb_priv->urb_num;
  }
  printk("\nURB:0x%x[%d]\n", (unsigned int)purb, urb_num);
  printk("dev :0x%08lx\n", (unsigned long)purb->dev);
  printk("pipe :0x%08x\n", purb->pipe);
  printk("status :%d\n", purb->status);
  printk("transfer_flags :0x%08x\n", purb->transfer_flags);
  printk("transfer_buffer :0x%08lx\n", (unsigned long)purb->transfer_buffer);
  printk("transfer_buffer_length:%d\n", purb->transfer_buffer_length);
  printk("actual_length :%d\n", purb->actual_length);
  printk("setup_packet :0x%08lx\n", (unsigned long)purb->setup_packet);
  printk("start_frame :%d\n", purb->start_frame);
  printk("number_of_packets :%d\n", purb->number_of_packets);
  printk("interval :%d\n", purb->interval);
  printk("error_count :%d\n", purb->error_count);
  printk("context :0x%08lx\n", (unsigned long)purb->context);
  printk("complete :0x%08lx\n\n", (unsigned long)purb->complete);
}
178
/* Dump one IN (receive) DMA descriptor to the kernel log (debug aid). */
static void __dump_in_desc(volatile struct USB_IN_Desc *in)
{
  printk("\nUSB_IN_Desc at 0x%08lx\n", (unsigned long)in);
  printk(" sw_len : 0x%04x (%d)\n", in->sw_len, in->sw_len);
  printk(" command : 0x%04x\n", in->command);
  printk(" next : 0x%08lx\n", in->next);
  printk(" buf : 0x%08lx\n", in->buf);
  printk(" hw_len : 0x%04x (%d)\n", in->hw_len, in->hw_len);
  printk(" status : 0x%04x\n\n", in->status);
}
189
190 static void __dump_sb_desc(volatile struct USB_SB_Desc *sb)
191 {
192 char tt = (sb->command & 0x30) >> 4;
193 char *tt_string;
194
195 switch (tt) {
196 case 0:
197 tt_string = "zout";
198 break;
199 case 1:
200 tt_string = "in";
201 break;
202 case 2:
203 tt_string = "out";
204 break;
205 case 3:
206 tt_string = "setup";
207 break;
208 default:
209 tt_string = "unknown (weird)";
210 }
211
212 printk(" USB_SB_Desc at 0x%08lx ", (unsigned long)sb);
213 printk(" command:0x%04x (", sb->command);
214 printk("rem:%d ", (sb->command & 0x3f00) >> 8);
215 printk("full:%d ", (sb->command & 0x40) >> 6);
216 printk("tt:%d(%s) ", tt, tt_string);
217 printk("intr:%d ", (sb->command & 0x8) >> 3);
218 printk("eot:%d ", (sb->command & 0x2) >> 1);
219 printk("eol:%d)", sb->command & 0x1);
220 printk(" sw_len:0x%04x(%d)", sb->sw_len, sb->sw_len);
221 printk(" next:0x%08lx", sb->next);
222 printk(" buf:0x%08lx\n", sb->buf);
223 }
224
225
/* Dump one EP descriptor to the kernel log, decoding the command word's
   bit fields (epid, enable, intr, eof, eol). Debug aid. */
static void __dump_ep_desc(volatile struct USB_EP_Desc *ep)
{
  printk("USB_EP_Desc at 0x%08lx ", (unsigned long)ep);
  printk(" command:0x%04x (", ep->command);
  printk("ep_id:%d ", (ep->command & 0x1f00) >> 8);
  printk("enable:%d ", (ep->command & 0x10) >> 4);
  printk("intr:%d ", (ep->command & 0x8) >> 3);
  printk("eof:%d ", (ep->command & 0x2) >> 1);
  printk("eol:%d)", ep->command & 0x1);
  printk(" hw_len:0x%04x(%d)", ep->hw_len, ep->hw_len);
  printk(" next:0x%08lx", ep->next);
  printk(" sub:0x%08lx\n", ep->sub);
}
239
/* Walk and dump the circular EP descriptor list for one pipe type,
   including each EP's chain of SB descriptors. Debug aid.
   NOTE(review): assumes the EP list is circular (terminates when the walk
   returns to the first EP) -- an unterminated/corrupt list would loop. */
static inline void __dump_ep_list(int pipe_type)
{
  volatile struct USB_EP_Desc *ep;
  volatile struct USB_EP_Desc *first_ep;
  volatile struct USB_SB_Desc *sb;

  switch (pipe_type)
    {
    case PIPE_BULK:
      first_ep = &TxBulkEPList[0];
      break;
    case PIPE_CONTROL:
      first_ep = &TxCtrlEPList[0];
      break;
    case PIPE_INTERRUPT:
      first_ep = &TxIntrEPList[0];
      break;
    case PIPE_ISOCHRONOUS:
      first_ep = &TxIsocEPList[0];
      break;
    default:
      /* Unknown pipe type: nothing to dump. */
      return;
    }
  ep = first_ep;

  printk("\n\nDumping EP list...\n\n");

  do {
    __dump_ep_desc(ep);
    /* Cannot phys_to_virt on 0 as it turns into 80000000, which is != 0. */
    sb = ep->sub ? phys_to_virt(ep->sub) : 0;
    while (sb) {
      __dump_sb_desc(sb);
      sb = sb->next ? phys_to_virt(sb->next) : 0;
    }
    ep = (volatile struct USB_EP_Desc *)(phys_to_virt(ep->next));

  } while (ep != first_ep);
}
279
/* Read and dump the R_USB_EPT_DATA register for one epid. Debug aid.
   The index write + data read must be done atomically (irqs off), since
   R_USB_EPT_INDEX is a shared latch selecting which epid's data is read. */
static inline void __dump_ept_data(int epid)
{
  unsigned long flags;
  __u32 r_usb_ept_data;

  if (epid < 0 || epid > 31) {
    printk("Cannot dump ept data for invalid epid %d\n", epid);
    return;
  }

  local_irq_save(flags);
  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
  /* nop gives the hardware time to latch the index before the read. */
  nop();
  r_usb_ept_data = *R_USB_EPT_DATA;
  local_irq_restore(flags);

  printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", r_usb_ept_data, epid);
  if (r_usb_ept_data == 0) {
    /* No need for more detailed printing. */
    return;
  }
  printk(" valid : %d\n", (r_usb_ept_data & 0x80000000) >> 31);
  printk(" hold : %d\n", (r_usb_ept_data & 0x40000000) >> 30);
  printk(" error_count_in : %d\n", (r_usb_ept_data & 0x30000000) >> 28);
  printk(" t_in : %d\n", (r_usb_ept_data & 0x08000000) >> 27);
  printk(" low_speed : %d\n", (r_usb_ept_data & 0x04000000) >> 26);
  printk(" port : %d\n", (r_usb_ept_data & 0x03000000) >> 24);
  printk(" error_code : %d\n", (r_usb_ept_data & 0x00c00000) >> 22);
  printk(" t_out : %d\n", (r_usb_ept_data & 0x00200000) >> 21);
  printk(" error_count_out : %d\n", (r_usb_ept_data & 0x00180000) >> 19);
  printk(" max_len : %d\n", (r_usb_ept_data & 0x0003f800) >> 11);
  printk(" ep : %d\n", (r_usb_ept_data & 0x00000780) >> 7);
  printk(" dev : %d\n", (r_usb_ept_data & 0x0000003f));
}
314
/* Read and dump R_USB_EPT_DATA_ISO for one epid (isoc layout). Debug aid.
   Same index-latch protocol as __dump_ept_data: irqs must stay off between
   the index write and the data read. */
static inline void __dump_ept_data_iso(int epid)
{
  unsigned long flags;
  __u32 ept_data;

  if (epid < 0 || epid > 31) {
    printk("Cannot dump ept data for invalid epid %d\n", epid);
    return;
  }

  local_irq_save(flags);
  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
  nop();
  ept_data = *R_USB_EPT_DATA_ISO;
  local_irq_restore(flags);

  printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", ept_data, epid);
  if (ept_data == 0) {
    /* No need for more detailed printing. */
    return;
  }
  printk(" valid : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, valid,
                                                ept_data));
  printk(" port : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, port,
                                               ept_data));
  printk(" error_code : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code,
                                                     ept_data));
  printk(" max_len : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, max_len,
                                                  ept_data));
  printk(" ep : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, ep,
                                             ept_data));
  printk(" dev : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, dev,
                                              ept_data));
}
349
/* Dump R_USB_EPT_DATA for every one of the 32 epids. Debug aid. */
static inline void __dump_ept_data_list(void)
{
  int epid = 0;

  printk("Dumping the whole R_USB_EPT_DATA list\n");

  while (epid < 32) {
    __dump_ept_data(epid);
    epid++;
  }
}
360
/* Dump everything known about one epid: its hardware EPT data plus every
   EP descriptor in the bulk/ctrl/intr/isoc lists that references it.
   Debug aid called from error paths (e.g. epid attention handling). */
static void debug_epid(int epid) {
  int i;

  if(epid_isoc(epid)) {
    __dump_ept_data_iso(epid);
  } else {
    __dump_ept_data(epid);
  }

  printk("Bulk:\n");
  for(i = 0; i < 32; i++) {
    if(IO_EXTRACT(USB_EP_command, epid, TxBulkEPList[i].command) ==
       epid) {
      printk("%d: ", i); __dump_ep_desc(&(TxBulkEPList[i]));
    }
  }

  printk("Ctrl:\n");
  for(i = 0; i < 32; i++) {
    if(IO_EXTRACT(USB_EP_command, epid, TxCtrlEPList[i].command) ==
       epid) {
      printk("%d: ", i); __dump_ep_desc(&(TxCtrlEPList[i]));
    }
  }

  printk("Intr:\n");
  for(i = 0; i < MAX_INTR_INTERVAL; i++) {
    if(IO_EXTRACT(USB_EP_command, epid, TxIntrEPList[i].command) ==
       epid) {
      printk("%d: ", i); __dump_ep_desc(&(TxIntrEPList[i]));
    }
  }

  printk("Isoc:\n");
  for(i = 0; i < 32; i++) {
    if(IO_EXTRACT(USB_EP_command, epid, TxIsocEPList[i].command) ==
       epid) {
      printk("%d: ", i); __dump_ep_desc(&(TxIsocEPList[i]));
    }
  }

  __dump_ept_data_list();
  /* NOTE(review): only the interrupt EP list is chain-dumped here;
     presumably intentional since intr lists are the most intricate. */
  __dump_ep_list(PIPE_INTERRUPT);
  printk("\n\n");
}
406
407
408
/* Render the set bits of an R_USB_STATUS value as a space-separated string.
   Returns a pointer to a static buffer: not reentrant, debug use only. */
char* hcd_status_to_str(__u8 bUsbStatus) {
  static char hcd_status_str[128];
  hcd_status_str[0] = '\0';
  if(bUsbStatus & IO_STATE(R_USB_STATUS, ourun, yes)) {
    strcat(hcd_status_str, "ourun ");
  }
  if(bUsbStatus & IO_STATE(R_USB_STATUS, perror, yes)) {
    strcat(hcd_status_str, "perror ");
  }
  if(bUsbStatus & IO_STATE(R_USB_STATUS, device_mode, yes)) {
    strcat(hcd_status_str, "device_mode ");
  }
  if(bUsbStatus & IO_STATE(R_USB_STATUS, host_mode, yes)) {
    strcat(hcd_status_str, "host_mode ");
  }
  if(bUsbStatus & IO_STATE(R_USB_STATUS, started, yes)) {
    strcat(hcd_status_str, "started ");
  }
  if(bUsbStatus & IO_STATE(R_USB_STATUS, running, yes)) {
    strcat(hcd_status_str, "running ");
  }
  return hcd_status_str;
}
432
433
434 char* sblist_to_str(struct USB_SB_Desc* sb_desc) {
435 static char sblist_to_str_buff[128];
436 char tmp[32], tmp2[32];
437 sblist_to_str_buff[0] = '\0';
438 while(sb_desc != NULL) {
439 switch(IO_EXTRACT(USB_SB_command, tt, sb_desc->command)) {
440 case 0: sprintf(tmp, "zout"); break;
441 case 1: sprintf(tmp, "in"); break;
442 case 2: sprintf(tmp, "out"); break;
443 case 3: sprintf(tmp, "setup"); break;
444 }
445 sprintf(tmp2, "(%s %d)", tmp, sb_desc->sw_len);
446 strcat(sblist_to_str_buff, tmp2);
447 if(sb_desc->next != 0) {
448 sb_desc = phys_to_virt(sb_desc->next);
449 } else {
450 sb_desc = NULL;
451 }
452 }
453 return sblist_to_str_buff;
454 }
455
/* Render a root-hub port status word as a space-separated string.
   Returns a pointer to a static buffer: not reentrant, debug use only.
   Both ports use the R_USB_RH_PORT_STATUS_1 bit layout. */
char* port_status_to_str(__u16 wPortStatus) {
  static char port_status_str[128];
  port_status_str[0] = '\0';
  if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, connected, yes)) {
    strcat(port_status_str, "connected ");
  }
  if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) {
    strcat(port_status_str, "enabled ");
  }
  if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, suspended, yes)) {
    strcat(port_status_str, "suspended ");
  }
  if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, reset, yes)) {
    strcat(port_status_str, "reset ");
  }
  /* The speed field is a single bit: full-speed or low-speed. */
  if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, speed, full)) {
    strcat(port_status_str, "full-speed ");
  } else {
    strcat(port_status_str, "low-speed ");
  }
  return port_status_str;
}
478
479
480 char* endpoint_to_str(struct usb_endpoint_descriptor *ed) {
481 static char endpoint_to_str_buff[128];
482 char tmp[32];
483 int epnum = ed->bEndpointAddress & 0x0F;
484 int dir = ed->bEndpointAddress & 0x80;
485 int type = ed->bmAttributes & 0x03;
486 endpoint_to_str_buff[0] = '\0';
487 sprintf(endpoint_to_str_buff, "ep:%d ", epnum);
488 switch(type) {
489 case 0:
490 sprintf(tmp, " ctrl");
491 break;
492 case 1:
493 sprintf(tmp, " isoc");
494 break;
495 case 2:
496 sprintf(tmp, " bulk");
497 break;
498 case 3:
499 sprintf(tmp, " intr");
500 break;
501 }
502 strcat(endpoint_to_str_buff, tmp);
503 if(dir) {
504 sprintf(tmp, " in");
505 } else {
506 sprintf(tmp, " out");
507 }
508 strcat(endpoint_to_str_buff, tmp);
509
510 return endpoint_to_str_buff;
511 }
512
513 /* Debug helper functions for Transfer Controller */
/* Debug helper functions for Transfer Controller */
/* Render an URB pipe value as "dir:<d> type:<t> dev:<n> ep:<n>".
   Returns a pointer to a static buffer: not reentrant, debug use only. */
char* pipe_to_str(unsigned int pipe) {
  static char pipe_to_str_buff[128];
  char tmp[64];
  sprintf(pipe_to_str_buff, "dir:%s", str_dir(pipe));
  sprintf(tmp, " type:%s", str_type(pipe));
  strcat(pipe_to_str_buff, tmp);

  sprintf(tmp, " dev:%d", usb_pipedevice(pipe));
  strcat(pipe_to_str_buff, tmp);
  sprintf(tmp, " ep:%d", usb_pipeendpoint(pipe));
  strcat(pipe_to_str_buff, tmp);
  return pipe_to_str_buff;
}
527
528
#define USB_DEBUG_DESC 1

/* Descriptor-dump wrappers: compile to the real dumpers when
   USB_DEBUG_DESC is defined, to no-ops otherwise. */
#ifdef USB_DEBUG_DESC
#define dump_in_desc(x) __dump_in_desc(x)
/* Was "__dump_sb_desc(...)": a literal "..." in a macro expansion is not
   valid C and broke every use of dump_sb_desc(). */
#define dump_sb_desc(x) __dump_sb_desc(x)
#define dump_ep_desc(x) __dump_ep_desc(x)
#define dump_ept_data(x) __dump_ept_data(x)
#else
#define dump_in_desc(...) do {} while (0)
#define dump_sb_desc(...) do {} while (0)
#define dump_ep_desc(...) do {} while (0)
/* dump_ept_data was missing from this branch, which would have broken the
   build with USB_DEBUG_DESC undefined. */
#define dump_ept_data(...) do {} while (0)
#endif
541
542
/* Uncomment this to enable massive function call trace
   #define USB_DEBUG_TRACE */

#ifdef USB_DEBUG_TRACE
#define DBFENTER (printk(": Entering: %s\n", __FUNCTION__))
#define DBFEXIT (printk(": Exiting: %s\n", __FUNCTION__))
#else
#define DBFENTER do {} while (0)
#define DBFEXIT do {} while (0)
#endif

/* Panic if x is not 32-bit (DWORD) aligned, as the DMA hardware requires.
   NOTE(review): expands to a bare if-statement (no do/while wrapper), so it
   is unsafe directly before an else -- use with care. */
#define CHECK_ALIGN(x) if (((__u32)(x)) & 0x00000003) \
{panic("Alignment check (DWORD) failed at %s:%s:%d\n", __FILE__, __FUNCTION__, __LINE__);}

/* Most helpful debugging aid */
#define ASSERT(expr) ((void) ((expr) ? 0 : (err("assert failed at: %s %d",__FUNCTION__, __LINE__))))
559
560
/***************************************************************************/
/***************************************************************************/
/* Forward declarations */
/***************************************************************************/
/***************************************************************************/
/* Second-level interrupt handlers, dispatched from crisv10_hcd_top_irq(). */
void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg *reg);
void crisv10_hcd_port_status_irq(struct crisv10_irq_reg *reg);
void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg *reg);
void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg *reg);

/* Root-hub helpers. */
void rh_port_status_change(__u16[]);
int rh_clear_port_feature(__u8, __u16);
int rh_set_port_feature(__u8, __u16);
static void rh_disable_port(unsigned int port);

static void check_finished_bulk_tx_epids(struct usb_hcd *hcd,
                                         int timer);

/* Transfer-controller epid and URB lifecycle helpers. */
static int tc_setup_epid(struct usb_host_endpoint *ep, struct urb *urb,
                         int mem_flags);
static void tc_free_epid(struct usb_host_endpoint *ep);
static int tc_allocate_epid(void);
static void tc_finish_urb(struct usb_hcd *hcd, struct urb *urb, int status);
static void tc_finish_urb_later(struct usb_hcd *hcd, struct urb *urb,
                                int status);

static int urb_priv_create(struct usb_hcd *hcd, struct urb *urb, int epid,
                           int mem_flags);
static void urb_priv_free(struct usb_hcd *hcd, struct urb *urb);

/* Bandwidth accounting for periodic (intr/isoc) traffic. */
static int crisv10_usb_check_bandwidth(struct usb_device *dev,struct urb *urb);
static void crisv10_usb_claim_bandwidth(
        struct usb_device *dev, struct urb *urb, int bustime, int isoc);
static void crisv10_usb_release_bandwidth(
        struct usb_hcd *hcd, int isoc, int bandwidth);

/* Per-epid URB queue manipulation. */
static inline struct urb *urb_list_first(int epid);
static inline void urb_list_add(struct urb *urb, int epid,
                                int mem_flags);
static inline urb_entry_t *urb_list_entry(struct urb *urb, int epid);
static inline void urb_list_del(struct urb *urb, int epid);
static inline void urb_list_move_last(struct urb *urb, int epid);
static inline struct urb *urb_list_next(struct urb *urb, int epid);

int create_sb_for_urb(struct urb *urb, int mem_flags);
int init_intr_urb(struct urb *urb, int mem_flags);

/* Accessors for the hardware endpoint table (R_USB_EPT_*). */
static inline void etrax_epid_set(__u8 index, __u32 data);
static inline void etrax_epid_clear_error(__u8 index);
static inline void etrax_epid_set_toggle(__u8 index, __u8 dirout,
                                          __u8 toggle);
static inline __u8 etrax_epid_get_toggle(__u8 index, __u8 dirout);
static inline __u32 etrax_epid_get(__u8 index);

/* We're accessing the same register position in Etrax so
   when we do full access the internal difference doesn't matter */
#define etrax_epid_iso_set(index, data) etrax_epid_set(index, data)
#define etrax_epid_iso_get(index) etrax_epid_get(index)

/* DMA-list management and DMA interrupt handlers. */
static void tc_dma_process_isoc_urb(struct urb *urb);
static void tc_dma_process_queue(int epid);
static void tc_dma_unlink_intr_urb(struct urb *urb);
static irqreturn_t tc_dma_tx_interrupt(int irq, void *vhc);
static irqreturn_t tc_dma_rx_interrupt(int irq, void *vhc);

/* Workarounds for the bulk-DMA-stall hardware bug (see timer globals). */
static void tc_bulk_start_timer_func(unsigned long dummy);
static void tc_bulk_eot_timer_func(unsigned long dummy);
629
630
/*************************************************************/
/*************************************************************/
/* Host Controller Driver block */
/*************************************************************/
/*************************************************************/

/* HCD operations */
static irqreturn_t crisv10_hcd_top_irq(int irq, void*);
static int crisv10_hcd_reset(struct usb_hcd *);
static int crisv10_hcd_start(struct usb_hcd *);
static void crisv10_hcd_stop(struct usb_hcd *);
#ifdef CONFIG_PM
static int crisv10_hcd_suspend(struct device *, u32, u32);
static int crisv10_hcd_resume(struct device *, u32);
#endif /* CONFIG_PM */
static int crisv10_hcd_get_frame(struct usb_hcd *);

static int tc_urb_enqueue(struct usb_hcd *, struct urb *, gfp_t mem_flags);
static int tc_urb_dequeue(struct usb_hcd *, struct urb *, int);
static void tc_endpoint_disable(struct usb_hcd *, struct usb_host_endpoint *ep);

static int rh_status_data_request(struct usb_hcd *, char *);
static int rh_control_request(struct usb_hcd *, u16, u16, u16, char*, u16);

#ifdef CONFIG_PM
static int crisv10_hcd_hub_suspend(struct usb_hcd *);
static int crisv10_hcd_hub_resume(struct usb_hcd *);
#endif /* CONFIG_PM */
#ifdef CONFIG_USB_OTG
static int crisv10_hcd_start_port_reset(struct usb_hcd *, unsigned);
#endif /* CONFIG_USB_OTG */

/* host controller driver interface */
static const struct hc_driver crisv10_hc_driver =
  {
    .description = hc_name,
    .product_desc = product_desc,
    .hcd_priv_size = sizeof(struct crisv10_hcd),

    /* Attaching IRQ handler manually in probe() */
    /* .irq = crisv10_hcd_irq, */

    .flags = HCD_USB11,

    /* called to init HCD and root hub */
    .reset = crisv10_hcd_reset,
    .start = crisv10_hcd_start,

    /* cleanly make HCD stop writing memory and doing I/O */
    .stop = crisv10_hcd_stop,

    /* return current frame number */
    .get_frame_number = crisv10_hcd_get_frame,


    /* Manage i/o requests via the Transfer Controller */
    .urb_enqueue = tc_urb_enqueue,
    .urb_dequeue = tc_urb_dequeue,

    /* hw synch, freeing endpoint resources that urb_dequeue can't */
    .endpoint_disable = tc_endpoint_disable,


    /* Root Hub support */
    .hub_status_data = rh_status_data_request,
    .hub_control = rh_control_request,
#ifdef CONFIG_PM
    /* NOTE(review): rh_suspend_request/rh_resume_request are not declared
       above (the declared names are crisv10_hcd_hub_suspend/resume);
       presumably defined later in the file -- verify if CONFIG_PM is set. */
    .hub_suspend = rh_suspend_request,
    .hub_resume = rh_resume_request,
#endif /* CONFIG_PM */
#ifdef CONFIG_USB_OTG
    .start_port_reset = crisv10_hcd_start_port_reset,
#endif /* CONFIG_USB_OTG */
  };
705
706
/*
 * conversion between pointers to a hcd and the corresponding
 * crisv10_hcd
 */

/* The crisv10_hcd lives in the usb_hcd's trailing hcd_priv area. */
static inline struct crisv10_hcd *hcd_to_crisv10_hcd(struct usb_hcd *hcd)
{
	return (struct crisv10_hcd *) hcd->hcd_priv;
}

/* Inverse of the above: recover the enclosing usb_hcd from its hcd_priv. */
static inline struct usb_hcd *crisv10_hcd_to_hcd(struct crisv10_hcd *hcd)
{
	return container_of((void *) hcd, struct usb_hcd, hcd_priv);
}
721
/* check if specified port is in use */
/* Returns the (nonzero) mask bit when in use, 0 otherwise -- callers treat
   the result as a boolean. 'ports' is the module-parameter bitmask. */
static inline int port_in_use(unsigned int port)
{
	return ports & (1 << port);
}
727
728 /* number of ports in use */
729 static inline unsigned int num_ports(void)
730 {
731 unsigned int i, num = 0;
732 for (i = 0; i < USB_ROOT_HUB_PORTS; i++)
733 if (port_in_use(i))
734 num++;
735 return num;
736 }
737
738 /* map hub port number to the port number used internally by the HC */
739 static inline unsigned int map_port(unsigned int port)
740 {
741 unsigned int i, num = 0;
742 for (i = 0; i < USB_ROOT_HUB_PORTS; i++)
743 if (port_in_use(i))
744 if (++num == port)
745 return i;
746 return -1;
747 }
748
/* size of descriptors in slab cache */
#ifndef MAX
/* Classic max macro: evaluates each argument twice, so avoid side effects. */
#define MAX(x, y) ((x) > (y) ? (x) : (y))
#endif
753
754
755 /******************************************************************/
756 /* Hardware Interrupt functions */
757 /******************************************************************/
758
759 /* Fast interrupt handler for HC */
/* Fast interrupt handler for HC */
/* Top-level USB controller interrupt handler. Snapshots all interrupt-
   related registers into 'reg' (reading them also acknowledges/clears the
   interrupts, so the READ ORDER below is mandatory) and then dispatches to
   the second-level handlers in priority order. Always returns IRQ_HANDLED. */
static irqreturn_t crisv10_hcd_top_irq(int irq, void *vcd)
{
  struct usb_hcd *hcd = vcd;
  struct crisv10_irq_reg reg;
  __u32 irq_mask;
  unsigned long flags;

  DBFENTER;

  ASSERT(hcd != NULL);
  reg.hcd = hcd;

  /* Turn off other interrupts while handling these sensitive cases */
  local_irq_save(flags);

  /* Read out which interrupts that are flagged */
  irq_mask = *R_USB_IRQ_MASK_READ;
  reg.r_usb_irq_mask_read = irq_mask;

  /* Reading R_USB_STATUS clears the ctl_status interrupt. Note that
     R_USB_STATUS must be read before R_USB_EPID_ATTN since reading the latter
     clears the ourun and perror fields of R_USB_STATUS. */
  reg.r_usb_status = *R_USB_STATUS;

  /* Reading R_USB_EPID_ATTN clears the iso_eof, bulk_eot and epid_attn
     interrupts. */
  reg.r_usb_epid_attn = *R_USB_EPID_ATTN;

  /* Reading R_USB_RH_PORT_STATUS_1 and R_USB_RH_PORT_STATUS_2 clears the
     port_status interrupt. */
  reg.r_usb_rh_port_status_1 = *R_USB_RH_PORT_STATUS_1;
  reg.r_usb_rh_port_status_2 = *R_USB_RH_PORT_STATUS_2;

  /* Reading R_USB_FM_NUMBER clears the sof interrupt. */
  /* Note: the lower 11 bits contain the actual frame number, sent with each
     sof. */
  reg.r_usb_fm_number = *R_USB_FM_NUMBER;

  /* Interrupts are handled in order of priority. */
  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, port_status)) {
    crisv10_hcd_port_status_irq(&reg);
  }
  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, epid_attn)) {
    crisv10_hcd_epid_attn_irq(&reg);
  }
  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, ctl_status)) {
    crisv10_hcd_ctl_status_irq(&reg);
  }
  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, iso_eof)) {
    crisv10_hcd_isoc_eof_irq(&reg);
  }
  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, bulk_eot)) {
    /* Update/restart the bulk start timer since obviously the channel is
       running. */
    mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
    /* Update/restart the bulk eot timer since we just received an bulk eot
       interrupt. */
    mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);

    /* Check for finished bulk transfers on epids */
    check_finished_bulk_tx_epids(hcd, 0);
  }
  local_irq_restore(flags);

  DBFEXIT;
  return IRQ_HANDLED;
}
827
828
829 void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg *reg) {
830 struct usb_hcd *hcd = reg->hcd;
831 struct crisv10_urb_priv *urb_priv;
832 int epid;
833 DBFENTER;
834
835 for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
836 if (test_bit(epid, (void *)&reg->r_usb_epid_attn)) {
837 struct urb *urb;
838 __u32 ept_data;
839 int error_code;
840
841 if (epid == DUMMY_EPID || epid == INVALID_EPID) {
842 /* We definitely don't care about these ones. Besides, they are
843 always disabled, so any possible disabling caused by the
844 epid attention interrupt is irrelevant. */
845 continue;
846 }
847
848 if(!epid_inuse(epid)) {
849 irq_err("Epid attention on epid:%d that isn't in use\n", epid);
850 printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
851 debug_epid(epid);
852 continue;
853 }
854
855 /* Note that although there are separate R_USB_EPT_DATA and
856 R_USB_EPT_DATA_ISO registers, they are located at the same address and
857 are of the same size. In other words, this read should be ok for isoc
858 also. */
859 ept_data = etrax_epid_get(epid);
860 error_code = IO_EXTRACT(R_USB_EPT_DATA, error_code, ept_data);
861
862 /* Get the active URB for this epid. We blatantly assume
863 that only this URB could have caused the epid attention. */
864 urb = activeUrbList[epid];
865 if (urb == NULL) {
866 irq_err("Attention on epid:%d error:%d with no active URB.\n",
867 epid, error_code);
868 printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
869 debug_epid(epid);
870 continue;
871 }
872
873 urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
874 ASSERT(urb_priv);
875
876 /* Using IO_STATE_VALUE on R_USB_EPT_DATA should be ok for isoc also. */
877 if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
878
879 /* Isoc traffic doesn't have error_count_in/error_count_out. */
880 if ((usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS) &&
881 (IO_EXTRACT(R_USB_EPT_DATA, error_count_in, ept_data) == 3 ||
882 IO_EXTRACT(R_USB_EPT_DATA, error_count_out, ept_data) == 3)) {
883 /* Check if URB allready is marked for late-finish, we can get
884 several 3rd error for Intr traffic when a device is unplugged */
885 if(urb_priv->later_data == NULL) {
886 /* 3rd error. */
887 irq_warn("3rd error for epid:%d (%s %s) URB:0x%x[%d]\n", epid,
888 str_dir(urb->pipe), str_type(urb->pipe),
889 (unsigned int)urb, urb_priv->urb_num);
890
891 tc_finish_urb_later(hcd, urb, -EPROTO);
892 }
893
894 } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {
895 irq_warn("Perror for epid:%d\n", epid);
896 printk("FM_NUMBER: %d\n", reg->r_usb_fm_number & 0x7ff);
897 printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
898 __dump_urb(urb);
899 debug_epid(epid);
900
901 if (!(ept_data & IO_MASK(R_USB_EPT_DATA, valid))) {
902 /* invalid ep_id */
903 panic("Perror because of invalid epid."
904 " Deconfigured too early?");
905 } else {
906 /* past eof1, near eof, zout transfer, setup transfer */
907 /* Dump the urb and the relevant EP descriptor. */
908 panic("Something wrong with DMA descriptor contents."
909 " Too much traffic inserted?");
910 }
911 } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
912 /* buffer ourun */
913 printk("FM_NUMBER: %d\n", reg->r_usb_fm_number & 0x7ff);
914 printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
915 __dump_urb(urb);
916 debug_epid(epid);
917
918 panic("Buffer overrun/underrun for epid:%d. DMA too busy?", epid);
919 } else {
920 irq_warn("Attention on epid:%d (%s %s) with no error code\n", epid,
921 str_dir(urb->pipe), str_type(urb->pipe));
922 printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
923 __dump_urb(urb);
924 debug_epid(epid);
925 }
926
927 } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
928 stall)) {
929 /* Not really a protocol error, just says that the endpoint gave
930 a stall response. Note that error_code cannot be stall for isoc. */
931 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
932 panic("Isoc traffic cannot stall");
933 }
934
935 tc_dbg("Stall for epid:%d (%s %s) URB:0x%x\n", epid,
936 str_dir(urb->pipe), str_type(urb->pipe), (unsigned int)urb);
937 tc_finish_urb(hcd, urb, -EPIPE);
938
939 } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
940 bus_error)) {
941 /* Two devices responded to a transaction request. Must be resolved
942 by software. FIXME: Reset ports? */
943 panic("Bus error for epid %d."
944 " Two devices responded to transaction request\n",
945 epid);
946
947 } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
948 buffer_error)) {
949 /* DMA overrun or underrun. */
950 irq_warn("Buffer overrun/underrun for epid:%d (%s %s)\n", epid,
951 str_dir(urb->pipe), str_type(urb->pipe));
952
953 /* It seems that error_code = buffer_error in
954 R_USB_EPT_DATA/R_USB_EPT_DATA_ISO and ourun = yes in R_USB_STATUS
955 are the same error. */
956 tc_finish_urb(hcd, urb, -EPROTO);
957 } else {
958 irq_warn("Unknown attention on epid:%d (%s %s)\n", epid,
959 str_dir(urb->pipe), str_type(urb->pipe));
960 dump_ept_data(epid);
961 }
962 }
963 }
964 DBFEXIT;
965 }
966
967 void crisv10_hcd_port_status_irq(struct crisv10_irq_reg *reg)
968 {
969 __u16 port_reg[USB_ROOT_HUB_PORTS];
970 DBFENTER;
971 port_reg[0] = reg->r_usb_rh_port_status_1;
972 port_reg[1] = reg->r_usb_rh_port_status_2;
973 rh_port_status_change(port_reg);
974 DBFEXIT;
975 }
976
/* iso_eof interrupt bottom half.  Walks the isoc epids and, for IN isoc
   URBs that were queued without URB_ISO_ASAP, enables the EP descriptor
   when the requested start_frame matches the current frame number. */
void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg *reg)
{
  int epid;
  struct urb *urb;
  struct crisv10_urb_priv *urb_priv;

  DBFENTER;

  /* NOTE(review): loop bound is NBR_OF_EPIDS - 1, so the last epid is never
     examined here, while other loops in this file (e.g. tc_init) use the
     full NBR_OF_EPIDS — confirm this is intentional. */
  for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {

    /* Only check epids that are in use, is valid and has SB list */
    if (!epid_inuse(epid) || epid == INVALID_EPID ||
	TxIsocEPList[epid].sub == 0 || epid == DUMMY_EPID) {
      /* Nothing here to see. */
      continue;
    }
    ASSERT(epid_isoc(epid));

    /* Get the active URB for this epid (if any). */
    urb = activeUrbList[epid];
    if (urb == 0) {
      isoc_warn("Ignoring NULL urb for epid:%d\n", epid);
      continue;
    }
    /* Only IN traffic is frame-started here; OUT isoc is handled elsewhere. */
    if(!epid_out_traffic(epid)) {
      /* Sanity check. */
      ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);

      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
      ASSERT(urb_priv);

      if (urb_priv->urb_state == NOT_STARTED) {
	/* If ASAP is not set and urb->start_frame is the current frame,
	   start the transfer. */
	if (!(urb->transfer_flags & URB_ISO_ASAP) &&
	    (urb->start_frame == (*R_USB_FM_NUMBER & 0x7ff))) {
	  /* EP should not be enabled if we're waiting for start_frame */
	  ASSERT((TxIsocEPList[epid].command &
		  IO_STATE(USB_EP_command, enable, yes)) == 0);

	  isoc_warn("Enabling isoc IN EP descr for epid %d\n", epid);
	  TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);

	  /* This urb is now active. */
	  urb_priv->urb_state = STARTED;
	  continue;
	}
      }
    }
  }

  DBFEXIT;
}
1030
/* Controller status interrupt bottom half.  Panics on fatal controller
   conditions (overrun, perror, device mode), tracks whether the controller
   is running, and marks the hcd as having seen an IRQ. */
void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg *reg)
{
  struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(reg->hcd);

  DBFENTER;
  ASSERT(crisv10_hcd);

  /* irq_dbg("ctr_status_irq, controller status: %s\n",
     hcd_status_to_str(reg->r_usb_status));*/

  /* FIXME: What should we do if we get ourun or perror? Dump the EP and SB
     list for the corresponding epid? */
  if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
    panic("USB controller got ourun.");
  }
  if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {

    /* Before, etrax_usb_do_intr_recover was called on this epid if it was
       an interrupt pipe. I don't see how re-enabling all EP descriptors
       will help if there was a programming error. */
    panic("USB controller got perror.");
  }

  /* Keep track of USB Controller, if it's running or not.  tc_urb_enqueue
     rejects new URBs while this flag is clear. */
  if(reg->r_usb_status & IO_STATE(R_USB_STATUS, running, yes)) {
    crisv10_hcd->running = 1;
  } else {
    crisv10_hcd->running = 0;
  }

  if (reg->r_usb_status & IO_MASK(R_USB_STATUS, device_mode)) {
    /* We should never operate in device mode. */
    panic("USB controller in device mode.");
  }

  /* Set the flag to avoid getting "Unlink after no-IRQ? Controller is probably
     using the wrong IRQ" from hcd_unlink_urb() in drivers/usb/core/hcd.c */
  set_bit(HCD_FLAG_SAW_IRQ, &reg->hcd->flags);

  DBFEXIT;
}
1072
1073
1074 /******************************************************************/
1075 /* Host Controller interface functions */
1076 /******************************************************************/
1077
1078 static inline void crisv10_ready_wait(void) {
1079 volatile int timeout = 10000;
1080 /* Check the busy bit of USB controller in Etrax */
1081 while((*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy)) &&
1082 (timeout-- > 0));
1083 }
1084
1085 /* reset host controller */
1086 static int crisv10_hcd_reset(struct usb_hcd *hcd)
1087 {
1088 DBFENTER;
1089 hcd_dbg(hcd, "reset\n");
1090
1091
1092 /* Reset the USB interface. */
1093 /*
1094 *R_USB_COMMAND =
1095 IO_STATE(R_USB_COMMAND, port_sel, nop) |
1096 IO_STATE(R_USB_COMMAND, port_cmd, reset) |
1097 IO_STATE(R_USB_COMMAND, ctrl_cmd, reset);
1098 nop();
1099 */
1100 DBFEXIT;
1101 return 0;
1102 }
1103
/* Start the host controller (hcd_driver .start hook).
   Waits for the controller to go non-busy, issues the host_run command,
   then marks the hcd as running.  Always returns 0. */
static int crisv10_hcd_start(struct usb_hcd *hcd)
{
  DBFENTER;
  hcd_dbg(hcd, "start\n");

  /* The controller must not be busy before a new command is written. */
  crisv10_ready_wait();

  /* Start processing of USB traffic. */
  *R_USB_COMMAND =
    IO_STATE(R_USB_COMMAND, port_sel, nop) |
    IO_STATE(R_USB_COMMAND, port_cmd, reset) |
    IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);

  nop();

  hcd->state = HC_STATE_RUNNING;

  DBFEXIT;
  return 0;
}
1125
1126 /* stop host controller */
1127 static void crisv10_hcd_stop(struct usb_hcd *hcd)
1128 {
1129 DBFENTER;
1130 hcd_dbg(hcd, "stop\n");
1131 crisv10_hcd_reset(hcd);
1132 DBFEXIT;
1133 }
1134
1135 /* return the current frame number */
1136 static int crisv10_hcd_get_frame(struct usb_hcd *hcd)
1137 {
1138 DBFENTER;
1139 DBFEXIT;
1140 return (*R_USB_FM_NUMBER & 0x7ff);
1141 }
1142
1143 #ifdef CONFIG_USB_OTG
1144
/* OTG hook: begin a port reset.  Not implemented; reports success. */
static int crisv10_hcd_start_port_reset(struct usb_hcd *hcd, unsigned port)
{
  return 0; /* no-op for now */
}
1149
1150 #endif /* CONFIG_USB_OTG */
1151
1152
1153 /******************************************************************/
1154 /* Root Hub functions */
1155 /******************************************************************/
1156
/* Root hub status: this virtual hub never reports local-power loss or
   over-current, so both status words are constant zero. */
static const struct usb_hub_status rh_hub_status =
{
	.wHubStatus =		0,
	.wHubChange =		0,
};

/* Root hub descriptor (hub descriptor layout, USB 2.0 spec 11.23.2.1).
   bNbrPorts is overwritten with the configured port count when the
   descriptor is served in rh_control_request(). */
static const u8 rh_hub_descr[] =
{
  0x09,                 /* bDescLength */
  0x29,                 /* bDescriptorType */
  USB_ROOT_HUB_PORTS,   /* bNbrPorts */
  0x00,                 /* wHubCharacteristics, low byte */
  0x00,                 /* wHubCharacteristics, high byte */
  0x01,                 /* bPwrOn2pwrGood */
  0x00,                 /* bHubContrCurrent */
  0x00,                 /* DeviceRemovable */
  0xff                  /* PortPwrCtrlMask */
};

/* Actual holder of root hub status (per-port change and previous status). */
struct crisv10_rh rh;
1180
1181 /* Initialize root hub data structures (called from dvdrv_hcd_probe()) */
1182 int rh_init(void) {
1183 int i;
1184 /* Reset port status flags */
1185 for (i = 0; i < USB_ROOT_HUB_PORTS; i++) {
1186 rh.wPortChange[i] = 0;
1187 rh.wPortStatusPrev[i] = 0;
1188 }
1189 return 0;
1190 }
1191
/* Port status bits the root hub tracks and reports as changes:
   connection, enable, suspend and reset. */
#define RH_FEAT_MASK ((1<<USB_PORT_FEAT_CONNECTION)|\
                      (1<<USB_PORT_FEAT_ENABLE)|\
                      (1<<USB_PORT_FEAT_SUSPEND)|\
                      (1<<USB_PORT_FEAT_RESET))
1196
1197 /* Handle port status change interrupt (called from bottom part interrupt) */
1198 void rh_port_status_change(__u16 port_reg[]) {
1199 int i;
1200 __u16 wChange;
1201
1202 for(i = 0; i < USB_ROOT_HUB_PORTS; i++) {
1203 /* Xor out changes since last read, masked for important flags */
1204 wChange = (port_reg[i] & RH_FEAT_MASK) ^ rh.wPortStatusPrev[i];
1205 /* Or changes together with (if any) saved changes */
1206 rh.wPortChange[i] |= wChange;
1207 /* Save new status */
1208 rh.wPortStatusPrev[i] = port_reg[i];
1209
1210 if(wChange) {
1211 rh_dbg("Interrupt port_status change port%d: %s Current-status:%s\n", i+1,
1212 port_status_to_str(wChange),
1213 port_status_to_str(port_reg[i]));
1214 }
1215 }
1216 }
1217
1218 /* Construct port status change bitmap for the root hub */
1219 static int rh_status_data_request(struct usb_hcd *hcd, char *buf)
1220 {
1221 struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
1222 unsigned int i;
1223
1224 DBFENTER;
1225 /*
1226 * corresponds to hub status change EP (USB 2.0 spec section 11.13.4)
1227 * return bitmap indicating ports with status change
1228 */
1229 *buf = 0;
1230 spin_lock(&crisv10_hcd->lock);
1231 for (i = 1; i <= crisv10_hcd->num_ports; i++) {
1232 if (rh.wPortChange[map_port(i)]) {
1233 *buf |= (1 << i);
1234 rh_dbg("rh_status_data_request, change on port %d: %s Current Status: %s\n", i,
1235 port_status_to_str(rh.wPortChange[map_port(i)]),
1236 port_status_to_str(rh.wPortStatusPrev[map_port(i)]));
1237 }
1238 }
1239 spin_unlock(&crisv10_hcd->lock);
1240 DBFEXIT;
1241 return *buf == 0 ? 0 : 1;
1242 }
1243
/* Handle a control request for the root hub (called from hcd_driver).
   Emulates the standard hub class requests in software.  Returns 0 on
   success, -EPIPE (request stall) for unsupported or malformed requests. */
static int rh_control_request(struct usb_hcd *hcd,
			      u16 typeReq,
			      u16 wValue,
			      u16 wIndex,
			      char *buf,
			      u16 wLength) {

  struct crisv10_hcd *crisv10_hcd = hcd_to_crisv10_hcd(hcd);
  int retval = 0;
  int len;
  DBFENTER;

  switch (typeReq) {
  case GetHubDescriptor:
    rh_dbg("GetHubDescriptor\n");
    len = min_t(unsigned int, sizeof rh_hub_descr, wLength);
    memcpy(buf, rh_hub_descr, len);
    /* Patch in the actual configured port count. */
    buf[2] = crisv10_hcd->num_ports;
    break;
  case GetHubStatus:
    rh_dbg("GetHubStatus\n");
    len = min_t(unsigned int, sizeof rh_hub_status, wLength);
    memcpy(buf, &rh_hub_status, len);
    break;
  case GetPortStatus:
    if (!wIndex || wIndex > crisv10_hcd->num_ports)
      goto error;
    rh_dbg("GetportStatus, port:%d change:%s status:%s\n", wIndex,
	   port_status_to_str(rh.wPortChange[map_port(wIndex)]),
	   port_status_to_str(rh.wPortStatusPrev[map_port(wIndex)]));
    /* wPortStatus followed by wPortChange, both little-endian. */
    *(u16 *) buf = cpu_to_le16(rh.wPortStatusPrev[map_port(wIndex)]);
    *(u16 *) (buf + 2) = cpu_to_le16(rh.wPortChange[map_port(wIndex)]);
    break;
  case SetHubFeature:
    rh_dbg("SetHubFeature\n");
    /* FALLTHROUGH: set and clear share the (unimplemented) hub feature
       handling below. */
  case ClearHubFeature:
    rh_dbg("ClearHubFeature\n");
    switch (wValue) {
    case C_HUB_OVER_CURRENT:
    case C_HUB_LOCAL_POWER:
      rh_warn("Not implemented hub request:%d \n", typeReq);
      /* not implemented */
      break;
    default:
      goto error;
    }
    break;
  case SetPortFeature:
    if (!wIndex || wIndex > crisv10_hcd->num_ports)
      goto error;
    if(rh_set_port_feature(map_port(wIndex), wValue))
      goto error;
    break;
  case ClearPortFeature:
    if (!wIndex || wIndex > crisv10_hcd->num_ports)
      goto error;
    if(rh_clear_port_feature(map_port(wIndex), wValue))
      goto error;
    break;
  default:
    rh_warn("Unknown hub request: %d\n", typeReq);
  error:
    retval = -EPIPE;
  }
  DBFEXIT;
  return retval;
}
1312
/* SetPortFeature handler for the virtual root hub.
   @bPort is the 0-based physical port index; @wFeature the USB port
   feature selector.  Returns 0 on success, -1 for unknown features.
   Port reset on an already-enabled port needs an elaborate hardware
   workaround, documented inline below. */
int rh_set_port_feature(__u8 bPort, __u16 wFeature) {
  __u8 bUsbCommand = 0;
  __u8 reset_cnt;
  switch(wFeature) {
  case USB_PORT_FEAT_RESET:
    rh_dbg("SetPortFeature: reset\n");

    /* NOTE(review): the "enabled" bit is tested with the PORT_STATUS_1
       field macro regardless of port — presumably both port status
       registers share the same bit layout; confirm against the ETRAX
       register specification. */
    if (rh.wPortStatusPrev[bPort] &
        IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes))
    {
      __u8 restart_controller = 0;

      if ( (rh.wPortStatusPrev[0] &
            IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) &&
           (rh.wPortStatusPrev[1] &
            IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, yes)) )
      {
        /* Both ports are enabled. The USB controller will not change state. */
        restart_controller = 0;
      }
      else
      {
        /* Only one port is enabled. The USB controller will change state
           and must be restarted. */
        restart_controller = 1;
      }
      /*
        In ETRAX 100LX it's not possible to reset an enabled root hub port.
        The workaround is to disable and enable the port before resetting it.
        Disabling the port can, if both ports are disabled at once, cause the
        USB controller to change state to HOST_MODE state.
        The USB controller state transition causes a lot of unwanted
        interrupts that must be avoided.
        Disabling the USB controller status and port status interrupts before
        disabling/resetting the port stops these interrupts.

        These actions are performed:
        1. Disable USB controller status and port status interrupts.
        2. Disable the port.
        3. Wait for the port to be disabled.
        4. Enable the port.
        5. Wait for the port to be enabled.
        6. Reset the port.
        7. Wait for the reset to end.
        8. Wait for the USB controller entering started state.
        9. Order the USB controller to running state.
        10. Wait for the USB controller reaching running state.
        11. Clear all interrupts generated during the disable/enable/reset
            procedure.
        12. Enable the USB controller status and port status interrupts.
      */

      /* 1. Disable USB controller status and USB port status interrupts. */
      *R_USB_IRQ_MASK_CLR = IO_STATE(R_USB_IRQ_MASK_CLR, ctl_status, clr);
      __asm__ __volatile__ (" nop");
      *R_USB_IRQ_MASK_CLR = IO_STATE(R_USB_IRQ_MASK_CLR, port_status, clr);
      __asm__ __volatile__ (" nop");

      {

        /* Since a root hub port reset shall be 50 ms and the ETRAX 100LX
           root hub port reset is 10 ms we must perform 5 port resets to
           achieve a proper root hub port reset. */
        for (reset_cnt = 0; reset_cnt < 5; reset_cnt ++)
        {
          rh_dbg("Disable Port %d\n", bPort + 1);

          /* 2. Disable the port. */
          if (bPort == 0)
          {
            *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
          }
          else
          {
            *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, yes);
          }

          /* 3. Wait for the port to be disabled. */
          while ( (bPort == 0) ?
                  *R_USB_RH_PORT_STATUS_1 &
                  IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes) :
                  *R_USB_RH_PORT_STATUS_2 &
                  IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, yes) ) {}

          rh_dbg("Port %d is disabled. Enable it!\n", bPort + 1);

          /* 4. Enable the port. */
          if (bPort == 0)
          {
            *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
          }
          else
          {
            *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, no);
          }

          /* 5. Wait for the port to be enabled again.
             NOTE(review): this polls the "connected" bit rather than
             "enabled", even though the step list above says enabled —
             looks deliberate, but confirm against the register spec. */
          while (!( (bPort == 0) ?
                    *R_USB_RH_PORT_STATUS_1 &
                    IO_STATE(R_USB_RH_PORT_STATUS_1, connected, yes) :
                    *R_USB_RH_PORT_STATUS_2 &
                    IO_STATE(R_USB_RH_PORT_STATUS_2, connected, yes) ) ) {}

          rh_dbg("Port %d is enabled.\n", bPort + 1);

          /* 6. Reset the port. */
          crisv10_ready_wait();
          *R_USB_COMMAND =
            ( (bPort == 0) ?
              IO_STATE(R_USB_COMMAND, port_sel, port1):
              IO_STATE(R_USB_COMMAND, port_sel, port2) ) |
            IO_STATE(R_USB_COMMAND, port_cmd, reset) |
            IO_STATE(R_USB_COMMAND, busy, no) |
            IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
          rh_dbg("Port %d is resetting.\n", bPort + 1);

          /* 7. The USB specification says that we should wait for at least
             10ms for device recovery. */
          udelay(10500); /* 10,5ms blocking wait */

          crisv10_ready_wait();
        }
      }


      /* Check if the USB controller needs to be restarted. */
      if (restart_controller)
      {
        /* 8. Wait for the USB controller entering started state. */
        while (!(*R_USB_STATUS & IO_STATE(R_USB_STATUS, started, yes))) {}

        /* 9. Order the USB controller to running state. */
        crisv10_ready_wait();
        *R_USB_COMMAND =
          IO_STATE(R_USB_COMMAND, port_sel, nop) |
          IO_STATE(R_USB_COMMAND, port_cmd, reset) |
          IO_STATE(R_USB_COMMAND, busy, no) |
          IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);

        /* 10. Wait for the USB controller reaching running state. */
        while (!(*R_USB_STATUS & IO_STATE(R_USB_STATUS, running, yes))) {}
      }

      /* 11. Clear any controller or port status interrupts before enabling
         the interrupts again. */
      {
        u16 dummy;

        /* Clear the port status interrupt of the reset port by reading the
           status register. */
        if (bPort == 0)
        {
          rh_dbg("Clearing port 1 interrupts\n");
          dummy = *R_USB_RH_PORT_STATUS_1;
        }
        else
        {
          rh_dbg("Clearing port 2 interrupts\n");
          dummy = *R_USB_RH_PORT_STATUS_2;
        }

        if (restart_controller)
        {
          /* The USB controller is restarted. Clear all interrupts. */
          rh_dbg("Clearing all interrupts\n");
          dummy = *R_USB_STATUS;
          dummy = *R_USB_RH_PORT_STATUS_1;
          dummy = *R_USB_RH_PORT_STATUS_2;
        }
      }

      /* 12. Enable USB controller status and USB port status interrupts. */
      *R_USB_IRQ_MASK_SET = IO_STATE(R_USB_IRQ_MASK_SET, ctl_status, set);
      __asm__ __volatile__ (" nop");
      *R_USB_IRQ_MASK_SET = IO_STATE(R_USB_IRQ_MASK_SET, port_status, set);
      __asm__ __volatile__ (" nop");

    }
    else
    {

      /* Port is not enabled: a plain reset command suffices. */
      bUsbCommand |= IO_STATE(R_USB_COMMAND, port_cmd, reset);
      /* Select which port via the port_sel field. */
      bUsbCommand |= IO_FIELD(R_USB_COMMAND, port_sel, bPort+1);

      /* Make sure the controller isn't busy. */
      crisv10_ready_wait();
      /* Send out the actual command to the USB controller. */
      *R_USB_COMMAND = bUsbCommand;

      /* Wait a while for controller to first become started after port reset */
      udelay(12000); /* 12ms blocking wait */

      /* Make sure the controller isn't busy. */
      crisv10_ready_wait();

      /* If all enabled ports were disabled the host controller goes down into
         started mode, so we need to bring it back into the running state.
         (This is safe even if it's already in the running state.) */
      *R_USB_COMMAND =
        IO_STATE(R_USB_COMMAND, port_sel, nop) |
        IO_STATE(R_USB_COMMAND, port_cmd, reset) |
        IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
    }

    break;
  case USB_PORT_FEAT_SUSPEND:
    rh_dbg("SetPortFeature: suspend\n");
    bUsbCommand |= IO_STATE(R_USB_COMMAND, port_cmd, suspend);
    goto set;
    break;
  case USB_PORT_FEAT_POWER:
    rh_dbg("SetPortFeature: power\n");
    break;
  case USB_PORT_FEAT_C_CONNECTION:
    rh_dbg("SetPortFeature: c_connection\n");
    break;
  case USB_PORT_FEAT_C_RESET:
    rh_dbg("SetPortFeature: c_reset\n");
    break;
  case USB_PORT_FEAT_C_OVER_CURRENT:
    rh_dbg("SetPortFeature: c_over_current\n");
    break;

  set:
    /* Select which port via the port_sel field. */
    bUsbCommand |= IO_FIELD(R_USB_COMMAND, port_sel, bPort+1);

    /* Make sure the controller isn't busy. */
    crisv10_ready_wait();
    /* Send out the actual command to the USB controller. */
    *R_USB_COMMAND = bUsbCommand;
    break;
  default:
    rh_dbg("SetPortFeature: unknown feature\n");
    return -1;
  }
  return 0;
}
1551
1552 int rh_clear_port_feature(__u8 bPort, __u16 wFeature) {
1553 switch(wFeature) {
1554 case USB_PORT_FEAT_ENABLE:
1555 rh_dbg("ClearPortFeature: enable\n");
1556 rh_disable_port(bPort);
1557 break;
1558 case USB_PORT_FEAT_SUSPEND:
1559 rh_dbg("ClearPortFeature: suspend\n");
1560 break;
1561 case USB_PORT_FEAT_POWER:
1562 rh_dbg("ClearPortFeature: power\n");
1563 break;
1564
1565 case USB_PORT_FEAT_C_ENABLE:
1566 rh_dbg("ClearPortFeature: c_enable\n");
1567 goto clear;
1568 case USB_PORT_FEAT_C_SUSPEND:
1569 rh_dbg("ClearPortFeature: c_suspend\n");
1570 goto clear;
1571 case USB_PORT_FEAT_C_CONNECTION:
1572 rh_dbg("ClearPortFeature: c_connection\n");
1573 goto clear;
1574 case USB_PORT_FEAT_C_OVER_CURRENT:
1575 rh_dbg("ClearPortFeature: c_over_current\n");
1576 goto clear;
1577 case USB_PORT_FEAT_C_RESET:
1578 rh_dbg("ClearPortFeature: c_reset\n");
1579 goto clear;
1580 clear:
1581 rh.wPortChange[bPort] &= ~(1 << (wFeature - 16));
1582 break;
1583 default:
1584 rh_dbg("ClearPortFeature: unknown feature\n");
1585 return -1;
1586 }
1587 return 0;
1588 }
1589
1590
1591 #ifdef CONFIG_PM
/* Handle a suspend request for the root hub (called from hcd_driver).
   Not implemented; reports success. */
static int rh_suspend_request(struct usb_hcd *hcd)
{
  return 0; /* no-op for now */
}
1597
/* Handle a resume request for the root hub (called from hcd_driver).
   Not implemented; reports success. */
static int rh_resume_request(struct usb_hcd *hcd)
{
  return 0; /* no-op for now */
}
1603 #endif /* CONFIG_PM */
1604
1605
1606
/* Wrapper function for workaround port disable registers in USB controller.
   Disables physical port @port (0 or 1) via its dedicated disable
   register, waits (bounded) for the cached root hub status to show the
   port no longer enabled, then clears the disable flag again. */
static void rh_disable_port(unsigned int port) {
  volatile int timeout = 10000;
  volatile char* usb_portx_disable;
  switch(port) {
  case 0:
    usb_portx_disable = R_USB_PORT1_DISABLE;
    break;
  case 1:
    usb_portx_disable = R_USB_PORT2_DISABLE;
    break;
  default:
    /* Invalid port index */
    return;
  }
  /* Set disable flag in special register.
     NOTE(review): the PORT1 IO_STATE macro is used for both ports —
     presumably both disable registers share the same layout; confirm. */
  *usb_portx_disable = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
  /* Wait until not enabled anymore.  Relies on the port status interrupt
     bottom half updating rh.wPortStatusPrev while we spin. */
  while((rh.wPortStatusPrev[port] &
	 IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) &&
	(timeout-- > 0));

  /* clear disable flag in special register */
  *usb_portx_disable = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
  rh_info("Physical port %d disabled\n", port+1);
}
1633
1634
1635 /******************************************************************/
1636 /* Transfer Controller (TC) functions */
1637 /******************************************************************/
1638
1639 /* FIXME: Should RX_BUF_SIZE be a config option, or maybe we should adjust it
1640 dynamically?
1641 To adjust it dynamically we would have to get an interrupt when we reach
1642 the end of the rx descriptor list, or when we get close to the end, and
1643 then allocate more descriptors. */
/* RX DMA descriptor ring dimensions: 512 descriptors of 1 KiB each. */
#define NBR_OF_RX_DESC 512
#define RX_DESC_BUF_SIZE 1024
#define RX_BUF_SIZE (NBR_OF_RX_DESC * RX_DESC_BUF_SIZE)


/* Local variables for Transfer Controller */
/* --------------------------------------- */

/* This is a circular (double-linked) list of the active urbs for each epid.
   The head is never removed, and new urbs are linked onto the list as
   urb_entry_t elements. Don't reference urb_list directly; use the wrapper
   functions instead (which include spin_locks). */
static struct list_head urb_list[NBR_OF_EPIDS];

/* Read about the need and usage of this lock in submit_ctrl_urb. */
/* Lock for URB lists for each EPID */
static spinlock_t urb_list_lock;

/* Lock for EPID array register (R_USB_EPT_x) in Etrax */
static spinlock_t etrax_epid_lock;

/* Lock for dma8 sub0 handling */
static spinlock_t etrax_dma8_sub0_lock;

/* DMA IN cache bug. Align the DMA IN buffers to 32 bytes, i.e. a cache line.
   Since RX_DESC_BUF_SIZE (1024) is a multiple of 32, all rx buffers will be
   cache aligned. */
static volatile unsigned char RxBuf[RX_BUF_SIZE] __attribute__ ((aligned (32)));
static volatile struct USB_IN_Desc RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned (4)));

/* Pointers into RxDescList. */
static volatile struct USB_IN_Desc *myNextRxDesc;
static volatile struct USB_IN_Desc *myLastRxDesc;

/* A zout transfer makes a memory access at the address of its buf pointer,
   which means that setting this buf pointer to 0 will cause an access to the
   flash. In addition to this, setting sw_len to 0 results in a 16/32 bytes
   (depending on DMA burst size) transfer.
   Instead, we set it to 1, and point it to this buffer. */
static int zout_buffer[4] __attribute__ ((aligned (4)));

/* Cache for allocating new EP and SB descriptors. */
static struct kmem_cache *usb_desc_cache;

/* Cache for the data allocated in the isoc descr top half. */
static struct kmem_cache *isoc_compl_cache;

/* Cache for the data allocated when delayed finishing of URBs */
static struct kmem_cache *later_data_cache;


/* Counter to keep track of how many Isoc EPs we have set up. Used to enable
   and disable the iso_eof interrupt. We only need these interrupts when we
   have Isoc data endpoints (consumes CPU cycles).
   FIXME: This could be more fine granular, so this interrupt is only enabled
   when we have an In Isoc URB not URB_ISO_ASAP flagged queued. */
static int isoc_epid_counter;
1701
1702 /* Protecting wrapper functions for R_USB_EPT_x */
1703 /* -------------------------------------------- */
/* Write @data to endpoint table entry @index (R_USB_EPT_DATA), serialized
   by etrax_epid_lock.  The index register selects the entry; the nop()
   presumably lets the index latch before the data access (pattern used by
   all R_USB_EPT accessors in this file). */
static inline void etrax_epid_set(__u8 index, __u32 data) {
  unsigned long flags;
  spin_lock_irqsave(&etrax_epid_lock, flags);
  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
  nop();
  *R_USB_EPT_DATA = data;
  spin_unlock_irqrestore(&etrax_epid_lock, flags);
}
1712
/* Clear the error counters and error code of endpoint table entry @index,
   leaving all other fields untouched.  Serialized by etrax_epid_lock. */
static inline void etrax_epid_clear_error(__u8 index) {
  unsigned long flags;
  spin_lock_irqsave(&etrax_epid_lock, flags);
  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
  nop();
  *R_USB_EPT_DATA &=
    ~(IO_MASK(R_USB_EPT_DATA, error_count_in) |
      IO_MASK(R_USB_EPT_DATA, error_count_out) |
      IO_MASK(R_USB_EPT_DATA, error_code));
  spin_unlock_irqrestore(&etrax_epid_lock, flags);
}
1724
/* Set the data toggle bit of endpoint table entry @index.  @dirout selects
   the OUT (t_out) vs IN (t_in) toggle field.  Serialized by
   etrax_epid_lock. */
static inline void etrax_epid_set_toggle(__u8 index, __u8 dirout,
					 __u8 toggle) {
  unsigned long flags;
  spin_lock_irqsave(&etrax_epid_lock, flags);
  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
  nop();
  if(dirout) {
    *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_out);
    *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_out, toggle);
  } else {
    *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_in);
    *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_in, toggle);
  }
  spin_unlock_irqrestore(&etrax_epid_lock, flags);
}
1740
/* Read the data toggle bit of endpoint table entry @index.  @dirout selects
   the OUT (t_out) vs IN (t_in) toggle field.  Serialized by
   etrax_epid_lock. */
static inline __u8 etrax_epid_get_toggle(__u8 index, __u8 dirout) {
  unsigned long flags;
  __u8 toggle;
  spin_lock_irqsave(&etrax_epid_lock, flags);
  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
  nop();
  if (dirout) {
    toggle = IO_EXTRACT(R_USB_EPT_DATA, t_out, *R_USB_EPT_DATA);
  } else {
    toggle = IO_EXTRACT(R_USB_EPT_DATA, t_in, *R_USB_EPT_DATA);
  }
  spin_unlock_irqrestore(&etrax_epid_lock, flags);
  return toggle;
}
1755
1756
/* Read the full R_USB_EPT_DATA word of endpoint table entry @index.
   Serialized by etrax_epid_lock. */
static inline __u32 etrax_epid_get(__u8 index) {
  unsigned long flags;
  __u32 data;
  spin_lock_irqsave(&etrax_epid_lock, flags);
  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
  nop();
  data = *R_USB_EPT_DATA;
  spin_unlock_irqrestore(&etrax_epid_lock, flags);
  return data;
}
1767
1768
1769
1770
1771 /* Main functions for Transfer Controller */
1772 /* -------------------------------------- */
1773
1774 /* Init structs, memories and lists used by Transfer Controller */
1775 int tc_init(struct usb_hcd *hcd) {
1776 int i;
1777 /* Clear software state info for all epids */
1778 memset(epid_state, 0, sizeof(struct etrax_epid) * NBR_OF_EPIDS);
1779
1780 /* Set Invalid and Dummy as being in use and disabled */
1781 epid_state[INVALID_EPID].inuse = 1;
1782 epid_state[DUMMY_EPID].inuse = 1;
1783 epid_state[INVALID_EPID].disabled = 1;
1784 epid_state[DUMMY_EPID].disabled = 1;
1785
1786 /* Clear counter for how many Isoc epids we have sat up */
1787 isoc_epid_counter = 0;
1788
1789 /* Initialize the urb list by initiating a head for each list.
1790 Also reset list hodling active URB for each epid */
1791 for (i = 0; i < NBR_OF_EPIDS; i++) {
1792 INIT_LIST_HEAD(&urb_list[i]);
1793 activeUrbList[i] = NULL;
1794 }
1795
1796 /* Init lock for URB lists */
1797 spin_lock_init(&urb_list_lock);
1798 /* Init lock for Etrax R_USB_EPT register */
1799 spin_lock_init(&etrax_epid_lock);
1800 /* Init lock for Etrax dma8 sub0 handling */
1801 spin_lock_init(&etrax_dma8_sub0_lock);
1802
1803 /* We use kmem_cache_* to make sure that all DMA desc. are dword aligned */
1804
1805 /* Note that we specify sizeof(struct USB_EP_Desc) as the size, but also
1806 allocate SB descriptors from this cache. This is ok since
1807 sizeof(struct USB_EP_Desc) == sizeof(struct USB_SB_Desc). */
1808 usb_desc_cache = kmem_cache_create("usb_desc_cache",
1809 sizeof(struct USB_EP_Desc), 0,
1810 SLAB_HWCACHE_ALIGN, 0);
1811 if(usb_desc_cache == NULL) {
1812 return -ENOMEM;
1813 }
1814
1815 /* Create slab cache for speedy allocation of memory for isoc bottom-half
1816 interrupt handling */
1817 isoc_compl_cache =
1818 kmem_cache_create("isoc_compl_cache",
1819 sizeof(struct crisv10_isoc_complete_data),
1820 0, SLAB_HWCACHE_ALIGN, 0);
1821 if(isoc_compl_cache == NULL) {
1822 return -ENOMEM;
1823 }
1824
1825 /* Create slab cache for speedy allocation of memory for later URB finish
1826 struct */
1827 later_data_cache =
1828 kmem_cache_create("later_data_cache",
1829 sizeof(struct urb_later_data),
1830 0, SLAB_HWCACHE_ALIGN, 0);
1831 if(later_data_cache == NULL) {
1832 return -ENOMEM;
1833 }
1834
1835
1836 /* Initiate the bulk start timer. */
1837 init_timer(&bulk_start_timer);
1838 bulk_start_timer.expires = jiffies + BULK_START_TIMER_INTERVAL;
1839 bulk_start_timer.function = tc_bulk_start_timer_func;
1840 add_timer(&bulk_start_timer);
1841
1842
1843 /* Initiate the bulk eot timer. */
1844 init_timer(&bulk_eot_timer);
1845 bulk_eot_timer.expires = jiffies + BULK_EOT_TIMER_INTERVAL;
1846 bulk_eot_timer.function = tc_bulk_eot_timer_func;
1847 bulk_eot_timer.data = (unsigned long)hcd;
1848 add_timer(&bulk_eot_timer);
1849
1850 return 0;
1851 }
1852
1853 /* Uninitialize all resources used by Transfer Controller */
1854 void tc_destroy(void) {
1855
1856 /* Destroy all slab cache */
1857 kmem_cache_destroy(usb_desc_cache);
1858 kmem_cache_destroy(isoc_compl_cache);
1859 kmem_cache_destroy(later_data_cache);
1860
1861 /* Remove timers */
1862 del_timer(&bulk_start_timer);
1863 del_timer(&bulk_eot_timer);
1864 }
1865
/* Restart DMA channel 8 sub-channel 0 if it has stopped.  Advances the
   channel's EP pointer past consecutive DUMMY_EPID descriptors before
   issuing the start command so the DMA resumes on a real descriptor.
   Serialized by etrax_dma8_sub0_lock. */
static void restart_dma8_sub0(void) {
  unsigned long flags;
  spin_lock_irqsave(&etrax_dma8_sub0_lock, flags);
  /* Verify that the dma is not running */
  if ((*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd)) == 0) {
    struct USB_EP_Desc *ep = (struct USB_EP_Desc *)phys_to_virt(*R_DMA_CH8_SUB0_EP);
    while (DUMMY_EPID == IO_EXTRACT(USB_EP_command, epid, ep->command)) {
      ep = (struct USB_EP_Desc *)phys_to_virt(ep->next);
    }
    /* Advance the DMA to the next EP descriptor that is not a DUMMY_EPID. */
    *R_DMA_CH8_SUB0_EP = virt_to_phys(ep);
    /* Restart the DMA */
    *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start);
  }
  spin_unlock_irqrestore(&etrax_dma8_sub0_lock, flags);
}
1882
/* queue an URB with the transfer controller (called from hcd_driver) */
/* Validates the request, allocates/locates an epid, creates HC-private
   data and the SB descriptor chain, then inserts the URB into the epid's
   queue and kicks DMA processing with interrupts disabled.
   Returns 0 on success or a negative errno; on any failure after
   urb_priv_create() the private data is freed again before returning. */
static int tc_urb_enqueue(struct usb_hcd *hcd,
                          struct urb *urb,
                          gfp_t mem_flags) {
  int epid;
  int retval;
  int bustime = 0;  /* periodic bandwidth to claim; 0 = none or already claimed */
  int maxpacket;
  unsigned long flags;
  struct crisv10_urb_priv *urb_priv;
  struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
  DBFENTER;

  if(!(crisv10_hcd->running)) {
    /* The USB Controller is not running, probably because no device is
       attached. No idea to enqueue URBs then */
    tc_warn("Rejected enqueueing of URB:0x%x because no dev attached\n",
            (unsigned int)urb);
    return -ENOENT;
  }

  maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
  /* Special case check for In Isoc transfers. Specification states that each
     In Isoc transfer consists of one packet and therefore it should fit into
     the transfer-buffer of an URB.
     We do the check here to be sure (an invalid scenario can be produced with
     parameters to the usbtest suite) */
  if(usb_pipeisoc(urb->pipe) && usb_pipein(urb->pipe) &&
     (urb->transfer_buffer_length < maxpacket)) {
    tc_err("Submit In Isoc URB with buffer length:%d to pipe with maxpacketlen: %d\n", urb->transfer_buffer_length, maxpacket);
    return -EMSGSIZE;
  }

  /* Check if there is a epid for URBs destination, if not this function
     set up one. */
  epid = tc_setup_epid(urb->ep, urb, mem_flags);
  if (epid < 0) {
    tc_err("Failed setup epid:%d for URB:0x%x\n", epid, (unsigned int)urb);
    DBFEXIT;
    return -ENOMEM;
  }

  /* Reject resubmission of an URB that is already active on, or queued
     for, this epid. */
  if(urb == activeUrbList[epid]) {
    tc_err("Resubmition of allready active URB:0x%x\n", (unsigned int)urb);
    return -ENXIO;
  }

  if(urb_list_entry(urb, epid)) {
    tc_err("Resubmition of allready queued URB:0x%x\n", (unsigned int)urb);
    return -ENXIO;
  }

  /* If we actively have flaged endpoint as disabled then refuse submition */
  if(epid_state[epid].disabled) {
    return -ENOENT;
  }

  /* Allocate and init HC-private data for URB */
  if(urb_priv_create(hcd, urb, epid, mem_flags) != 0) {
    DBFEXIT;
    return -ENOMEM;
  }
  urb_priv = urb->hcpriv;

  /* Check if there is enough bandwidth for periodic transfer */
  if(usb_pipeint(urb->pipe) || usb_pipeisoc(urb->pipe)) {
    /* only check (and later claim) if not already claimed */
    if (urb_priv->bandwidth == 0) {
      bustime = crisv10_usb_check_bandwidth(urb->dev, urb);
      if (bustime < 0) {
        tc_err("Not enough periodic bandwidth\n");
        urb_priv_free(hcd, urb);
        DBFEXIT;
        return -ENOSPC;
      }
    }
  }

  tc_dbg("Enqueue URB:0x%x[%d] epid:%d (%s) bufflen:%d\n",
         (unsigned int)urb, urb_priv->urb_num, epid,
         pipe_to_str(urb->pipe), urb->transfer_buffer_length);

  /* Create and link SBs required for this URB */
  retval = create_sb_for_urb(urb, mem_flags);
  if(retval != 0) {
    tc_err("Failed to create SBs for URB:0x%x[%d]\n", (unsigned int)urb,
           urb_priv->urb_num);
    urb_priv_free(hcd, urb);
    DBFEXIT;
    return retval;
  }

  /* Init intr EP pool if this URB is a INTR transfer. This pool is later
     used when inserting EPs in the TxIntrEPList. We do the alloc here
     so we can't run out of memory later */
  if(usb_pipeint(urb->pipe)) {
    retval = init_intr_urb(urb, mem_flags);
    if(retval != 0) {
      tc_warn("Failed to init Intr URB\n");
      urb_priv_free(hcd, urb);
      DBFEXIT;
      return retval;
    }
  }

  /* Disable other access when inserting USB */
  local_irq_save(flags);

  /* Claim bandwidth, if needed */
  if(bustime) {
    crisv10_usb_claim_bandwidth(urb->dev,
                                urb,
                                bustime,
                                (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS));
  }

  /* Add URB to EP queue */
  urb_list_add(urb, epid, mem_flags);

  if(usb_pipeisoc(urb->pipe)) {
    /* Special processing of Isoc URBs. */
    tc_dma_process_isoc_urb(urb);
  } else {
    /* Process EP queue for rest of the URB types (Bulk, Ctrl, Intr) */
    tc_dma_process_queue(epid);
  }

  local_irq_restore(flags);

  DBFEXIT;
  return 0;
}
2015
/* remove an URB from the transfer controller queues (called from hcd_driver)*/
/* Dequeue is asynchronous for the active URB: it is marked UNLINK, the
   relevant EP descriptor is disabled, and the actual retirement happens
   later via tc_finish_urb_later(). A Bulk/Ctrl/Intr URB that is still
   only queued (not active) is finished immediately. Always returns 0. */
static int tc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) {
  struct crisv10_urb_priv *urb_priv;
  unsigned long flags;
  int epid;

  DBFENTER;
  /* Disable interrupts here since a descriptor interrupt for the isoc epid
     will modify the sb list. This could possibly be done more granular, but
     urb_dequeue should not be used frequently anyway.
  */
  local_irq_save(flags);

  urb->status = status;
  urb_priv = urb->hcpriv;

  if (!urb_priv) {
    /* This happens if a device driver calls unlink on an urb that
       was never submitted (lazy driver) or if the urb was completed
       while dequeue was being called. */
    tc_warn("Dequeing of not enqueued URB:0x%x\n", (unsigned int)urb);
    local_irq_restore(flags);
    return 0;
  }
  epid = urb_priv->epid;

  tc_warn("Dequeing %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
          (urb == activeUrbList[epid]) ? "active" : "queued",
          (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
          str_type(urb->pipe), epid, urb->status,
          (urb_priv->later_data) ? "later-sched" : "");

  /* For Bulk, Ctrl and Intr are only one URB active at a time. So any URB
     that isn't active can be dequeued by just removing it from the queue */
  if(usb_pipebulk(urb->pipe) || usb_pipecontrol(urb->pipe) ||
     usb_pipeint(urb->pipe)) {

    /* Check if URB haven't gone further than the queue */
    if(urb != activeUrbList[epid]) {
      ASSERT(urb_priv->later_data == NULL);
      tc_warn("Dequeing URB:0x%x[%d] (%s %s epid:%d) from queue"
              " (not active)\n", (unsigned int)urb, urb_priv->urb_num,
              str_dir(urb->pipe), str_type(urb->pipe), epid);

      /* Finish the URB with error status from USB core */
      tc_finish_urb(hcd, urb, urb->status);
      local_irq_restore(flags);
      return 0;
    }
  }

  /* Set URB status to Unlink for handling when interrupt comes. */
  urb_priv->urb_state = UNLINK;

  /* Differentiate dequeing of Bulk and Ctrl from Isoc and Intr */
  switch(usb_pipetype(urb->pipe)) {
  case PIPE_BULK:
    /* Check if EP still is enabled */
    if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
      /* The EP was enabled, disable it. */
      TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
    }
    /* Kicking dummy list out of the party. */
    TxBulkEPList[epid].next = virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
    break;
  case PIPE_CONTROL:
    /* Check if EP still is enabled */
    if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
      /* The EP was enabled, disable it. */
      TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
    }
    break;
  case PIPE_ISOCHRONOUS:
    /* Disabling, busy-wait and unlinking of Isoc SBs will be done in
       finish_isoc_urb(), because there might be the case when this URB is
       dequeued but other valid URBs are still waiting on the same epid. */

    /* Check if In Isoc EP still is enabled */
    if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
      /* The EP was enabled, disable it. */
      TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
    }
    break;
  case PIPE_INTERRUPT:
    /* Special care is taken for interrupt URBs. EPs are unlinked in
       tc_finish_urb */
    break;
  default:
    break;
  }

  /* Asynchronous unlink, finish the URB later from scheduled or other
     event (data finished, error) */
  tc_finish_urb_later(hcd, urb, urb->status);

  local_irq_restore(flags);
  DBFEXIT;
  return 0;
}
2115
2116
/* Synchronously disable and retire all traffic for one epid.
   Marks the epid disabled (so enqueue() rejects new submissions and
   finish_urb() stops processing the queue), disables its EP
   descriptor(s), busy-waits (bounded by 'timeout') until the DMA no
   longer points at the descriptor, then finishes the active URB and
   every queued URB with -ENOENT. Called from tc_endpoint_disable(). */
static void tc_sync_finish_epid(struct usb_hcd *hcd, int epid) {
  volatile int timeout = 10000;  /* bound for the DMA hand-off busy-waits */
  struct urb* urb;
  struct crisv10_urb_priv* urb_priv;
  unsigned long flags;

  volatile struct USB_EP_Desc *first_ep; /* First EP in the list. */
  volatile struct USB_EP_Desc *curr_ep;  /* Current EP, the iterator. */
  volatile struct USB_EP_Desc *next_ep;  /* The EP after current. */

  int type = epid_state[epid].type;

  /* Setting this flag will cause enqueue() to return -ENOENT for new
     submitions on this endpoint and finish_urb() wont process queue further */
  epid_state[epid].disabled = 1;

  switch(type) {
  case PIPE_BULK:
    /* Check if EP still is enabled */
    if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
      /* The EP was enabled, disable it. */
      TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
      tc_warn("sync_finish: Disabling EP for epid:%d\n", epid);

      /* Do busy-wait until DMA not using this EP descriptor anymore */
      while((*R_DMA_CH8_SUB0_EP ==
             virt_to_phys(&TxBulkEPList[epid])) &&
            (timeout-- > 0));

    }
    break;

  case PIPE_CONTROL:
    /* Check if EP still is enabled */
    if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
      /* The EP was enabled, disable it. */
      TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
      tc_warn("sync_finish: Disabling EP for epid:%d\n", epid);

      /* Do busy-wait until DMA not using this EP descriptor anymore */
      while((*R_DMA_CH8_SUB1_EP ==
             virt_to_phys(&TxCtrlEPList[epid])) &&
            (timeout-- > 0));
    }
    break;

  case PIPE_INTERRUPT:
    local_irq_save(flags);
    /* Disable all Intr EPs belonging to epid. An Intr URB may own several
       EP descriptors in the circular TxIntrEPList, so walk the whole ring
       and disable each one that matches. */
    first_ep = &TxIntrEPList[0];
    curr_ep = first_ep;
    do {
      next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
      if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
        /* Disable EP */
        next_ep->command &= ~IO_MASK(USB_EP_command, enable);
      }
      curr_ep = phys_to_virt(curr_ep->next);
    } while (curr_ep != first_ep);

    local_irq_restore(flags);
    break;

  case PIPE_ISOCHRONOUS:
    /* Check if EP still is enabled */
    if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
      tc_warn("sync_finish: Disabling Isoc EP for epid:%d\n", epid);
      /* The EP was enabled, disable it. */
      TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);

      while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) &&
            (timeout-- > 0));
    }
    break;
  }

  local_irq_save(flags);

  /* Finish if there is active URB for this endpoint */
  if(activeUrbList[epid] != NULL) {
    urb = activeUrbList[epid];
    urb_priv = urb->hcpriv;
    ASSERT(urb_priv);
    tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
            (urb == activeUrbList[epid]) ? "active" : "queued",
            (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
            str_type(urb->pipe), epid, urb->status,
            (urb_priv->later_data) ? "later-sched" : "");

    tc_finish_urb(hcd, activeUrbList[epid], -ENOENT);
    ASSERT(activeUrbList[epid] == NULL);
  }

  /* Finish any queued URBs for this endpoint. There won't be any resubmitions
     because epid_disabled causes enqueue() to fail for this endpoint */
  while((urb = urb_list_first(epid)) != NULL) {
    urb_priv = urb->hcpriv;
    ASSERT(urb_priv);

    tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
            (urb == activeUrbList[epid]) ? "active" : "queued",
            (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
            str_type(urb->pipe), epid, urb->status,
            (urb_priv->later_data) ? "later-sched" : "");

    tc_finish_urb(hcd, urb, -ENOENT);
  }
  epid_state[epid].disabled = 0;
  local_irq_restore(flags);
}
2227
2228 /* free resources associated with an endpoint (called from hcd_driver) */
2229 static void tc_endpoint_disable(struct usb_hcd *hcd,
2230 struct usb_host_endpoint *ep) {
2231 DBFENTER;
2232 /* Only free epid if it has been allocated. We get two endpoint_disable
2233 requests for ctrl endpoints so ignore the second one */
2234 if(ep->hcpriv != NULL) {
2235 struct crisv10_ep_priv *ep_priv = ep->hcpriv;
2236 int epid = ep_priv->epid;
2237 tc_warn("endpoint_disable ep:0x%x ep-priv:0x%x (%s) (epid:%d freed)\n",
2238 (unsigned int)ep, (unsigned int)ep->hcpriv,
2239 endpoint_to_str(&(ep->desc)), epid);
2240
2241 tc_sync_finish_epid(hcd, epid);
2242
2243 ASSERT(activeUrbList[epid] == NULL);
2244 ASSERT(list_empty(&urb_list[epid]));
2245
2246 tc_free_epid(ep);
2247 } else {
2248 tc_dbg("endpoint_disable ep:0x%x ep-priv:0x%x (%s)\n", (unsigned int)ep,
2249 (unsigned int)ep->hcpriv, endpoint_to_str(&(ep->desc)));
2250 }
2251 DBFEXIT;
2252 }
2253
/* Deferred-work handler performing the asynchronous ("later") finish of
   an URB scheduled by tc_finish_urb_later(). The urb pointer in the
   work item may have been cleared (URB already finished) or recycled
   for a new submission, so it is validated against the saved sequence
   number before tc_finish_urb() is called. Always frees the work item. */
static void tc_finish_urb_later_proc(struct work_struct* work) {
  unsigned long flags;
  struct urb_later_data* uld;

  local_irq_save(flags);
  uld = container_of(work, struct urb_later_data, dws.work);
  if(uld->urb == NULL) {
    /* tc_finish_urb() already ran for this URB and cleared our back
       pointer; nothing left to do. */
    late_dbg("Later finish of URB = NULL (allready finished)\n");
  } else {
    struct crisv10_urb_priv* urb_priv = uld->urb->hcpriv;
    ASSERT(urb_priv);
    if(urb_priv->urb_num == uld->urb_num) {
      /* Same submission we were scheduled for; finish it now. */
      late_dbg("Later finish of URB:0x%x[%d]\n", (unsigned int)(uld->urb),
               urb_priv->urb_num);
      if(uld->status != uld->urb->status) {
        errno_dbg("Later-finish URB with status:%d, later-status:%d\n",
                  uld->urb->status, uld->status);
      }
      if(uld != urb_priv->later_data) {
        panic("Scheduled uld not same as URBs uld\n");
      }
      tc_finish_urb(uld->hcd, uld->urb, uld->status);
    } else {
      /* The urb pointer was reused for a newer submission; ignore. */
      late_warn("Ignoring later finish of URB:0x%x[%d]"
                ", urb_num doesn't match current URB:0x%x[%d]",
                (unsigned int)(uld->urb), uld->urb_num,
                (unsigned int)(uld->urb), urb_priv->urb_num);
    }
  }
  local_irq_restore(flags);
  kmem_cache_free(later_data_cache, uld);
}
2286
/* Schedule an asynchronous finish of @urb with @status, executed later
   by tc_finish_urb_later_proc(). If a later-finish is already pending
   for the URB only the status is updated. Runs with interrupts disabled
   (called from tc_urb_dequeue()), hence the GFP_ATOMIC allocation. */
static void tc_finish_urb_later(struct usb_hcd *hcd, struct urb *urb,
                                int status) {
  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  struct urb_later_data* uld;

  ASSERT(urb_priv);

  if(urb_priv->later_data != NULL) {
    /* Later-finish allready scheduled for this URB, just update status to
       return when finishing later */
    errno_dbg("Later-finish schedule change URB status:%d with new"
              " status:%d\n", urb_priv->later_data->status, status);

    urb_priv->later_data->status = status;
    return;
  }

  /* NOTE(review): GFP_ATOMIC allocation can fail and is only guarded by
     ASSERT; if ASSERT compiles out in non-debug builds a NULL uld would
     be dereferenced below — confirm ASSERT is always active here. */
  uld = kmem_cache_alloc(later_data_cache, GFP_ATOMIC);
  ASSERT(uld);

  uld->hcd = hcd;
  uld->urb = urb;
  uld->urb_num = urb_priv->urb_num;  /* detects URB reuse before the work runs */
  uld->status = status;

  INIT_DELAYED_WORK(&uld->dws, tc_finish_urb_later_proc);
  urb_priv->later_data = uld;

  /* Schedule the finishing of the URB to happen later */
  schedule_delayed_work(&uld->dws, LATER_TIMER_DELAY);
}
2318
2319 static void tc_finish_isoc_urb(struct usb_hcd *hcd, struct urb *urb,
2320 int status);
2321
/* Finish an URB: fix up actual_length and final status, unlink EP
   resources, release periodic bandwidth and give the URB back to the
   USB core. Isoc URBs are diverted to tc_finish_isoc_urb() after the
   common status mangling. Must be called with interrupts disabled
   (all callers hold local_irq_save). */
static void tc_finish_urb(struct usb_hcd *hcd, struct urb *urb, int status) {
  struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  int epid;
  char toggle;
  int urb_num;

  DBFENTER;
  ASSERT(urb_priv != NULL);
  epid = urb_priv->epid;
  /* Save the sequence number now: urb_priv is freed further down but the
     number is still needed for logging. */
  urb_num = urb_priv->urb_num;

  if(urb != activeUrbList[epid]) {
    if(urb_list_entry(urb, epid)) {
      /* Remove this URB from the list. Only happens when URB are finished
         before having been processed (dequeing) */
      urb_list_del(urb, epid);
    } else {
      tc_warn("Finishing of URB:0x%x[%d] neither active or in queue for"
              " epid:%d\n", (unsigned int)urb, urb_num, epid);
    }
  }

  /* Cancel any pending later-finish of this URB */
  if(urb_priv->later_data) {
    urb_priv->later_data->urb = NULL;
  }

  /* For an IN pipe, we always set the actual length, regardless of whether
     there was an error or not (which means the device driver can use the data
     if it wants to). */
  if(usb_pipein(urb->pipe)) {
    urb->actual_length = urb_priv->rx_offset;
  } else {
    /* Set actual_length for OUT urbs also; the USB mass storage driver seems
       to want that. */
    if (status == 0 && urb->status == -EINPROGRESS) {
      urb->actual_length = urb->transfer_buffer_length;
    } else {
      /* We wouldn't know of any partial writes if there was an error. */
      urb->actual_length = 0;
    }
  }


  /* URB status mangling */
  if(urb->status == -EINPROGRESS) {
    /* The USB core hasn't changed the status, let's set our finish status */
    urb->status = status;

    if ((status == 0) && (urb->transfer_flags & URB_SHORT_NOT_OK) &&
        usb_pipein(urb->pipe) &&
        (urb->actual_length != urb->transfer_buffer_length)) {
      /* URB_SHORT_NOT_OK means that short reads (shorter than the endpoint's
         max length) is to be treated as an error. */
      errno_dbg("Finishing URB:0x%x[%d] with SHORT_NOT_OK flag and short"
                " data:%d\n", (unsigned int)urb, urb_num,
                urb->actual_length);
      urb->status = -EREMOTEIO;
    }

    if(urb_priv->urb_state == UNLINK) {
      /* URB has been requested to be unlinked asynchronously */
      urb->status = -ECONNRESET;
      errno_dbg("Fixing unlink status of URB:0x%x[%d] to:%d\n",
                (unsigned int)urb, urb_num, urb->status);
    }
  } else {
    /* The USB Core wants to signal some error via the URB, pass it through */
  }

  /* use completely different finish function for Isoc URBs */
  if(usb_pipeisoc(urb->pipe)) {
    tc_finish_isoc_urb(hcd, urb, status);
    return;
  }

  /* Do special unlinking of EPs for Intr traffic */
  if(usb_pipeint(urb->pipe)) {
    tc_dma_unlink_intr_urb(urb);
  }

  /* Release allocated bandwidth for periodic transfers */
  if(usb_pipeint(urb->pipe) || usb_pipeisoc(urb->pipe))
    crisv10_usb_release_bandwidth(hcd,
                                  usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS,
                                  urb_priv->bandwidth);

  /* This URB is active on EP */
  if(urb == activeUrbList[epid]) {
    /* We need to fiddle with the toggle bits because the hardware doesn't do
       it for us. */
    toggle = etrax_epid_get_toggle(epid, usb_pipeout(urb->pipe));
    usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                  usb_pipeout(urb->pipe), toggle);

    /* Checks for Ctrl and Bulk EPs */
    switch(usb_pipetype(urb->pipe)) {
    case PIPE_BULK:
      /* Check so Bulk EP realy is disabled before finishing active URB */
      ASSERT((TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) ==
             IO_STATE(USB_EP_command, enable, no));
      /* Disable sub-pointer for EP to avoid next tx_interrupt() to
         process Bulk EP. */
      TxBulkEPList[epid].sub = 0;
      /* No need to wait for the DMA before changing the next pointer.
         The modulo NBR_OF_EPIDS isn't actually necessary, since we will never use
         the last one (INVALID_EPID) for actual traffic. */
      TxBulkEPList[epid].next =
        virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
      break;
    case PIPE_CONTROL:
      /* Check so Ctrl EP realy is disabled before finishing active URB */
      ASSERT((TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) ==
             IO_STATE(USB_EP_command, enable, no));
      /* Disable sub-pointer for EP to avoid next tx_interrupt() to
         process Ctrl EP. */
      TxCtrlEPList[epid].sub = 0;
      break;
    }
  }

  /* Free HC-private URB data*/
  urb_priv_free(hcd, urb);

  if(urb->status) {
    errno_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
              (unsigned int)urb, urb_num, str_dir(urb->pipe),
              str_type(urb->pipe), urb->actual_length, urb->status);
  } else {
    tc_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
           (unsigned int)urb, urb_num, str_dir(urb->pipe),
           str_type(urb->pipe), urb->actual_length, urb->status);
  }

  /* If we just finished an active URB, clear active pointer. */
  if (urb == activeUrbList[epid]) {
    /* Make URB not active on EP anymore */
    activeUrbList[epid] = NULL;

    if(urb->status == 0) {
      /* URB finished sucessfully, process queue to see if there are any more
         URBs waiting before we call completion function.*/
      if(crisv10_hcd->running) {
        /* Only process queue if USB controller is running */
        tc_dma_process_queue(epid);
      } else {
        tc_warn("No processing of queue for epid:%d, USB Controller not"
                " running\n", epid);
      }
    }
  }

  /* Hand the URB from HCD to its USB device driver, using its completion
     functions */
  usb_hcd_giveback_urb (hcd, urb, status);

  /* Check the queue once more if the URB returned with error, because we
     didn't do it before the completion function because the specification
     states that the queue should not restart until all it's unlinked
     URBs have been fully retired, with the completion functions run */
  if(crisv10_hcd->running) {
    /* Only process queue if USB controller is running */
    tc_dma_process_queue(epid);
  } else {
    tc_warn("No processing of queue for epid:%d, USB Controller not running\n",
            epid);
  }

  DBFEXIT;
}
2493
/* Isoc-specific URB finish, reached only from tc_finish_urb(). Marks any
   unprocessed packets with -EPROTO, promotes the next queued Isoc URB to
   active (or shuts the EP down when none is waiting), unlinks SBs of a
   not-yet-active Out Isoc URB, releases bandwidth and gives the URB back
   to the USB core. Runs with interrupts disabled (via tc_finish_urb). */
static void tc_finish_isoc_urb(struct usb_hcd *hcd, struct urb *urb,
                               int status) {
  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  int epid, i;
  volatile int timeout = 10000;  /* bound for the DMA hand-off busy-wait */
  int bandwidth = 0;

  ASSERT(urb_priv);
  epid = urb_priv->epid;

  ASSERT(usb_pipeisoc(urb->pipe));

  /* Set that all isoc packets have status and length set before
     completing the urb. */
  for (i = urb_priv->isoc_packet_counter; i < urb->number_of_packets; i++){
    urb->iso_frame_desc[i].actual_length = 0;
    urb->iso_frame_desc[i].status = -EPROTO;
  }

  /* Check if the URB is currently active (done or error) */
  if(urb == activeUrbList[epid]) {
    /* Check if there are another In Isoc URB queued for this epid */
    if (!list_empty(&urb_list[epid])&& !epid_state[epid].disabled) {
      /* Move it from queue to active and mark it started so Isoc transfers
         won't be interrupted.
         All Isoc URBs data transfers are already added to DMA lists so we
         don't have to insert anything in DMA lists here. */
      activeUrbList[epid] = urb_list_first(epid);
      ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_state =
        STARTED;
      urb_list_del(activeUrbList[epid], epid);

      if(urb->status) {
        errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
                  " status:%d, new waiting URB:0x%x[%d]\n",
                  (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
                  str_type(urb->pipe), urb_priv->isoc_packet_counter,
                  urb->number_of_packets, urb->status,
                  (unsigned int)activeUrbList[epid],
                  ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_num);
      }

    } else { /* No other URB queued for this epid */
      if(urb->status) {
        errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
                  " status:%d, no new URB waiting\n",
                  (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
                  str_type(urb->pipe), urb_priv->isoc_packet_counter,
                  urb->number_of_packets, urb->status);
      }

      /* Check if EP is still enabled, then shut it down. */
      if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
        isoc_dbg("Isoc EP enabled for epid:%d, disabling it\n", epid);

        /* Should only occur for In Isoc EPs where SB isn't consumed. */
        ASSERT(usb_pipein(urb->pipe));

        /* Disable it and wait for it to stop */
        TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);

        /* Ah, the luxury of busy-wait. */
        while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) &&
              (timeout-- > 0));
      }

      /* Unlink SB to say that epid is finished. */
      TxIsocEPList[epid].sub = 0;
      TxIsocEPList[epid].hw_len = 0;

      /* No URB active for EP anymore */
      activeUrbList[epid] = NULL;
    }
  } else { /* Finishing of not active URB (queued up with SBs thought) */
    isoc_warn("finish_isoc_urb (URB:0x%x %s) (%d of %d packets) status:%d,"
              " SB queued but not active\n",
              (unsigned int)urb, str_dir(urb->pipe),
              urb_priv->isoc_packet_counter, urb->number_of_packets,
              urb->status);
    if(usb_pipeout(urb->pipe)) {
      /* Finishing of not yet active Out Isoc URB needs unlinking of SBs. */
      struct USB_SB_Desc *iter_sb, *prev_sb, *next_sb;

      iter_sb = TxIsocEPList[epid].sub ?
        phys_to_virt(TxIsocEPList[epid].sub) : 0;
      prev_sb = 0;

      /* SB that is linked before this URBs first SB */
      while (iter_sb && (iter_sb != urb_priv->first_sb)) {
        prev_sb = iter_sb;
        iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
      }

      if (iter_sb == 0) {
        /* Unlink of the URB currently being transmitted. */
        prev_sb = 0;
        iter_sb = TxIsocEPList[epid].sub ? phys_to_virt(TxIsocEPList[epid].sub) : 0;
      }

      /* Advance to the last SB belonging to this URB. */
      while (iter_sb && (iter_sb != urb_priv->last_sb)) {
        iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
      }

      if (iter_sb) {
        next_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
      } else {
        /* This should only happen if the DMA has completed
           processing the SB list for this EP while interrupts
           are disabled. */
        isoc_dbg("Isoc urb not found, already sent?\n");
        next_sb = 0;
      }
      /* Splice this URB's SB chain out of the EP's list. */
      if (prev_sb) {
        prev_sb->next = next_sb ? virt_to_phys(next_sb) : 0;
      } else {
        TxIsocEPList[epid].sub = next_sb ? virt_to_phys(next_sb) : 0;
      }
    }
  }

  /* Free HC-private URB data*/
  /* Save bandwidth first: urb_priv is freed by urb_priv_free(). */
  bandwidth = urb_priv->bandwidth;
  urb_priv_free(hcd, urb);

  crisv10_usb_release_bandwidth(hcd, usb_pipeisoc(urb->pipe), bandwidth);

  /* Hand the URB from HCD to its USB device driver, using its completion
     functions */
  usb_hcd_giveback_urb (hcd, urb, status);
}
2624
2625 static __u32 urb_num = 0;
2626
2627 /* allocate and initialize URB private data */
2628 static int urb_priv_create(struct usb_hcd *hcd, struct urb *urb, int epid,
2629 int mem_flags) {
2630 struct crisv10_urb_priv *urb_priv;
2631
2632 urb_priv = kmalloc(sizeof *urb_priv, mem_flags);
2633 if (!urb_priv)
2634 return -ENOMEM;
2635 memset(urb_priv, 0, sizeof *urb_priv);
2636
2637 urb_priv->epid = epid;
2638 urb_priv->urb_state = NOT_STARTED;
2639
2640 urb->hcpriv = urb_priv;
2641 /* Assign URB a sequence number, and increment counter */
2642 urb_priv->urb_num = urb_num;
2643 urb_num++;
2644 urb_priv->bandwidth = 0;
2645 return 0;
2646 }
2647
2648 /* free URB private data */
2649 static void urb_priv_free(struct usb_hcd *hcd, struct urb *urb) {
2650 int i;
2651 struct crisv10_urb_priv *urb_priv = urb->hcpriv;
2652 ASSERT(urb_priv != 0);
2653
2654 /* Check it has any SBs linked that needs to be freed*/
2655 if(urb_priv->first_sb != NULL) {
2656 struct USB_SB_Desc *next_sb, *first_sb, *last_sb;
2657 int i = 0;
2658 first_sb = urb_priv->first_sb;
2659 last_sb = urb_priv->last_sb;
2660 ASSERT(last_sb);
2661 while(first_sb != last_sb) {
2662 next_sb = (struct USB_SB_Desc *)phys_to_virt(first_sb->next);
2663 kmem_cache_free(usb_desc_cache, first_sb);
2664 first_sb = next_sb;
2665 i++;
2666 }
2667 kmem_cache_free(usb_desc_cache, last_sb);
2668 i++;
2669 }
2670
2671 /* Check if it has any EPs in its Intr pool that also needs to be freed */
2672 if(urb_priv->intr_ep_pool_length > 0) {
2673 for(i = 0; i < urb_priv->intr_ep_pool_length; i++) {
2674 kfree(urb_priv->intr_ep_pool[i]);
2675 }
2676 /*
2677 tc_dbg("Freed %d EPs from URB:0x%x EP pool\n",
2678 urb_priv->intr_ep_pool_length, (unsigned int)urb);
2679 */
2680 }
2681
2682 kfree(urb_priv);
2683 urb->hcpriv = NULL;
2684 }
2685
2686 static int ep_priv_create(struct usb_host_endpoint *ep, int mem_flags) {
2687 struct crisv10_ep_priv *ep_priv;
2688
2689 ep_priv = kmalloc(sizeof *ep_priv, mem_flags);
2690 if (!ep_priv)
2691 return -ENOMEM;
2692 memset(ep_priv, 0, sizeof *ep_priv);
2693
2694 ep->hcpriv = ep_priv;
2695 return 0;
2696 }
2697
2698 static void ep_priv_free(struct usb_host_endpoint *ep) {
2699 struct crisv10_ep_priv *ep_priv = ep->hcpriv;
2700 ASSERT(ep_priv);
2701 kfree(ep_priv);
2702 ep->hcpriv = NULL;
2703 }
2704
2705 /*
2706 * usb_check_bandwidth():
2707 *
2708 * old_alloc is from host_controller->bandwidth_allocated in microseconds;
2709 * bustime is from calc_bus_time(), but converted to microseconds.
2710 *
2711 * returns <bustime in us> if successful,
2712 * or -ENOSPC if bandwidth request fails.
2713 *
2714 * FIXME:
2715 * This initial implementation does not use Endpoint.bInterval
2716 * in managing bandwidth allocation.
2717 * It probably needs to be expanded to use Endpoint.bInterval.
2718 * This can be done as a later enhancement (correction).
2719 *
2720 * This will also probably require some kind of
2721 * frame allocation tracking...meaning, for example,
2722 * that if multiple drivers request interrupts every 10 USB frames,
2723 * they don't all have to be allocated at
2724 * frame numbers N, N+10, N+20, etc. Some of them could be at
2725 * N+11, N+21, N+31, etc., and others at
2726 * N+12, N+22, N+32, etc.
2727 *
2728 * Similarly for isochronous transfers...
2729 *
2730 * Individual HCDs can schedule more directly ... this logic
2731 * is not correct for high speed transfers.
2732 */
2733 static int crisv10_usb_check_bandwidth(
2734 struct usb_device *dev,
2735 struct urb *urb)
2736 {
2737 unsigned int pipe = urb->pipe;
2738 long bustime;
2739 int is_in = usb_pipein (pipe);
2740 int is_iso = usb_pipeisoc (pipe);
2741 int old_alloc = dev->bus->bandwidth_allocated;
2742 int new_alloc;
2743
2744 bustime = NS_TO_US (usb_calc_bus_time (dev->speed, is_in, is_iso,
2745 usb_maxpacket (dev, pipe, !is_in)));
2746 if (is_iso)
2747 bustime /= urb->number_of_packets;
2748
2749 new_alloc = old_alloc + (int) bustime;
2750 if (new_alloc > FRAME_TIME_MAX_USECS_ALLOC) {
2751 dev_dbg (&dev->dev, "usb_check_bandwidth FAILED: %d + %ld = %d usec\n",
2752 old_alloc, bustime, new_alloc);
2753 bustime = -ENOSPC; /* report error */
2754 }
2755
2756 return bustime;
2757 }
2758
2759 /**
2760 * usb_claim_bandwidth - records bandwidth for a periodic transfer
2761 * @dev: source/target of request
2762 * @urb: request (urb->dev == dev)
2763 * @bustime: bandwidth consumed, in (average) microseconds per frame
2764 * @isoc: true iff the request is isochronous
2765 *
2766 * HCDs are expected not to overcommit periodic bandwidth, and to record such
2767 * reservations whenever endpoints are added to the periodic schedule.
2768 *
2769 * FIXME averaging per-frame is suboptimal. Better to sum over the HCD's
2770 * entire periodic schedule ... 32 frames for OHCI, 1024 for UHCI, settable
2771 * for EHCI (256/512/1024 frames, default 10