[etrax] Comment out a debug message on usb host driver
[openwrt/svn-archive/archive.git] / target / linux / etrax / files / drivers / usb / host / hc-crisv10.c
1 /*
2 *
3 * ETRAX 100LX USB Host Controller Driver
4 *
5 * Copyright (C) 2005 - 2008 Axis Communications AB
6 *
7 * Author: Konrad Eriksson <konrad.eriksson@axis.se>
8 *
9 */
10
11 #include <linux/module.h>
12 #include <linux/kernel.h>
13 #include <linux/init.h>
14 #include <linux/moduleparam.h>
15 #include <linux/spinlock.h>
16 #include <linux/usb.h>
17 #include <linux/platform_device.h>
18
19 #include <asm/io.h>
20 #include <asm/irq.h>
21 #include <asm/arch/dma.h>
22 #include <asm/arch/io_interface_mux.h>
23
24 #include "../core/hcd.h"
25 #include "../core/hub.h"
26 #include "hc-crisv10.h"
27 #include "hc-cris-dbg.h"
28
29
30 /***************************************************************************/
31 /***************************************************************************/
32 /* Host Controller settings */
33 /***************************************************************************/
34 /***************************************************************************/
35
36 #define VERSION "1.00-openwrt_diff"
37 #define COPYRIGHT "(c) 2005, 2006 Axis Communications AB"
38 #define DESCRIPTION "ETRAX 100LX USB Host Controller"
39
40 #define ETRAX_USB_HC_IRQ USB_HC_IRQ_NBR
41 #define ETRAX_USB_RX_IRQ USB_DMA_RX_IRQ_NBR
42 #define ETRAX_USB_TX_IRQ USB_DMA_TX_IRQ_NBR
43
44 /* Number of physical ports in Etrax 100LX */
45 #define USB_ROOT_HUB_PORTS 2
46
47 const char hc_name[] = "hc-crisv10";
48 const char product_desc[] = DESCRIPTION;
49
50 /* The number of epids is, among other things, used for pre-allocating
51 ctrl, bulk and isoc EP descriptors (one for each epid).
52 Assumed to be > 1 when initiating the DMA lists. */
53 #define NBR_OF_EPIDS 32
54
55 /* Support interrupt traffic intervals up to 128 ms. */
56 #define MAX_INTR_INTERVAL 128
57
58 /* If periodic traffic (intr or isoc) is to be used, then one entry in the EP
59 table must be "invalid". By this we mean that we shouldn't care about epid
60 attentions for this epid, or at least handle them differently from epid
61 attentions for "valid" epids. This define determines which one to use
62 (don't change it). */
63 #define INVALID_EPID 31
64 /* A special epid for the bulk dummys. */
65 #define DUMMY_EPID 30
66
67 /* Module settings */
68
69 MODULE_DESCRIPTION(DESCRIPTION);
70 MODULE_LICENSE("GPL");
71 MODULE_AUTHOR("Konrad Eriksson <konrad.eriksson@axis.se>");
72
73
74 /* Module parameters */
75
76 /* 0 = No ports enabled
77 1 = Only port 1 enabled (on board ethernet on devboard)
78 2 = Only port 2 enabled (external connector on devboard)
79 3 = Both ports enabled
80 */
81 static unsigned int ports = 3;
82 module_param(ports, uint, S_IRUGO);
83 MODULE_PARM_DESC(ports, "Bitmask indicating USB ports to use");
84
85
86 /***************************************************************************/
87 /***************************************************************************/
88 /* Shared global variables for this module */
89 /***************************************************************************/
90 /***************************************************************************/
91
/* EP descriptor lists for non period transfers. Must be 32-bit aligned. */
static volatile struct USB_EP_Desc TxBulkEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));

static volatile struct USB_EP_Desc TxCtrlEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));

/* EP descriptor lists for period transfers. Must be 32-bit aligned. */
static volatile struct USB_EP_Desc TxIntrEPList[MAX_INTR_INTERVAL] __attribute__ ((aligned (4)));
/* Shared zero-length OUT sub-descriptor used by the interrupt EP list */
static volatile struct USB_SB_Desc TxIntrSB_zout __attribute__ ((aligned (4)));

static volatile struct USB_EP_Desc TxIsocEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
/* Shared zero-length OUT sub-descriptor used by the isoc EP list */
static volatile struct USB_SB_Desc TxIsocSB_zout __attribute__ ((aligned (4)));

static volatile struct USB_SB_Desc TxIsocSBList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));

/* After each enabled bulk EP IN we put two disabled EP descriptors with the eol flag set,
   causing the DMA to stop the DMA channel. The first of these two has the intr flag set, which
   gives us a dma8_sub0_descr interrupt. When we receive this, we advance the DMA one step in the
   EP list and then restart the bulk channel, thus forcing a switch between bulk EP descriptors
   in each frame. */
static volatile struct USB_EP_Desc TxBulkDummyEPList[NBR_OF_EPIDS][2] __attribute__ ((aligned (4)));

/* List of URB pointers, where each points to the active URB for a epid.
   For Bulk, Ctrl and Intr this means which URB that currently is added to
   DMA lists (Isoc URBs are all directly added to DMA lists). As soon as
   URB has completed is the queue examined and the first URB in queue is
   removed and moved to the activeUrbList while its state change to STARTED and
   its transfer(s) gets added to DMA list (exception Isoc where URBs enter
   state STARTED directly and added transfers added to DMA lists). */
static struct urb *activeUrbList[NBR_OF_EPIDS];

/* Additional software state info for each epid */
static struct etrax_epid epid_state[NBR_OF_EPIDS];

/* Timer handles for bulk traffic timer used to avoid DMA bug where DMA stops
   even if there is new data waiting to be processed */
static struct timer_list bulk_start_timer = TIMER_INITIALIZER(NULL, 0, 0);
static struct timer_list bulk_eot_timer = TIMER_INITIALIZER(NULL, 0, 0);

/* We want the start timer to expire before the eot timer, because the former
   might start traffic, thus making it unnecessary for the latter to time
   out. */
/* NOTE(review): the ms values in the comments assume HZ == 100 -- confirm
   for the target kernel configuration. */
#define BULK_START_TIMER_INTERVAL (HZ/50) /* 20 ms */
#define BULK_EOT_TIMER_INTERVAL (HZ/16) /* 60 ms */

/* Delay before a URB completion happen when it's scheduled to be delayed */
#define LATER_TIMER_DELAY (HZ/50) /* 20 ms */

/* Simplifying macros for checking software state info of a epid */
/* ----------------------------------------------------------------------- */
#define epid_inuse(epid) epid_state[epid].inuse
#define epid_out_traffic(epid) epid_state[epid].out_traffic
#define epid_isoc(epid) (epid_state[epid].type == PIPE_ISOCHRONOUS ? 1 : 0)
#define epid_intr(epid) (epid_state[epid].type == PIPE_INTERRUPT ? 1 : 0)
145
146
147 /***************************************************************************/
148 /***************************************************************************/
149 /* DEBUG FUNCTIONS */
150 /***************************************************************************/
151 /***************************************************************************/
152 /* Note that these functions are always available in their "__" variants,
153 for use in error situations. The "__" missing variants are controlled by
154 the USB_DEBUG_DESC/USB_DEBUG_URB macros. */
/* Dump all fields of an URB to the console.  Always built (not gated by
   USB_DEBUG_URB) so error paths can call it.  The "[%d]" after the URB
   pointer is the driver-private urb_num from crisv10_urb_priv, or -1 if
   the URB has no private data attached yet. */
static void __dump_urb(struct urb* purb)
{
  struct crisv10_urb_priv *urb_priv = purb->hcpriv;
  int urb_num = -1;
  /* hcpriv is only set once this HCD has accepted the URB */
  if(urb_priv) {
    urb_num = urb_priv->urb_num;
  }
  printk("\nURB:0x%x[%d]\n", (unsigned int)purb, urb_num);
  printk("dev :0x%08lx\n", (unsigned long)purb->dev);
  printk("pipe :0x%08x\n", purb->pipe);
  printk("status :%d\n", purb->status);
  printk("transfer_flags :0x%08x\n", purb->transfer_flags);
  printk("transfer_buffer :0x%08lx\n", (unsigned long)purb->transfer_buffer);
  printk("transfer_buffer_length:%d\n", purb->transfer_buffer_length);
  printk("actual_length :%d\n", purb->actual_length);
  printk("setup_packet :0x%08lx\n", (unsigned long)purb->setup_packet);
  printk("start_frame :%d\n", purb->start_frame);
  printk("number_of_packets :%d\n", purb->number_of_packets);
  printk("interval :%d\n", purb->interval);
  printk("error_count :%d\n", purb->error_count);
  printk("context :0x%08lx\n", (unsigned long)purb->context);
  printk("complete :0x%08lx\n\n", (unsigned long)purb->complete);
}
178
/* Dump one DMA IN descriptor (USB_IN_Desc), field by field. */
static void __dump_in_desc(volatile struct USB_IN_Desc *in)
{
  printk("\nUSB_IN_Desc at 0x%08lx\n", (unsigned long)in);
  printk(" sw_len : 0x%04x (%d)\n", in->sw_len, in->sw_len);
  printk(" command : 0x%04x\n", in->command);
  printk(" next : 0x%08lx\n", in->next);
  printk(" buf : 0x%08lx\n", in->buf);
  printk(" hw_len : 0x%04x (%d)\n", in->hw_len, in->hw_len);
  printk(" status : 0x%04x\n\n", in->status);
}
189
190 static void __dump_sb_desc(volatile struct USB_SB_Desc *sb)
191 {
192 char tt = (sb->command & 0x30) >> 4;
193 char *tt_string;
194
195 switch (tt) {
196 case 0:
197 tt_string = "zout";
198 break;
199 case 1:
200 tt_string = "in";
201 break;
202 case 2:
203 tt_string = "out";
204 break;
205 case 3:
206 tt_string = "setup";
207 break;
208 default:
209 tt_string = "unknown (weird)";
210 }
211
212 printk(" USB_SB_Desc at 0x%08lx ", (unsigned long)sb);
213 printk(" command:0x%04x (", sb->command);
214 printk("rem:%d ", (sb->command & 0x3f00) >> 8);
215 printk("full:%d ", (sb->command & 0x40) >> 6);
216 printk("tt:%d(%s) ", tt, tt_string);
217 printk("intr:%d ", (sb->command & 0x8) >> 3);
218 printk("eot:%d ", (sb->command & 0x2) >> 1);
219 printk("eol:%d)", sb->command & 0x1);
220 printk(" sw_len:0x%04x(%d)", sb->sw_len, sb->sw_len);
221 printk(" next:0x%08lx", sb->next);
222 printk(" buf:0x%08lx\n", sb->buf);
223 }
224
225
/* Dump one EP descriptor: decode the command word bit fields (epid,
   enable, intr, eof, eol) and print hw_len plus the physical next/sub
   links. */
static void __dump_ep_desc(volatile struct USB_EP_Desc *ep)
{
  printk("USB_EP_Desc at 0x%08lx ", (unsigned long)ep);
  printk(" command:0x%04x (", ep->command);
  printk("ep_id:%d ", (ep->command & 0x1f00) >> 8);
  printk("enable:%d ", (ep->command & 0x10) >> 4);
  printk("intr:%d ", (ep->command & 0x8) >> 3);
  printk("eof:%d ", (ep->command & 0x2) >> 1);
  printk("eol:%d)", ep->command & 0x1);
  printk(" hw_len:0x%04x(%d)", ep->hw_len, ep->hw_len);
  printk(" next:0x%08lx", ep->next);
  printk(" sub:0x%08lx\n", ep->sub);
}
239
/* Walk and dump the circular EP descriptor list for the given pipe type,
   including each EP's chain of SB descriptors.  Follows the hardware
   "next"/"sub" links, which hold physical addresses, so the DMA lists
   must be in a consistent state when this is called. */
static inline void __dump_ep_list(int pipe_type)
{
  volatile struct USB_EP_Desc *ep;
  volatile struct USB_EP_Desc *first_ep;
  volatile struct USB_SB_Desc *sb;

  /* Pick the statically allocated list head for this traffic type */
  switch (pipe_type)
  {
  case PIPE_BULK:
    first_ep = &TxBulkEPList[0];
    break;
  case PIPE_CONTROL:
    first_ep = &TxCtrlEPList[0];
    break;
  case PIPE_INTERRUPT:
    first_ep = &TxIntrEPList[0];
    break;
  case PIPE_ISOCHRONOUS:
    first_ep = &TxIsocEPList[0];
    break;
  default:
    warn("Cannot dump unknown traffic type");
    return;
  }
  ep = first_ep;

  printk("\n\nDumping EP list...\n\n");

  do {
    __dump_ep_desc(ep);
    /* Cannot phys_to_virt on 0 as it turns into 80000000, which is != 0. */
    sb = ep->sub ? phys_to_virt(ep->sub) : 0;
    while (sb) {
      __dump_sb_desc(sb);
      sb = sb->next ? phys_to_virt(sb->next) : 0;
    }
    ep = (volatile struct USB_EP_Desc *)(phys_to_virt(ep->next));

  } while (ep != first_ep); /* the list is circular: stop when back at head */
}
280
/* Dump the (non-isoc) R_USB_EPT_DATA word for one epid, field by field.
   The register is accessed indirectly through R_USB_EPT_INDEX; the index
   write and data read are done with local IRQs off so an interrupt
   cannot retarget the index register in between. */
static inline void __dump_ept_data(int epid)
{
  unsigned long flags;
  __u32 r_usb_ept_data;

  if (epid < 0 || epid > 31) {
    printk("Cannot dump ept data for invalid epid %d\n", epid);
    return;
  }

  local_irq_save(flags);
  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
  /* let the index write settle before reading the data register */
  nop();
  r_usb_ept_data = *R_USB_EPT_DATA;
  local_irq_restore(flags);

  printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", r_usb_ept_data, epid);
  if (r_usb_ept_data == 0) {
    /* No need for more detailed printing. */
    return;
  }
  /* Manual decode of the R_USB_EPT_DATA bit fields */
  printk(" valid : %d\n", (r_usb_ept_data & 0x80000000) >> 31);
  printk(" hold : %d\n", (r_usb_ept_data & 0x40000000) >> 30);
  printk(" error_count_in : %d\n", (r_usb_ept_data & 0x30000000) >> 28);
  printk(" t_in : %d\n", (r_usb_ept_data & 0x08000000) >> 27);
  printk(" low_speed : %d\n", (r_usb_ept_data & 0x04000000) >> 26);
  printk(" port : %d\n", (r_usb_ept_data & 0x03000000) >> 24);
  printk(" error_code : %d\n", (r_usb_ept_data & 0x00c00000) >> 22);
  printk(" t_out : %d\n", (r_usb_ept_data & 0x00200000) >> 21);
  printk(" error_count_out : %d\n", (r_usb_ept_data & 0x00180000) >> 19);
  printk(" max_len : %d\n", (r_usb_ept_data & 0x0003f800) >> 11);
  printk(" ep : %d\n", (r_usb_ept_data & 0x00000780) >> 7);
  printk(" dev : %d\n", (r_usb_ept_data & 0x0000003f));
}
315
/* Isoc variant of __dump_ept_data(): reads R_USB_EPT_DATA_ISO (same
   address, different field layout) and decodes it with the IO_EXTRACT
   macros instead of hand-coded masks.  Same IRQ-off indexing discipline
   as the non-isoc version. */
static inline void __dump_ept_data_iso(int epid)
{
  unsigned long flags;
  __u32 ept_data;

  if (epid < 0 || epid > 31) {
    printk("Cannot dump ept data for invalid epid %d\n", epid);
    return;
  }

  local_irq_save(flags);
  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
  /* let the index write settle before reading the data register */
  nop();
  ept_data = *R_USB_EPT_DATA_ISO;
  local_irq_restore(flags);

  printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", ept_data, epid);
  if (ept_data == 0) {
    /* No need for more detailed printing. */
    return;
  }
  printk(" valid : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, valid,
                                     ept_data));
  printk(" port : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, port,
                                    ept_data));
  printk(" error_code : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code,
                                          ept_data));
  printk(" max_len : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, max_len,
                                       ept_data));
  printk(" ep : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, ep,
                                  ept_data));
  printk(" dev : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, dev,
                                   ept_data));
}
350
/* Dump R_USB_EPT_DATA for every one of the 32 hardware epids. */
static inline void __dump_ept_data_list(void)
{
  int epid;

  printk("Dumping the whole R_USB_EPT_DATA list\n");

  for (epid = 0; epid < 32; epid++) {
    __dump_ept_data(epid);
  }
}
361
/* Dump everything known about one epid: its R_USB_EPT_DATA word (isoc
   variant for isoc epids), every descriptor in the Bulk/Ctrl/Intr/Isoc
   DMA EP lists whose command word carries this epid, then the full EPT
   data list and the interrupt EP list. */
static void debug_epid(int epid) {
  int i;

  if(epid_isoc(epid)) {
    __dump_ept_data_iso(epid);
  } else {
    __dump_ept_data(epid);
  }

  printk("Bulk:\n");
  for(i = 0; i < 32; i++) {
    if(IO_EXTRACT(USB_EP_command, epid, TxBulkEPList[i].command) ==
       epid) {
      printk("%d: ", i); __dump_ep_desc(&(TxBulkEPList[i]));
    }
  }

  printk("Ctrl:\n");
  for(i = 0; i < 32; i++) {
    if(IO_EXTRACT(USB_EP_command, epid, TxCtrlEPList[i].command) ==
       epid) {
      printk("%d: ", i); __dump_ep_desc(&(TxCtrlEPList[i]));
    }
  }

  /* The interrupt list has MAX_INTR_INTERVAL entries, not one per epid */
  printk("Intr:\n");
  for(i = 0; i < MAX_INTR_INTERVAL; i++) {
    if(IO_EXTRACT(USB_EP_command, epid, TxIntrEPList[i].command) ==
       epid) {
      printk("%d: ", i); __dump_ep_desc(&(TxIntrEPList[i]));
    }
  }

  printk("Isoc:\n");
  for(i = 0; i < 32; i++) {
    if(IO_EXTRACT(USB_EP_command, epid, TxIsocEPList[i].command) ==
       epid) {
      printk("%d: ", i); __dump_ep_desc(&(TxIsocEPList[i]));
    }
  }

  __dump_ept_data_list();
  __dump_ep_list(PIPE_INTERRUPT);
  printk("\n\n");
}
407
408
409
410 char* hcd_status_to_str(__u8 bUsbStatus) {
411 static char hcd_status_str[128];
412 hcd_status_str[0] = '\0';
413 if(bUsbStatus & IO_STATE(R_USB_STATUS, ourun, yes)) {
414 strcat(hcd_status_str, "ourun ");
415 }
416 if(bUsbStatus & IO_STATE(R_USB_STATUS, perror, yes)) {
417 strcat(hcd_status_str, "perror ");
418 }
419 if(bUsbStatus & IO_STATE(R_USB_STATUS, device_mode, yes)) {
420 strcat(hcd_status_str, "device_mode ");
421 }
422 if(bUsbStatus & IO_STATE(R_USB_STATUS, host_mode, yes)) {
423 strcat(hcd_status_str, "host_mode ");
424 }
425 if(bUsbStatus & IO_STATE(R_USB_STATUS, started, yes)) {
426 strcat(hcd_status_str, "started ");
427 }
428 if(bUsbStatus & IO_STATE(R_USB_STATUS, running, yes)) {
429 strcat(hcd_status_str, "running ");
430 }
431 return hcd_status_str;
432 }
433
434
435 char* sblist_to_str(struct USB_SB_Desc* sb_desc) {
436 static char sblist_to_str_buff[128];
437 char tmp[32], tmp2[32];
438 sblist_to_str_buff[0] = '\0';
439 while(sb_desc != NULL) {
440 switch(IO_EXTRACT(USB_SB_command, tt, sb_desc->command)) {
441 case 0: sprintf(tmp, "zout"); break;
442 case 1: sprintf(tmp, "in"); break;
443 case 2: sprintf(tmp, "out"); break;
444 case 3: sprintf(tmp, "setup"); break;
445 }
446 sprintf(tmp2, "(%s %d)", tmp, sb_desc->sw_len);
447 strcat(sblist_to_str_buff, tmp2);
448 if(sb_desc->next != 0) {
449 sb_desc = phys_to_virt(sb_desc->next);
450 } else {
451 sb_desc = NULL;
452 }
453 }
454 return sblist_to_str_buff;
455 }
456
457 char* port_status_to_str(__u16 wPortStatus) {
458 static char port_status_str[128];
459 port_status_str[0] = '\0';
460 if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, connected, yes)) {
461 strcat(port_status_str, "connected ");
462 }
463 if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) {
464 strcat(port_status_str, "enabled ");
465 }
466 if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, suspended, yes)) {
467 strcat(port_status_str, "suspended ");
468 }
469 if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, reset, yes)) {
470 strcat(port_status_str, "reset ");
471 }
472 if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, speed, full)) {
473 strcat(port_status_str, "full-speed ");
474 } else {
475 strcat(port_status_str, "low-speed ");
476 }
477 return port_status_str;
478 }
479
480
481 char* endpoint_to_str(struct usb_endpoint_descriptor *ed) {
482 static char endpoint_to_str_buff[128];
483 char tmp[32];
484 int epnum = ed->bEndpointAddress & 0x0F;
485 int dir = ed->bEndpointAddress & 0x80;
486 int type = ed->bmAttributes & 0x03;
487 endpoint_to_str_buff[0] = '\0';
488 sprintf(endpoint_to_str_buff, "ep:%d ", epnum);
489 switch(type) {
490 case 0:
491 sprintf(tmp, " ctrl");
492 break;
493 case 1:
494 sprintf(tmp, " isoc");
495 break;
496 case 2:
497 sprintf(tmp, " bulk");
498 break;
499 case 3:
500 sprintf(tmp, " intr");
501 break;
502 }
503 strcat(endpoint_to_str_buff, tmp);
504 if(dir) {
505 sprintf(tmp, " in");
506 } else {
507 sprintf(tmp, " out");
508 }
509 strcat(endpoint_to_str_buff, tmp);
510
511 return endpoint_to_str_buff;
512 }
513
/* Debug helper functions for Transfer Controller */
/* Render a pipe handle as "dir:<d> type:<t> dev:<n> ep:<n>".  Returns a
   static buffer, so the result must be consumed before the next call
   (not reentrant). */
char* pipe_to_str(unsigned int pipe) {
  static char pipe_to_str_buff[128];
  /* Single format call; the output bytes are identical to the original
     piecewise sprintf/strcat sequence. */
  sprintf(pipe_to_str_buff, "dir:%s type:%s dev:%d ep:%d",
          str_dir(pipe), str_type(pipe),
          usb_pipedevice(pipe), usb_pipeendpoint(pipe));
  return pipe_to_str_buff;
}
528
529
#define USB_DEBUG_DESC 1

#ifdef USB_DEBUG_DESC
#define dump_in_desc(x) __dump_in_desc(x)
/* BUGFIX: the replacement list used "..." literally, which is not valid
   C; variadic macro arguments must be forwarded with __VA_ARGS__. */
#define dump_sb_desc(...) __dump_sb_desc(__VA_ARGS__)
#define dump_ep_desc(x) __dump_ep_desc(x)
#define dump_ept_data(x) __dump_ept_data(x)
#else
#define dump_in_desc(...) do {} while (0)
#define dump_sb_desc(...) do {} while (0)
#define dump_ep_desc(...) do {} while (0)
/* Also provide the no-op variant of dump_ept_data() so callers still
   build when USB_DEBUG_DESC is disabled. */
#define dump_ept_data(...) do {} while (0)
#endif
542
543
/* Uncomment this to enable massive function call trace
   #define USB_DEBUG_TRACE */

#ifdef USB_DEBUG_TRACE
#define DBFENTER (printk(": Entering: %s\n", __FUNCTION__))
#define DBFEXIT (printk(": Exiting: %s\n", __FUNCTION__))
#else
#define DBFENTER do {} while (0)
#define DBFEXIT do {} while (0)
#endif

/* Panic if x is not 32-bit (DWORD) aligned.  Wrapped in do { } while (0)
   so the macro expands to a single statement: the original bare if-body
   could silently capture a following "else". */
#define CHECK_ALIGN(x) do { if (((__u32)(x)) & 0x00000003) \
  panic("Alignment check (DWORD) failed at %s:%s:%d\n", __FILE__, __FUNCTION__, __LINE__); } while (0)

/* Most helpful debugging aid */
#define ASSERT(expr) ((void) ((expr) ? 0 : (err("assert failed at: %s %d",__FUNCTION__, __LINE__))))
560
561
562 /***************************************************************************/
563 /***************************************************************************/
564 /* Forward declarations */
565 /***************************************************************************/
566 /***************************************************************************/
567 void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg *reg);
568 void crisv10_hcd_port_status_irq(struct crisv10_irq_reg *reg);
569 void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg *reg);
570 void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg *reg);
571
572 void rh_port_status_change(__u16[]);
573 int rh_clear_port_feature(__u8, __u16);
574 int rh_set_port_feature(__u8, __u16);
575 static void rh_disable_port(unsigned int port);
576
577 static void check_finished_bulk_tx_epids(struct usb_hcd *hcd,
578 int timer);
579
580 static int tc_setup_epid(struct usb_host_endpoint *ep, struct urb *urb,
581 int mem_flags);
582 static void tc_free_epid(struct usb_host_endpoint *ep);
583 static int tc_allocate_epid(void);
584 static void tc_finish_urb(struct usb_hcd *hcd, struct urb *urb, int status);
585 static void tc_finish_urb_later(struct usb_hcd *hcd, struct urb *urb,
586 int status);
587
588 static int urb_priv_create(struct usb_hcd *hcd, struct urb *urb, int epid,
589 int mem_flags);
590 static void urb_priv_free(struct usb_hcd *hcd, struct urb *urb);
591
592 static int crisv10_usb_check_bandwidth(struct usb_device *dev,struct urb *urb);
593 static void crisv10_usb_claim_bandwidth(
594 struct usb_device *dev, struct urb *urb, int bustime, int isoc);
595 static void crisv10_usb_release_bandwidth(
596 struct usb_hcd *hcd, int isoc, int bandwidth);
597
598 static inline struct urb *urb_list_first(int epid);
599 static inline void urb_list_add(struct urb *urb, int epid,
600 int mem_flags);
601 static inline urb_entry_t *urb_list_entry(struct urb *urb, int epid);
602 static inline void urb_list_del(struct urb *urb, int epid);
603 static inline void urb_list_move_last(struct urb *urb, int epid);
604 static inline struct urb *urb_list_next(struct urb *urb, int epid);
605
606 int create_sb_for_urb(struct urb *urb, int mem_flags);
607 int init_intr_urb(struct urb *urb, int mem_flags);
608
609 static inline void etrax_epid_set(__u8 index, __u32 data);
610 static inline void etrax_epid_clear_error(__u8 index);
611 static inline void etrax_epid_set_toggle(__u8 index, __u8 dirout,
612 __u8 toggle);
613 static inline __u8 etrax_epid_get_toggle(__u8 index, __u8 dirout);
614 static inline __u32 etrax_epid_get(__u8 index);
615
616 /* We're accessing the same register position in Etrax so
617 when we do full access the internal difference doesn't matter */
618 #define etrax_epid_iso_set(index, data) etrax_epid_set(index, data)
619 #define etrax_epid_iso_get(index) etrax_epid_get(index)
620
621
622 static void tc_dma_process_isoc_urb(struct urb *urb);
623 static void tc_dma_process_queue(int epid);
624 static void tc_dma_unlink_intr_urb(struct urb *urb);
625 static irqreturn_t tc_dma_tx_interrupt(int irq, void *vhc);
626 static irqreturn_t tc_dma_rx_interrupt(int irq, void *vhc);
627
628 static void tc_bulk_start_timer_func(unsigned long dummy);
629 static void tc_bulk_eot_timer_func(unsigned long dummy);
630
631
632 /*************************************************************/
633 /*************************************************************/
634 /* Host Controler Driver block */
635 /*************************************************************/
636 /*************************************************************/
637
638 /* HCD operations */
639 static irqreturn_t crisv10_hcd_top_irq(int irq, void*);
640 static int crisv10_hcd_reset(struct usb_hcd *);
641 static int crisv10_hcd_start(struct usb_hcd *);
642 static void crisv10_hcd_stop(struct usb_hcd *);
643 #ifdef CONFIG_PM
644 static int crisv10_hcd_suspend(struct device *, u32, u32);
645 static int crisv10_hcd_resume(struct device *, u32);
646 #endif /* CONFIG_PM */
647 static int crisv10_hcd_get_frame(struct usb_hcd *);
648
649 static int tc_urb_enqueue(struct usb_hcd *, struct urb *, gfp_t mem_flags);
650 static int tc_urb_dequeue(struct usb_hcd *, struct urb *, int);
651 static void tc_endpoint_disable(struct usb_hcd *, struct usb_host_endpoint *ep);
652
653 static int rh_status_data_request(struct usb_hcd *, char *);
654 static int rh_control_request(struct usb_hcd *, u16, u16, u16, char*, u16);
655
656 #ifdef CONFIG_PM
657 static int crisv10_hcd_hub_suspend(struct usb_hcd *);
658 static int crisv10_hcd_hub_resume(struct usb_hcd *);
659 #endif /* CONFIG_PM */
660 #ifdef CONFIG_USB_OTG
661 static int crisv10_hcd_start_port_reset(struct usb_hcd *, unsigned);
662 #endif /* CONFIG_USB_OTG */
663
664 /* host controller driver interface */
665 static const struct hc_driver crisv10_hc_driver =
666 {
667 .description = hc_name,
668 .product_desc = product_desc,
669 .hcd_priv_size = sizeof(struct crisv10_hcd),
670
671 /* Attaching IRQ handler manualy in probe() */
672 /* .irq = crisv10_hcd_irq, */
673
674 .flags = HCD_USB11,
675
676 /* called to init HCD and root hub */
677 .reset = crisv10_hcd_reset,
678 .start = crisv10_hcd_start,
679
680 /* cleanly make HCD stop writing memory and doing I/O */
681 .stop = crisv10_hcd_stop,
682
683 /* return current frame number */
684 .get_frame_number = crisv10_hcd_get_frame,
685
686
687 /* Manage i/o requests via the Transfer Controller */
688 .urb_enqueue = tc_urb_enqueue,
689 .urb_dequeue = tc_urb_dequeue,
690
691 /* hw synch, freeing endpoint resources that urb_dequeue can't */
692 .endpoint_disable = tc_endpoint_disable,
693
694
695 /* Root Hub support */
696 .hub_status_data = rh_status_data_request,
697 .hub_control = rh_control_request,
698 #ifdef CONFIG_PM
699 .hub_suspend = rh_suspend_request,
700 .hub_resume = rh_resume_request,
701 #endif /* CONFIG_PM */
702 #ifdef CONFIG_USB_OTG
703 .start_port_reset = crisv10_hcd_start_port_reset,
704 #endif /* CONFIG_USB_OTG */
705 };
706
707
708 /*
709 * conversion between pointers to a hcd and the corresponding
710 * crisv10_hcd
711 */
712
/* Return this HCD's private data area, which usbcore allocated right
   after the usb_hcd (hcd_priv_size bytes, see crisv10_hc_driver). */
static inline struct crisv10_hcd *hcd_to_crisv10_hcd(struct usb_hcd *hcd)
{
  return (struct crisv10_hcd *) hcd->hcd_priv;
}
717
/* Inverse of hcd_to_crisv10_hcd(): recover the containing usb_hcd from a
   pointer to its hcd_priv area. */
static inline struct usb_hcd *crisv10_hcd_to_hcd(struct crisv10_hcd *hcd)
{
  return container_of((void *) hcd, struct usb_hcd, hcd_priv);
}
722
/* check if specified port is in use */
static inline int port_in_use(unsigned int port)
{
  /* "ports" is the module-parameter bitmask (bit 0 = port 1, bit 1 =
     port 2).  Returns the raw masked value -- non-zero means in use. */
  return ports & (1 << port);
}
728
729 /* number of ports in use */
730 static inline unsigned int num_ports(void)
731 {
732 unsigned int i, num = 0;
733 for (i = 0; i < USB_ROOT_HUB_PORTS; i++)
734 if (port_in_use(i))
735 num++;
736 return num;
737 }
738
/* map hub port number to the port number used internally by the HC */
/* "port" is 1-based and counts only enabled ports; the result is the
   0-based physical port index.
   NOTE(review): returns -1 on failure, but the return type is unsigned
   int, so callers actually see UINT_MAX -- verify callers compare
   against (unsigned int)-1 rather than testing "< 0". */
static inline unsigned int map_port(unsigned int port)
{
  unsigned int i, num = 0;
  for (i = 0; i < USB_ROOT_HUB_PORTS; i++)
    if (port_in_use(i))
      if (++num == port)
        return i;
  return -1;
}
749
750 /* size of descriptors in slab cache */
751 #ifndef MAX
752 #define MAX(x, y) ((x) > (y) ? (x) : (y))
753 #endif
754
755
756 /******************************************************************/
757 /* Hardware Interrupt functions */
758 /******************************************************************/
759
/* Fast interrupt handler for HC */
/* Latches all interrupt-related registers into a crisv10_irq_reg
   snapshot and dispatches to the per-cause handlers in priority order.
   The register reads themselves acknowledge (clear) the interrupts, and
   the read order matters -- see the comments at each read.  The whole
   sequence runs with local IRQs disabled. */
static irqreturn_t crisv10_hcd_top_irq(int irq, void *vcd)
{
  struct usb_hcd *hcd = vcd;
  struct crisv10_irq_reg reg;
  __u32 irq_mask;
  unsigned long flags;

  DBFENTER;

  ASSERT(hcd != NULL);
  reg.hcd = hcd;

  /* Turn of other interrupts while handling these sensitive cases */
  local_irq_save(flags);

  /* Read out which interrupts that are flaged */
  irq_mask = *R_USB_IRQ_MASK_READ;
  reg.r_usb_irq_mask_read = irq_mask;

  /* Reading R_USB_STATUS clears the ctl_status interrupt. Note that
     R_USB_STATUS must be read before R_USB_EPID_ATTN since reading the latter
     clears the ourun and perror fields of R_USB_STATUS. */
  reg.r_usb_status = *R_USB_STATUS;

  /* Reading R_USB_EPID_ATTN clears the iso_eof, bulk_eot and epid_attn
     interrupts. */
  reg.r_usb_epid_attn = *R_USB_EPID_ATTN;

  /* Reading R_USB_RH_PORT_STATUS_1 and R_USB_RH_PORT_STATUS_2 clears the
     port_status interrupt. */
  reg.r_usb_rh_port_status_1 = *R_USB_RH_PORT_STATUS_1;
  reg.r_usb_rh_port_status_2 = *R_USB_RH_PORT_STATUS_2;

  /* Reading R_USB_FM_NUMBER clears the sof interrupt. */
  /* Note: the lower 11 bits contain the actual frame number, sent with each
     sof. */
  reg.r_usb_fm_number = *R_USB_FM_NUMBER;

  /* Interrupts are handled in order of priority. */
  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, port_status)) {
    crisv10_hcd_port_status_irq(&reg);
  }
  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, epid_attn)) {
    crisv10_hcd_epid_attn_irq(&reg);
  }
  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, ctl_status)) {
    crisv10_hcd_ctl_status_irq(&reg);
  }
  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, iso_eof)) {
    crisv10_hcd_isoc_eof_irq(&reg);
  }
  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, bulk_eot)) {
    /* Update/restart the bulk start timer since obviously the channel is
       running. */
    mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
    /* Update/restart the bulk eot timer since we just received an bulk eot
       interrupt. */
    mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);

    /* Check for finished bulk transfers on epids */
    check_finished_bulk_tx_epids(hcd, 0);
  }
  local_irq_restore(flags);

  DBFEXIT;
  return IRQ_HANDLED;
}
828
829
830 void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg *reg) {
831 struct usb_hcd *hcd = reg->hcd;
832 struct crisv10_urb_priv *urb_priv;
833 int epid;
834 DBFENTER;
835
836 for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
837 if (test_bit(epid, (void *)&reg->r_usb_epid_attn)) {
838 struct urb *urb;
839 __u32 ept_data;
840 int error_code;
841
842 if (epid == DUMMY_EPID || epid == INVALID_EPID) {
843 /* We definitely don't care about these ones. Besides, they are
844 always disabled, so any possible disabling caused by the
845 epid attention interrupt is irrelevant. */
846 warn("Got epid_attn for INVALID_EPID or DUMMY_EPID (%d).", epid);
847 continue;
848 }
849
850 if(!epid_inuse(epid)) {
851 irq_err("Epid attention on epid:%d that isn't in use\n", epid);
852 printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
853 debug_epid(epid);
854 continue;
855 }
856
857 /* Note that although there are separate R_USB_EPT_DATA and
858 R_USB_EPT_DATA_ISO registers, they are located at the same address and
859 are of the same size. In other words, this read should be ok for isoc
860 also. */
861 ept_data = etrax_epid_get(epid);
862 error_code = IO_EXTRACT(R_USB_EPT_DATA, error_code, ept_data);
863
864 /* Get the active URB for this epid. We blatantly assume
865 that only this URB could have caused the epid attention. */
866 urb = activeUrbList[epid];
867 if (urb == NULL) {
868 irq_err("Attention on epid:%d error:%d with no active URB.\n",
869 epid, error_code);
870 printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
871 debug_epid(epid);
872 continue;
873 }
874
875 urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
876 ASSERT(urb_priv);
877
878 /* Using IO_STATE_VALUE on R_USB_EPT_DATA should be ok for isoc also. */
879 if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
880
881 /* Isoc traffic doesn't have error_count_in/error_count_out. */
882 if ((usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS) &&
883 (IO_EXTRACT(R_USB_EPT_DATA, error_count_in, ept_data) == 3 ||
884 IO_EXTRACT(R_USB_EPT_DATA, error_count_out, ept_data) == 3)) {
885 /* Check if URB allready is marked for late-finish, we can get
886 several 3rd error for Intr traffic when a device is unplugged */
887 if(urb_priv->later_data == NULL) {
888 /* 3rd error. */
889 irq_warn("3rd error for epid:%d (%s %s) URB:0x%x[%d]\n", epid,
890 str_dir(urb->pipe), str_type(urb->pipe),
891 (unsigned int)urb, urb_priv->urb_num);
892
893 tc_finish_urb_later(hcd, urb, -EPROTO);
894 }
895
896 } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {
897 irq_warn("Perror for epid:%d\n", epid);
898 printk("FM_NUMBER: %d\n", reg->r_usb_fm_number & 0x7ff);
899 printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
900 __dump_urb(urb);
901 debug_epid(epid);
902
903 if (!(ept_data & IO_MASK(R_USB_EPT_DATA, valid))) {
904 /* invalid ep_id */
905 panic("Perror because of invalid epid."
906 " Deconfigured too early?");
907 } else {
908 /* past eof1, near eof, zout transfer, setup transfer */
909 /* Dump the urb and the relevant EP descriptor. */
910 panic("Something wrong with DMA descriptor contents."
911 " Too much traffic inserted?");
912 }
913 } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
914 /* buffer ourun */
915 printk("FM_NUMBER: %d\n", reg->r_usb_fm_number & 0x7ff);
916 printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
917 __dump_urb(urb);
918 debug_epid(epid);
919
920 panic("Buffer overrun/underrun for epid:%d. DMA too busy?", epid);
921 } else {
922 irq_warn("Attention on epid:%d (%s %s) with no error code\n", epid,
923 str_dir(urb->pipe), str_type(urb->pipe));
924 printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
925 __dump_urb(urb);
926 debug_epid(epid);
927 }
928
929 } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
930 stall)) {
931 /* Not really a protocol error, just says that the endpoint gave
932 a stall response. Note that error_code cannot be stall for isoc. */
933 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
934 panic("Isoc traffic cannot stall");
935 }
936
937 tc_dbg("Stall for epid:%d (%s %s) URB:0x%x\n", epid,
938 str_dir(urb->pipe), str_type(urb->pipe), (unsigned int)urb);
939 tc_finish_urb(hcd, urb, -EPIPE);
940
941 } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
942 bus_error)) {
943 /* Two devices responded to a transaction request. Must be resolved
944 by software. FIXME: Reset ports? */
945 panic("Bus error for epid %d."
946 " Two devices responded to transaction request\n",
947 epid);
948
949 } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
950 buffer_error)) {
951 /* DMA overrun or underrun. */
952 irq_warn("Buffer overrun/underrun for epid:%d (%s %s)\n", epid,
953 str_dir(urb->pipe), str_type(urb->pipe));
954
955 /* It seems that error_code = buffer_error in
956 R_USB_EPT_DATA/R_USB_EPT_DATA_ISO and ourun = yes in R_USB_STATUS
957 are the same error. */
958 tc_finish_urb(hcd, urb, -EPROTO);
959 } else {
960 irq_warn("Unknown attention on epid:%d (%s %s)\n", epid,
961 str_dir(urb->pipe), str_type(urb->pipe));
962 dump_ept_data(epid);
963 }
964 }
965 }
966 DBFEXIT;
967 }
968
969 void crisv10_hcd_port_status_irq(struct crisv10_irq_reg *reg)
970 {
971 __u16 port_reg[USB_ROOT_HUB_PORTS];
972 DBFENTER;
973 port_reg[0] = reg->r_usb_rh_port_status_1;
974 port_reg[1] = reg->r_usb_rh_port_status_2;
975 rh_port_status_change(port_reg);
976 DBFEXIT;
977 }
978
/* Isoc end-of-frame interrupt: walk all isoc epids and start any not-yet
   started IN transfer whose requested urb->start_frame matches the current
   frame number (only relevant when URB_ISO_ASAP is not set). */
void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg *reg)
{
  int epid;
  struct urb *urb;
  struct crisv10_urb_priv *urb_priv;

  DBFENTER;

  for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {

    /* Only check epids that are in use, is valid and has SB list */
    if (!epid_inuse(epid) || epid == INVALID_EPID ||
        TxIsocEPList[epid].sub == 0 || epid == DUMMY_EPID) {
      /* Nothing here to see. */
      continue;
    }
    ASSERT(epid_isoc(epid));

    /* Get the active URB for this epid (if any). */
    urb = activeUrbList[epid];
    if (urb == 0) {
      isoc_warn("Ignoring NULL urb for epid:%d\n", epid);
      continue;
    }
    /* Only IN traffic is started from here; OUT epids are skipped. */
    if(!epid_out_traffic(epid)) {
      /* Sanity check. */
      ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);

      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
      ASSERT(urb_priv);

      if (urb_priv->urb_state == NOT_STARTED) {
        /* If ASAP is not set and urb->start_frame is the current frame,
           start the transfer. The frame counter is 11 bits wide. */
        if (!(urb->transfer_flags & URB_ISO_ASAP) &&
            (urb->start_frame == (*R_USB_FM_NUMBER & 0x7ff))) {
          /* EP should not be enabled if we're waiting for start_frame */
          ASSERT((TxIsocEPList[epid].command &
                  IO_STATE(USB_EP_command, enable, yes)) == 0);

          isoc_warn("Enabling isoc IN EP descr for epid %d\n", epid);
          TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);

          /* This urb is now active. */
          urb_priv->urb_state = STARTED;
          continue;
        }
      }
    }
  }

  DBFEXIT;
}
1032
/* Controller status interrupt: panics on fatal conditions (ourun, perror,
   device mode), tracks whether the controller is in the running state, and
   marks that an IRQ was seen so the USB core trusts our IRQ wiring. */
void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg *reg)
{
  struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(reg->hcd);

  DBFENTER;
  ASSERT(crisv10_hcd);

  /* irq_dbg("ctr_status_irq, controller status: %s\n",
     hcd_status_to_str(reg->r_usb_status));*/

  /* FIXME: What should we do if we get ourun or perror? Dump the EP and SB
     list for the corresponding epid? */
  if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
    panic("USB controller got ourun.");
  }
  if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {

    /* Before, etrax_usb_do_intr_recover was called on this epid if it was
       an interrupt pipe. I don't see how re-enabling all EP descriptors
       will help if there was a programming error. */
    panic("USB controller got perror.");
  }

  /* Keep track of USB Controller, if it's running or not */
  if(reg->r_usb_status & IO_STATE(R_USB_STATUS, running, yes)) {
    crisv10_hcd->running = 1;
  } else {
    crisv10_hcd->running = 0;
  }

  if (reg->r_usb_status & IO_MASK(R_USB_STATUS, device_mode)) {
    /* We should never operate in device mode. */
    panic("USB controller in device mode.");
  }

  /* Set the flag to avoid getting "Unlink after no-IRQ? Controller is probably
     using the wrong IRQ" from hcd_unlink_urb() in drivers/usb/core/hcd.c */
  set_bit(HCD_FLAG_SAW_IRQ, &reg->hcd->flags);

  DBFEXIT;
}
1074
1075
1076 /******************************************************************/
1077 /* Host Controller interface functions */
1078 /******************************************************************/
1079
1080 static inline void crisv10_ready_wait(void) {
1081 volatile int timeout = 10000;
1082 /* Check the busy bit of USB controller in Etrax */
1083 while((*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy)) &&
1084 (timeout-- > 0));
1085 if(timeout == 0) {
1086 warn("Timeout while waiting for USB controller to be idle\n");
1087 }
1088 }
1089
/* reset host controller (hcd_driver .reset hook). The actual hardware reset
   is currently disabled (commented out below); this only logs and reports
   success. NOTE(review): presumably the reset is performed elsewhere or was
   found harmful here — confirm before re-enabling. */
static int crisv10_hcd_reset(struct usb_hcd *hcd)
{
  DBFENTER;
  hcd_dbg(hcd, "reset\n");


  /* Reset the USB interface. */
  /*
  *R_USB_COMMAND =
    IO_STATE(R_USB_COMMAND, port_sel, nop) |
    IO_STATE(R_USB_COMMAND, port_cmd, reset) |
    IO_STATE(R_USB_COMMAND, ctrl_cmd, reset);
  nop();
  */
  DBFEXIT;
  return 0;
}
1108
/* start host controller: waits for the controller to be idle, then issues
   host_run so USB traffic processing begins, and tells the USB core we are
   running. Always returns 0. */
static int crisv10_hcd_start(struct usb_hcd *hcd)
{
  DBFENTER;
  hcd_dbg(hcd, "start\n");

  /* Controller must not be busy before a command is written. */
  crisv10_ready_wait();

  /* Start processing of USB traffic. */
  *R_USB_COMMAND =
    IO_STATE(R_USB_COMMAND, port_sel, nop) |
    IO_STATE(R_USB_COMMAND, port_cmd, reset) |
    IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);

  /* Let the register write settle before continuing. */
  nop();

  hcd->state = HC_STATE_RUNNING;

  DBFEXIT;
  return 0;
}
1130
/* stop host controller: delegates to crisv10_hcd_reset() (which is itself
   currently a logging no-op — see its comment). */
static void crisv10_hcd_stop(struct usb_hcd *hcd)
{
  DBFENTER;
  hcd_dbg(hcd, "stop\n");
  crisv10_hcd_reset(hcd);
  DBFEXIT;
}
1139
/* return the current frame number (11-bit USB frame counter read straight
   from the R_USB_FM_NUMBER hardware register). */
static int crisv10_hcd_get_frame(struct usb_hcd *hcd)
{
  DBFENTER;
  DBFEXIT;
  return (*R_USB_FM_NUMBER & 0x7ff);
}
1147
1148 #ifdef CONFIG_USB_OTG
1149
/* OTG-only hook to begin a port reset; currently unimplemented. */
static int crisv10_hcd_start_port_reset(struct usb_hcd *hcd, unsigned port)
{
  return 0; /* no-op for now */
}
1154
1155 #endif /* CONFIG_USB_OTG */
1156
1157
1158 /******************************************************************/
1159 /* Root Hub functions */
1160 /******************************************************************/
1161
/* root hub status: no over-current / local-power conditions, ever. */
static const struct usb_hub_status rh_hub_status =
{
	.wHubStatus =		0,
	.wHubChange =		0,
};

/* root hub descriptor (hub-class descriptor, type 0x29); bNbrPorts is
   patched with the runtime port count in rh_control_request(). */
static const u8 rh_hub_descr[] =
{
	0x09,			/* bDescLength	       */
	0x29,			/* bDescriptorType     */
	USB_ROOT_HUB_PORTS,	/* bNbrPorts	       */
	0x00,			/* wHubCharacteristics */
	0x00,
	0x01,			/* bPwrOn2pwrGood      */
	0x00,			/* bHubContrCurrent    */
	0x00,			/* DeviceRemovable     */
	0xff			/* PortPwrCtrlMask     */
};

/* Actual holder of root hub status*/
struct crisv10_rh rh;
1185
1186 /* Initialize root hub data structures (called from dvdrv_hcd_probe()) */
1187 int rh_init(void) {
1188 int i;
1189 /* Reset port status flags */
1190 for (i = 0; i < USB_ROOT_HUB_PORTS; i++) {
1191 rh.wPortChange[i] = 0;
1192 rh.wPortStatusPrev[i] = 0;
1193 }
1194 return 0;
1195 }
1196
/* Port-status bits the root hub tracks changes for: connection, enable,
   suspend and reset. */
#define RH_FEAT_MASK ((1<<USB_PORT_FEAT_CONNECTION)|\
		      (1<<USB_PORT_FEAT_ENABLE)|\
		      (1<<USB_PORT_FEAT_SUSPEND)|\
		      (1<<USB_PORT_FEAT_RESET))
1201
1202 /* Handle port status change interrupt (called from bottom part interrupt) */
1203 void rh_port_status_change(__u16 port_reg[]) {
1204 int i;
1205 __u16 wChange;
1206
1207 for(i = 0; i < USB_ROOT_HUB_PORTS; i++) {
1208 /* Xor out changes since last read, masked for important flags */
1209 wChange = (port_reg[i] & RH_FEAT_MASK) ^ rh.wPortStatusPrev[i];
1210 /* Or changes together with (if any) saved changes */
1211 rh.wPortChange[i] |= wChange;
1212 /* Save new status */
1213 rh.wPortStatusPrev[i] = port_reg[i];
1214
1215 if(wChange) {
1216 rh_dbg("Interrupt port_status change port%d: %s Current-status:%s\n", i+1,
1217 port_status_to_str(wChange),
1218 port_status_to_str(port_reg[i]));
1219 }
1220 }
1221 }
1222
1223 /* Construct port status change bitmap for the root hub */
1224 static int rh_status_data_request(struct usb_hcd *hcd, char *buf)
1225 {
1226 struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
1227 unsigned int i;
1228
1229 DBFENTER;
1230 /*
1231 * corresponds to hub status change EP (USB 2.0 spec section 11.13.4)
1232 * return bitmap indicating ports with status change
1233 */
1234 *buf = 0;
1235 spin_lock(&crisv10_hcd->lock);
1236 for (i = 1; i <= crisv10_hcd->num_ports; i++) {
1237 if (rh.wPortChange[map_port(i)]) {
1238 *buf |= (1 << i);
1239 rh_dbg("rh_status_data_request, change on port %d: %s Current Status: %s\n", i,
1240 port_status_to_str(rh.wPortChange[map_port(i)]),
1241 port_status_to_str(rh.wPortStatusPrev[map_port(i)]));
1242 }
1243 }
1244 spin_unlock(&crisv10_hcd->lock);
1245 DBFEXIT;
1246 return *buf == 0 ? 0 : 1;
1247 }
1248
1249 /* Handle a control request for the root hub (called from hcd_driver) */
1250 static int rh_control_request(struct usb_hcd *hcd,
1251 u16 typeReq,
1252 u16 wValue,
1253 u16 wIndex,
1254 char *buf,
1255 u16 wLength) {
1256
1257 struct crisv10_hcd *crisv10_hcd = hcd_to_crisv10_hcd(hcd);
1258 int retval = 0;
1259 int len;
1260 DBFENTER;
1261
1262 switch (typeReq) {
1263 case GetHubDescriptor:
1264 rh_dbg("GetHubDescriptor\n");
1265 len = min_t(unsigned int, sizeof rh_hub_descr, wLength);
1266 memcpy(buf, rh_hub_descr, len);
1267 buf[2] = crisv10_hcd->num_ports;
1268 break;
1269 case GetHubStatus:
1270 rh_dbg("GetHubStatus\n");
1271 len = min_t(unsigned int, sizeof rh_hub_status, wLength);
1272 memcpy(buf, &rh_hub_status, len);
1273 break;
1274 case GetPortStatus:
1275 if (!wIndex || wIndex > crisv10_hcd->num_ports)
1276 goto error;
1277 rh_dbg("GetportStatus, port:%d change:%s status:%s\n", wIndex,
1278 port_status_to_str(rh.wPortChange[map_port(wIndex)]),
1279 port_status_to_str(rh.wPortStatusPrev[map_port(wIndex)]));
1280 *(u16 *) buf = cpu_to_le16(rh.wPortStatusPrev[map_port(wIndex)]);
1281 *(u16 *) (buf + 2) = cpu_to_le16(rh.wPortChange[map_port(wIndex)]);
1282 break;
1283 case SetHubFeature:
1284 rh_dbg("SetHubFeature\n");
1285 case ClearHubFeature:
1286 rh_dbg("ClearHubFeature\n");
1287 switch (wValue) {
1288 case C_HUB_OVER_CURRENT:
1289 case C_HUB_LOCAL_POWER:
1290 rh_warn("Not implemented hub request:%d \n", typeReq);
1291 /* not implemented */
1292 break;
1293 default:
1294 goto error;
1295 }
1296 break;
1297 case SetPortFeature:
1298 if (!wIndex || wIndex > crisv10_hcd->num_ports)
1299 goto error;
1300 if(rh_set_port_feature(map_port(wIndex), wValue))
1301 goto error;
1302 break;
1303 case ClearPortFeature:
1304 if (!wIndex || wIndex > crisv10_hcd->num_ports)
1305 goto error;
1306 if(rh_clear_port_feature(map_port(wIndex), wValue))
1307 goto error;
1308 break;
1309 default:
1310 rh_warn("Unknown hub request: %d\n", typeReq);
1311 error:
1312 retval = -EPIPE;
1313 }
1314 DBFEXIT;
1315 return retval;
1316 }
1317
/* Apply a SetPortFeature request to physical port bPort (0-based).
   Most of the complexity is the USB_PORT_FEAT_RESET workaround for the
   ETRAX 100LX, which cannot reset an enabled port directly.
   Returns 0 on success, -1 for an unknown feature. */
int rh_set_port_feature(__u8 bPort, __u16 wFeature) {
  __u8 bUsbCommand = 0;
  __u8 reset_cnt;
  switch(wFeature) {
  case USB_PORT_FEAT_RESET:
    rh_dbg("SetPortFeature: reset\n");

    if (rh.wPortStatusPrev[bPort] &
        IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes))
    {
      __u8 restart_controller = 0;

      if ( (rh.wPortStatusPrev[0] &
            IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) &&
           (rh.wPortStatusPrev[1] &
            IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, yes)) )
      {
        /* Both ports are enabled. The USB controller will not change state. */
        restart_controller = 0;
      }
      else
      {
        /* Only one port is enabled. The USB controller will change state and
           must be restarted. */
        restart_controller = 1;
      }
      /*
        In ETRAX 100LX it's not possible to reset an enabled root hub port.
        The workaround is to disable and enable the port before resetting it.
        Disabling the port can, if both ports are disabled at once, cause the
        USB controller to change state to HOST_MODE state.
        The USB controller state transition causes a lot of unwanted
        interrupts that must be avoided.
        Disabling the USB controller status and port status interrupts before
        disabling/resetting the port stops these interrupts.

        These actions are performed:
        1. Disable USB controller status and port status interrupts.
        2. Disable the port
        3. Wait for the port to be disabled.
        4. Enable the port.
        5. Wait for the port to be enabled.
        6. Reset the port.
        7. Wait for the reset to end.
        8. Wait for the USB controller entering started state.
        9. Order the USB controller to running state.
        10. Wait for the USB controller reaching running state.
        11. Clear all interrupts generated during the disable/enable/reset
            procedure.
        12. Enable the USB controller status and port status interrupts.
      */

      /* 1. Disable USB controller status and USB port status interrupts. */
      *R_USB_IRQ_MASK_CLR = IO_STATE(R_USB_IRQ_MASK_CLR, ctl_status, clr);
      __asm__ __volatile__ ("  nop");
      *R_USB_IRQ_MASK_CLR = IO_STATE(R_USB_IRQ_MASK_CLR, port_status, clr);
      __asm__ __volatile__ ("  nop");

      {

        /* Since an root hub port reset shall be 50 ms and the ETRAX 100LX
           root hub port reset is 10 ms we must perform 5 port resets to
           achieve a proper root hub port reset. */
        for (reset_cnt = 0; reset_cnt < 5; reset_cnt ++)
        {
          rh_dbg("Disable Port %d\n", bPort + 1);

          /* 2. Disable the port*/
          if (bPort == 0)
          {
            *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
          }
          else
          {
            *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, yes);
          }

          /* 3. Wait for the port to be disabled. */
          while ( (bPort == 0) ?
                  *R_USB_RH_PORT_STATUS_1 &
                    IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes) :
                  *R_USB_RH_PORT_STATUS_2 &
                    IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, yes) ) {}

          rh_dbg("Port %d is disabled. Enable it!\n", bPort + 1);

          /* 4. Enable the port. */
          if (bPort == 0)
          {
            *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
          }
          else
          {
            *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, no);
          }

          /* 5. Wait for the port to be enabled again.
             NOTE(review): this polls the "connected" flag, not "enabled" —
             presumably connect implies the port is usable again; confirm. */
          while (!( (bPort == 0) ?
                    *R_USB_RH_PORT_STATUS_1 &
                      IO_STATE(R_USB_RH_PORT_STATUS_1, connected, yes) :
                    *R_USB_RH_PORT_STATUS_2 &
                      IO_STATE(R_USB_RH_PORT_STATUS_2, connected, yes) ) ) {}

          rh_dbg("Port %d is enabled.\n", bPort + 1);

          /* 6. Reset the port */
          crisv10_ready_wait();
          *R_USB_COMMAND =
            ( (bPort == 0) ?
              IO_STATE(R_USB_COMMAND, port_sel, port1):
              IO_STATE(R_USB_COMMAND, port_sel, port2) ) |
            IO_STATE(R_USB_COMMAND, port_cmd, reset) |
            IO_STATE(R_USB_COMMAND, busy, no) |
            IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
          rh_dbg("Port %d is resetting.\n", bPort + 1);

          /* 7. The USB specification says that we should wait for at least
             10ms for device recover */
          udelay(10500); /* 10,5ms blocking wait */

          crisv10_ready_wait();
        }
      }


      /* Check if the USB controller needs to be restarted. */
      if (restart_controller)
      {
        /* 8. Wait for the USB controller entering started state. */
        while (!(*R_USB_STATUS & IO_STATE(R_USB_STATUS, started, yes))) {}

        /* 9. Order the USB controller to running state. */
        crisv10_ready_wait();
        *R_USB_COMMAND =
          IO_STATE(R_USB_COMMAND, port_sel, nop) |
          IO_STATE(R_USB_COMMAND, port_cmd, reset) |
          IO_STATE(R_USB_COMMAND, busy, no) |
          IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);

        /* 10. Wait for the USB controller reaching running state. */
        while (!(*R_USB_STATUS & IO_STATE(R_USB_STATUS, running, yes))) {}
      }

      /* 11. Clear any controller or port status interrupts before enabling
         the interrupts. */
      {
        u16 dummy;

        /* Clear the port status interrupt of the reset port (reading the
           status register acknowledges the interrupt). */
        if (bPort == 0)
        {
          rh_dbg("Clearing port 1 interrupts\n");
          dummy = *R_USB_RH_PORT_STATUS_1;
        }
        else
        {
          rh_dbg("Clearing port 2 interrupts\n");
          dummy = *R_USB_RH_PORT_STATUS_2;
        }

        if (restart_controller)
        {
          /* The USB controller is restarted. Clear all interupts. */
          rh_dbg("Clearing all interrupts\n");
          dummy = *R_USB_STATUS;
          dummy = *R_USB_RH_PORT_STATUS_1;
          dummy = *R_USB_RH_PORT_STATUS_2;
        }
      }

      /* 12. Enable USB controller status and USB port status interrupts. */
      *R_USB_IRQ_MASK_SET = IO_STATE(R_USB_IRQ_MASK_SET, ctl_status, set);
      __asm__ __volatile__ ("  nop");
      *R_USB_IRQ_MASK_SET = IO_STATE(R_USB_IRQ_MASK_SET, port_status, set);
      __asm__ __volatile__ ("  nop");

    }
    else
    {
      /* Port is not enabled: a plain reset command suffices. */
      bUsbCommand |= IO_STATE(R_USB_COMMAND, port_cmd, reset);
      /* Select which port via the port_sel field */
      bUsbCommand |= IO_FIELD(R_USB_COMMAND, port_sel, bPort+1);

      /* Make sure the controller isn't busy. */
      crisv10_ready_wait();
      /* Send out the actual command to the USB controller */
      *R_USB_COMMAND = bUsbCommand;

      /* Wait a while for controller to first become started after port reset */
      udelay(12000); /* 12ms blocking wait */

      /* Make sure the controller isn't busy. */
      crisv10_ready_wait();

      /* If all enabled ports were disabled the host controller goes down into
         started mode, so we need to bring it back into the running state.
         (This is safe even if it's already in the running state.) */
      *R_USB_COMMAND =
        IO_STATE(R_USB_COMMAND, port_sel, nop) |
        IO_STATE(R_USB_COMMAND, port_cmd, reset) |
        IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
    }

    break;
  case USB_PORT_FEAT_SUSPEND:
    rh_dbg("SetPortFeature: suspend\n");
    bUsbCommand |= IO_STATE(R_USB_COMMAND, port_cmd, suspend);
    goto set;
    break;
  case USB_PORT_FEAT_POWER:
    rh_dbg("SetPortFeature: power\n");
    break;
  case USB_PORT_FEAT_C_CONNECTION:
    rh_dbg("SetPortFeature: c_connection\n");
    break;
  case USB_PORT_FEAT_C_RESET:
    rh_dbg("SetPortFeature: c_reset\n");
    break;
  case USB_PORT_FEAT_C_OVER_CURRENT:
    rh_dbg("SetPortFeature: c_over_current\n");
    break;

  set:
    /* Shared tail for features that issue a single controller command. */
    /* Select which port via the port_sel field */
    bUsbCommand |= IO_FIELD(R_USB_COMMAND, port_sel, bPort+1);

    /* Make sure the controller isn't busy. */
    crisv10_ready_wait();
    /* Send out the actual command to the USB controller */
    *R_USB_COMMAND = bUsbCommand;
    break;
  default:
    rh_dbg("SetPortFeature: unknown feature\n");
    return -1;
  }
  return 0;
}
1556
/* Apply a ClearPortFeature request to physical port bPort (0-based).
   Returns 0 on success, -1 for an unknown feature. */
int rh_clear_port_feature(__u8 bPort, __u16 wFeature) {
  switch(wFeature) {
  case USB_PORT_FEAT_ENABLE:
    rh_dbg("ClearPortFeature: enable\n");
    rh_disable_port(bPort);
    break;
  case USB_PORT_FEAT_SUSPEND:
    rh_dbg("ClearPortFeature: suspend\n");
    break;
  case USB_PORT_FEAT_POWER:
    rh_dbg("ClearPortFeature: power\n");
    break;

  case USB_PORT_FEAT_C_ENABLE:
    rh_dbg("ClearPortFeature: c_enable\n");
    goto clear;
  case USB_PORT_FEAT_C_SUSPEND:
    rh_dbg("ClearPortFeature: c_suspend\n");
    goto clear;
  case USB_PORT_FEAT_C_CONNECTION:
    rh_dbg("ClearPortFeature: c_connection\n");
    goto clear;
  case USB_PORT_FEAT_C_OVER_CURRENT:
    rh_dbg("ClearPortFeature: c_over_current\n");
    goto clear;
  case USB_PORT_FEAT_C_RESET:
    rh_dbg("ClearPortFeature: c_reset\n");
    goto clear;
  clear:
    /* The C_* (change) feature numbers are offset 16 from the wPortChange
       bit they correspond to, hence the subtraction. */
    rh.wPortChange[bPort] &= ~(1 << (wFeature - 16));
    break;
  default:
    rh_dbg("ClearPortFeature: unknown feature\n");
    return -1;
  }
  return 0;
}
1594
1595
1596 #ifdef CONFIG_PM
1597 /* Handle a suspend request for the root hub (called from hcd_driver) */
/* Handle a suspend request for the root hub (called from hcd_driver);
   currently unimplemented. */
static int rh_suspend_request(struct usb_hcd *hcd)
{
  return 0; /* no-op for now */
}
1602
/* Handle a resume request for the root hub (called from hcd_driver);
   currently unimplemented. */
static int rh_resume_request(struct usb_hcd *hcd)
{
  return 0; /* no-op for now */
}
1608 #endif /* CONFIG_PM */
1609
1610
1611
1612 /* Wrapper function for workaround port disable registers in USB controller */
1613 static void rh_disable_port(unsigned int port) {
1614 volatile int timeout = 10000;
1615 volatile char* usb_portx_disable;
1616 switch(port) {
1617 case 0:
1618 usb_portx_disable = R_USB_PORT1_DISABLE;
1619 break;
1620 case 1:
1621 usb_portx_disable = R_USB_PORT2_DISABLE;
1622 break;
1623 default:
1624 /* Invalid port index */
1625 return;
1626 }
1627 /* Set disable flag in special register */
1628 *usb_portx_disable = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
1629 /* Wait until not enabled anymore */
1630 while((rh.wPortStatusPrev[port] &
1631 IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) &&
1632 (timeout-- > 0));
1633 if(timeout == 0) {
1634 warn("Timeout while waiting for port %d to become disabled\n", port);
1635 }
1636 /* clear disable flag in special register */
1637 *usb_portx_disable = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
1638 rh_info("Physical port %d disabled\n", port+1);
1639 }
1640
1641
1642 /******************************************************************/
1643 /* Transfer Controller (TC) functions */
1644 /******************************************************************/
1645
/* FIXME: Should RX_BUF_SIZE be a config option, or maybe we should adjust it
   dynamically?
   To adjust it dynamically we would have to get an interrupt when we reach
   the end of the rx descriptor list, or when we get close to the end, and
   then allocate more descriptors. */
#define NBR_OF_RX_DESC     512
#define RX_DESC_BUF_SIZE   1024
#define RX_BUF_SIZE        (NBR_OF_RX_DESC * RX_DESC_BUF_SIZE)


/* Local variables for Transfer Controller */
/* --------------------------------------- */

/* This is a circular (double-linked) list of the active urbs for each epid.
   The head is never removed, and new urbs are linked onto the list as
   urb_entry_t elements. Don't reference urb_list directly; use the wrapper
   functions instead (which includes spin_locks) */
static struct list_head urb_list[NBR_OF_EPIDS];

/* Read about the need and usage of this lock in submit_ctrl_urb. */
/* Lock for URB lists for each EPID */
static spinlock_t urb_list_lock;

/* Lock for EPID array register (R_USB_EPT_x) in Etrax */
static spinlock_t etrax_epid_lock;

/* Lock for dma8 sub0 handling */
static spinlock_t etrax_dma8_sub0_lock;

/* DMA IN cache bug. Align the DMA IN buffers to 32 bytes, i.e. a cache line.
   Since RX_DESC_BUF_SIZE is 1024 is a multiple of 32, all rx buffers will be
   cache aligned. */
static volatile unsigned char RxBuf[RX_BUF_SIZE] __attribute__ ((aligned (32)));
static volatile struct USB_IN_Desc RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned (4)));

/* Pointers into RxDescList. */
static volatile struct USB_IN_Desc *myNextRxDesc;
static volatile struct USB_IN_Desc *myLastRxDesc;

/* A zout transfer makes a memory access at the address of its buf pointer,
   which means that setting this buf pointer to 0 will cause an access to the
   flash. In addition to this, setting sw_len to 0 results in a 16/32 bytes
   (depending on DMA burst size) transfer.
   Instead, we set it to 1, and point it to this buffer. */
static int zout_buffer[4] __attribute__ ((aligned (4)));

/* Cache for allocating new EP and SB descriptors. */
static struct kmem_cache *usb_desc_cache;

/* Cache for the data allocated in the isoc descr top half. */
static struct kmem_cache *isoc_compl_cache;

/* Cache for the data allocated when delayed finishing of URBs */
static struct kmem_cache *later_data_cache;


/* Counter to keep track of how many Isoc EP we have set up. Used to enable
   and disable iso_eof interrupt. We only need these interrupts when we have
   Isoc data endpoints (consumes CPU cycles).
   FIXME: This could be more fine granular, so this interrupt is only enabled
   when we have a In Isoc URB not URB_ISO_ASAP flaged queued. */
static int isoc_epid_counter;
1708
1709 /* Protecting wrapper functions for R_USB_EPT_x */
1710 /* -------------------------------------------- */
/* Write the full R_USB_EPT_DATA word for epid `index`, serialized by
   etrax_epid_lock (the index register and data register form a pair,
   so the index write + nop + data write must not interleave). */
static inline void etrax_epid_set(__u8 index, __u32 data) {
  unsigned long flags;
  spin_lock_irqsave(&etrax_epid_lock, flags);
  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
  nop();
  *R_USB_EPT_DATA = data;
  spin_unlock_irqrestore(&etrax_epid_lock, flags);
}
1719
/* Clear the error counters and error code for epid `index` in
   R_USB_EPT_DATA, leaving all other fields untouched. */
static inline void etrax_epid_clear_error(__u8 index) {
  unsigned long flags;
  spin_lock_irqsave(&etrax_epid_lock, flags);
  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
  nop();
  *R_USB_EPT_DATA &=
    ~(IO_MASK(R_USB_EPT_DATA, error_count_in) |
      IO_MASK(R_USB_EPT_DATA, error_count_out) |
      IO_MASK(R_USB_EPT_DATA, error_code));
  spin_unlock_irqrestore(&etrax_epid_lock, flags);
}
1731
/* Set the data-toggle bit for epid `index`: the t_out field when dirout is
   non-zero, otherwise the t_in field. */
static inline void etrax_epid_set_toggle(__u8 index, __u8 dirout,
					 __u8 toggle) {
  unsigned long flags;
  spin_lock_irqsave(&etrax_epid_lock, flags);
  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
  nop();
  if(dirout) {
    *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_out);
    *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_out, toggle);
  } else {
    *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_in);
    *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_in, toggle);
  }
  spin_unlock_irqrestore(&etrax_epid_lock, flags);
}
1747
1748 static inline __u8 etrax_epid_get_toggle(__u8 index, __u8 dirout) {
1749 unsigned long flags;
1750 __u8 toggle;
1751 spin_lock_irqsave(&etrax_epid_lock, flags);
1752 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
1753 nop();
1754 if (dirout) {
1755 toggle = IO_EXTRACT(R_USB_EPT_DATA, t_out, *R_USB_EPT_DATA);
1756 } else {
1757 toggle = IO_EXTRACT(R_USB_EPT_DATA, t_in, *R_USB_EPT_DATA);
1758 }
1759 spin_unlock_irqrestore(&etrax_epid_lock, flags);
1760 return toggle;
1761 }
1762
1763
/* Read the full R_USB_EPT_DATA word for epid `index` under the epid lock. */
static inline __u32 etrax_epid_get(__u8 index) {
  unsigned long flags;
  __u32 data;
  spin_lock_irqsave(&etrax_epid_lock, flags);
  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
  nop();
  data = *R_USB_EPT_DATA;
  spin_unlock_irqrestore(&etrax_epid_lock, flags);
  return data;
}
1774
1775
1776
1777
1778 /* Main functions for Transfer Controller */
1779 /* -------------------------------------- */
1780
1781 /* Init structs, memories and lists used by Transfer Controller */
1782 int tc_init(struct usb_hcd *hcd) {
1783 int i;
1784 /* Clear software state info for all epids */
1785 memset(epid_state, 0, sizeof(struct etrax_epid) * NBR_OF_EPIDS);
1786
1787 /* Set Invalid and Dummy as being in use and disabled */
1788 epid_state[INVALID_EPID].inuse = 1;
1789 epid_state[DUMMY_EPID].inuse = 1;
1790 epid_state[INVALID_EPID].disabled = 1;
1791 epid_state[DUMMY_EPID].disabled = 1;
1792
1793 /* Clear counter for how many Isoc epids we have sat up */
1794 isoc_epid_counter = 0;
1795
1796 /* Initialize the urb list by initiating a head for each list.
1797 Also reset list hodling active URB for each epid */
1798 for (i = 0; i < NBR_OF_EPIDS; i++) {
1799 INIT_LIST_HEAD(&urb_list[i]);
1800 activeUrbList[i] = NULL;
1801 }
1802
1803 /* Init lock for URB lists */
1804 spin_lock_init(&urb_list_lock);
1805 /* Init lock for Etrax R_USB_EPT register */
1806 spin_lock_init(&etrax_epid_lock);
1807 /* Init lock for Etrax dma8 sub0 handling */
1808 spin_lock_init(&etrax_dma8_sub0_lock);
1809
1810 /* We use kmem_cache_* to make sure that all DMA desc. are dword aligned */
1811
1812 /* Note that we specify sizeof(struct USB_EP_Desc) as the size, but also
1813 allocate SB descriptors from this cache. This is ok since
1814 sizeof(struct USB_EP_Desc) == sizeof(struct USB_SB_Desc). */
1815 usb_desc_cache = kmem_cache_create("usb_desc_cache",
1816 sizeof(struct USB_EP_Desc), 0,
1817 SLAB_HWCACHE_ALIGN, 0);
1818 if(usb_desc_cache == NULL) {
1819 return -ENOMEM;
1820 }
1821
1822 /* Create slab cache for speedy allocation of memory for isoc bottom-half
1823 interrupt handling */
1824 isoc_compl_cache =
1825 kmem_cache_create("isoc_compl_cache",
1826 sizeof(struct crisv10_isoc_complete_data),
1827 0, SLAB_HWCACHE_ALIGN, 0);
1828 if(isoc_compl_cache == NULL) {
1829 return -ENOMEM;
1830 }
1831
1832 /* Create slab cache for speedy allocation of memory for later URB finish
1833 struct */
1834 later_data_cache =
1835 kmem_cache_create("later_data_cache",
1836 sizeof(struct urb_later_data),
1837 0, SLAB_HWCACHE_ALIGN, 0);
1838 if(later_data_cache == NULL) {
1839 return -ENOMEM;
1840 }
1841
1842
1843 /* Initiate the bulk start timer. */
1844 init_timer(&bulk_start_timer);
1845 bulk_start_timer.expires = jiffies + BULK_START_TIMER_INTERVAL;
1846 bulk_start_timer.function = tc_bulk_start_timer_func;
1847 add_timer(&bulk_start_timer);
1848
1849
1850 /* Initiate the bulk eot timer. */
1851 init_timer(&bulk_eot_timer);
1852 bulk_eot_timer.expires = jiffies + BULK_EOT_TIMER_INTERVAL;
1853 bulk_eot_timer.function = tc_bulk_eot_timer_func;
1854 bulk_eot_timer.data = (unsigned long)hcd;
1855 add_timer(&bulk_eot_timer);
1856
1857 return 0;
1858 }
1859
1860 /* Uninitialize all resources used by Transfer Controller */
/* Uninitialize all resources used by Transfer Controller: destroy the three
   slab caches and remove the bulk timers.
   NOTE(review): the timers are deleted after the caches; if the timer
   callbacks allocate from these caches this ordering could race — confirm
   against tc_bulk_start_timer_func/tc_bulk_eot_timer_func. */
void tc_destroy(void) {

  /* Destroy all slab cache */
  kmem_cache_destroy(usb_desc_cache);
  kmem_cache_destroy(isoc_compl_cache);
  kmem_cache_destroy(later_data_cache);

  /* Remove timers */
  del_timer(&bulk_start_timer);
  del_timer(&bulk_eot_timer);
}
1872
/* Restart DMA channel 8 sub-channel 0 if it has stopped: skip past any
   DUMMY_EPID placeholder EP descriptors and point the DMA at the first real
   one before issuing the start command. Serialized by etrax_dma8_sub0_lock. */
static void restart_dma8_sub0(void) {
  unsigned long flags;
  spin_lock_irqsave(&etrax_dma8_sub0_lock, flags);
  /* Verify that the dma is not running */
  if ((*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd)) == 0) {
    struct USB_EP_Desc *ep = (struct USB_EP_Desc *)phys_to_virt(*R_DMA_CH8_SUB0_EP);
    while (DUMMY_EPID == IO_EXTRACT(USB_EP_command, epid, ep->command)) {
      ep = (struct USB_EP_Desc *)phys_to_virt(ep->next);
    }
    /* Advance the DMA to the next EP descriptor that is not a DUMMY_EPID. */
    *R_DMA_CH8_SUB0_EP = virt_to_phys(ep);
    /* Restart the DMA */
    *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start);
  }
  spin_unlock_irqrestore(&etrax_dma8_sub0_lock, flags);
}
1889
1890 /* queue an URB with the transfer controller (called from hcd_driver) */
/* Queue an URB with the transfer controller (hcd_driver .urb_enqueue).
   Validates the request, allocates/looks up an epid for the endpoint,
   creates HC-private URB data and SB descriptor lists, claims periodic
   bandwidth when needed, and finally inserts the URB into the per-epid
   queue (processing it immediately for Isoc).
   Returns 0 on success or a negative errno:
     -ENOENT   controller not running, or endpoint flagged disabled
     -EMSGSIZE In-Isoc URB buffer smaller than maxpacket
     -ENOMEM   epid or private-data allocation failed
     -ENXIO    URB already active or already queued
     -ENOSPC   not enough periodic bandwidth */
static int tc_urb_enqueue(struct usb_hcd *hcd,
                          struct urb *urb,
                          gfp_t mem_flags) {
  int epid;
  int retval;
  /* Periodic bus time in us; stays 0 for non-periodic transfers and
     for periodic URBs that already hold a bandwidth claim. */
  int bustime = 0;
  int maxpacket;
  unsigned long flags;
  struct crisv10_urb_priv *urb_priv;
  struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
  DBFENTER;

  if(!(crisv10_hcd->running)) {
    /* The USB Controller is not running, probably because no device is
       attached. No idea to enqueue URBs then */
    tc_warn("Rejected enqueueing of URB:0x%x because no dev attached\n",
            (unsigned int)urb);
    return -ENOENT;
  }

  maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
  /* Special case check for In Isoc transfers. Specification states that each
     In Isoc transfer consists of one packet and therefore it should fit into
     the transfer-buffer of an URB.
     We do the check here to be sure (an invalid scenario can be produced with
     parameters to the usbtest suite) */
  if(usb_pipeisoc(urb->pipe) && usb_pipein(urb->pipe) &&
     (urb->transfer_buffer_length < maxpacket)) {
    tc_err("Submit In Isoc URB with buffer length:%d to pipe with maxpacketlen: %d\n", urb->transfer_buffer_length, maxpacket);
    return -EMSGSIZE;
  }

  /* Check if there is a epid for URBs destination, if not this function
     set up one. */
  epid = tc_setup_epid(urb->ep, urb, mem_flags);
  if (epid < 0) {
    tc_err("Failed setup epid:%d for URB:0x%x\n", epid, (unsigned int)urb);
    DBFEXIT;
    return -ENOMEM;
  }

  /* Reject resubmission of an URB that is still in flight on this epid. */
  if(urb == activeUrbList[epid]) {
    tc_err("Resubmition of allready active URB:0x%x\n", (unsigned int)urb);
    return -ENXIO;
  }

  /* ...or one that is already waiting in the per-epid queue. */
  if(urb_list_entry(urb, epid)) {
    tc_err("Resubmition of allready queued URB:0x%x\n", (unsigned int)urb);
    return -ENXIO;
  }

  /* If we actively have flaged endpoint as disabled then refuse submition */
  if(epid_state[epid].disabled) {
    return -ENOENT;
  }

  /* Allocate and init HC-private data for URB */
  if(urb_priv_create(hcd, urb, epid, mem_flags) != 0) {
    DBFEXIT;
    return -ENOMEM;
  }
  urb_priv = urb->hcpriv;

  /* Check if there is enough bandwidth for periodic transfer */
  if(usb_pipeint(urb->pipe) || usb_pipeisoc(urb->pipe)) {
    /* only check (and later claim) if not already claimed */
    if (urb_priv->bandwidth == 0) {
      bustime = crisv10_usb_check_bandwidth(urb->dev, urb);
      if (bustime < 0) {
        tc_err("Not enough periodic bandwidth\n");
        urb_priv_free(hcd, urb);
        DBFEXIT;
        return -ENOSPC;
      }
    }
  }

  tc_dbg("Enqueue URB:0x%x[%d] epid:%d (%s) bufflen:%d\n",
         (unsigned int)urb, urb_priv->urb_num, epid,
         pipe_to_str(urb->pipe), urb->transfer_buffer_length);

  /* Create and link SBs required for this URB */
  retval = create_sb_for_urb(urb, mem_flags);
  if(retval != 0) {
    tc_err("Failed to create SBs for URB:0x%x[%d]\n", (unsigned int)urb,
           urb_priv->urb_num);
    urb_priv_free(hcd, urb);
    DBFEXIT;
    return retval;
  }

  /* Init intr EP pool if this URB is a INTR transfer. This pool is later
     used when inserting EPs in the TxIntrEPList. We do the alloc here
     so we can't run out of memory later */
  if(usb_pipeint(urb->pipe)) {
    retval = init_intr_urb(urb, mem_flags);
    if(retval != 0) {
      tc_warn("Failed to init Intr URB\n");
      urb_priv_free(hcd, urb);
      DBFEXIT;
      return retval;
    }
  }

  /* Disable other access when inserting USB */
  local_irq_save(flags);

  /* Claim bandwidth, if needed */
  if(bustime) {
    crisv10_usb_claim_bandwidth(urb->dev,
                                urb,
                                bustime,
                                (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS));
  }

  /* Add URB to EP queue */
  urb_list_add(urb, epid, mem_flags);

  if(usb_pipeisoc(urb->pipe)) {
    /* Special processing of Isoc URBs. */
    tc_dma_process_isoc_urb(urb);
  } else {
    /* Process EP queue for rest of the URB types (Bulk, Ctrl, Intr) */
    tc_dma_process_queue(epid);
  }

  local_irq_restore(flags);

  DBFEXIT;
  return 0;
}
2022
2023 /* remove an URB from the transfer controller queues (called from hcd_driver)*/
/* Remove an URB from the transfer controller queues (hcd_driver
   .urb_dequeue). URBs that never reached the hardware are finished
   synchronously; an active URB has its EP disabled per pipe type and
   is then finished asynchronously via tc_finish_urb_later().
   Always returns 0. */
static int tc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) {
  struct crisv10_urb_priv *urb_priv;
  unsigned long flags;
  int epid;

  DBFENTER;
  /* Disable interrupts here since a descriptor interrupt for the isoc epid
     will modify the sb list.  This could possibly be done more granular, but
     urb_dequeue should not be used frequently anyway.
  */
  local_irq_save(flags);

  urb->status = status;
  urb_priv = urb->hcpriv;

  if (!urb_priv) {
    /* This happens if a device driver calls unlink on an urb that
       was never submitted (lazy driver) or if the urb was completed
       while dequeue was being called. */
    tc_warn("Dequeing of not enqueued URB:0x%x\n", (unsigned int)urb);
    local_irq_restore(flags);
    return 0;
  }
  epid = urb_priv->epid;

  tc_warn("Dequeing %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
          (urb == activeUrbList[epid]) ? "active" : "queued",
          (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
          str_type(urb->pipe), epid, urb->status,
          (urb_priv->later_data) ? "later-sched" : "");

  /* For Bulk, Ctrl and Intr are only one URB active at a time. So any URB
     that isn't active can be dequeued by just removing it from the queue */
  if(usb_pipebulk(urb->pipe) || usb_pipecontrol(urb->pipe) ||
     usb_pipeint(urb->pipe)) {

    /* Check if URB haven't gone further than the queue */
    if(urb != activeUrbList[epid]) {
      ASSERT(urb_priv->later_data == NULL);
      tc_warn("Dequeing URB:0x%x[%d] (%s %s epid:%d) from queue"
              " (not active)\n", (unsigned int)urb, urb_priv->urb_num,
              str_dir(urb->pipe), str_type(urb->pipe), epid);

      /* Finish the URB with error status from USB core */
      tc_finish_urb(hcd, urb, urb->status);
      local_irq_restore(flags);
      return 0;
    }
  }

  /* Set URB status to Unlink for handling when interrupt comes. */
  urb_priv->urb_state = UNLINK;

  /* Differentiate dequeing of Bulk and Ctrl from Isoc and Intr */
  switch(usb_pipetype(urb->pipe)) {
  case PIPE_BULK:
    /* Check if EP still is enabled */
    if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
      /* The EP was enabled, disable it. */
      TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
    }
    /* Kicking dummy list out of the party. */
    TxBulkEPList[epid].next = virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
    break;
  case PIPE_CONTROL:
    /* Check if EP still is enabled */
    if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
      /* The EP was enabled, disable it. */
      TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
    }
    break;
  case PIPE_ISOCHRONOUS:
    /* Disabling, busy-wait and unlinking of Isoc SBs will be done in
       finish_isoc_urb(). Because there might the case when URB is dequeued
       but there are other valid URBs waiting */

    /* Check if In Isoc EP still is enabled */
    if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
      /* The EP was enabled, disable it. */
      TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
    }
    break;
  case PIPE_INTERRUPT:
    /* Special care is taken for interrupt URBs. EPs are unlinked in
       tc_finish_urb */
    break;
  default:
    break;
  }

  /* Asynchronous unlink, finish the URB later from scheduled or other
     event (data finished, error) */
  tc_finish_urb_later(hcd, urb, urb->status);

  local_irq_restore(flags);
  DBFEXIT;
  return 0;
}
2122
2123
2124 static void tc_sync_finish_epid(struct usb_hcd *hcd, int epid) {
2125 volatile int timeout = 10000;
2126 struct urb* urb;
2127 struct crisv10_urb_priv* urb_priv;
2128 unsigned long flags;
2129
2130 volatile struct USB_EP_Desc *first_ep; /* First EP in the list. */
2131 volatile struct USB_EP_Desc *curr_ep; /* Current EP, the iterator. */
2132 volatile struct USB_EP_Desc *next_ep; /* The EP after current. */
2133
2134 int type = epid_state[epid].type;
2135
2136 /* Setting this flag will cause enqueue() to return -ENOENT for new
2137 submitions on this endpoint and finish_urb() wont process queue further */
2138 epid_state[epid].disabled = 1;
2139
2140 switch(type) {
2141 case PIPE_BULK:
2142 /* Check if EP still is enabled */
2143 if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
2144 /* The EP was enabled, disable it. */
2145 TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
2146 tc_warn("sync_finish: Disabling EP for epid:%d\n", epid);
2147
2148 /* Do busy-wait until DMA not using this EP descriptor anymore */
2149 while((*R_DMA_CH8_SUB0_EP ==
2150 virt_to_phys(&TxBulkEPList[epid])) &&
2151 (timeout-- > 0));
2152 if(timeout == 0) {
2153 warn("Timeout while waiting for DMA-TX-Bulk to leave EP for"
2154 " epid:%d\n", epid);
2155 }
2156 }
2157 break;
2158
2159 case PIPE_CONTROL:
2160 /* Check if EP still is enabled */
2161 if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
2162 /* The EP was enabled, disable it. */
2163 TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
2164 tc_warn("sync_finish: Disabling EP for epid:%d\n", epid);
2165
2166 /* Do busy-wait until DMA not using this EP descriptor anymore */
2167 while((*R_DMA_CH8_SUB1_EP ==
2168 virt_to_phys(&TxCtrlEPList[epid])) &&
2169 (timeout-- > 0));
2170 if(timeout == 0) {
2171 warn("Timeout while waiting for DMA-TX-Ctrl to leave EP for"
2172 " epid:%d\n", epid);
2173 }
2174 }
2175 break;
2176
2177 case PIPE_INTERRUPT:
2178 local_irq_save(flags);
2179 /* Disable all Intr EPs belonging to epid */
2180 first_ep = &TxIntrEPList[0];
2181 curr_ep = first_ep;
2182 do {
2183 next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
2184 if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
2185 /* Disable EP */
2186 next_ep->command &= ~IO_MASK(USB_EP_command, enable);
2187 }
2188 curr_ep = phys_to_virt(curr_ep->next);
2189 } while (curr_ep != first_ep);
2190
2191 local_irq_restore(flags);
2192 break;
2193
2194 case PIPE_ISOCHRONOUS:
2195 /* Check if EP still is enabled */
2196 if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
2197 tc_warn("sync_finish: Disabling Isoc EP for epid:%d\n", epid);
2198 /* The EP was enabled, disable it. */
2199 TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
2200
2201 while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) &&
2202 (timeout-- > 0));
2203 if(timeout == 0) {
2204 warn("Timeout while waiting for DMA-TX-Isoc to leave EP for"
2205 " epid:%d\n", epid);
2206 }
2207 }
2208 break;
2209 }
2210
2211 local_irq_save(flags);
2212
2213 /* Finish if there is active URB for this endpoint */
2214 if(activeUrbList[epid] != NULL) {
2215 urb = activeUrbList[epid];
2216 urb_priv = urb->hcpriv;
2217 ASSERT(urb_priv);
2218 tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
2219 (urb == activeUrbList[epid]) ? "active" : "queued",
2220 (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
2221 str_type(urb->pipe), epid, urb->status,
2222 (urb_priv->later_data) ? "later-sched" : "");
2223
2224 tc_finish_urb(hcd, activeUrbList[epid], -ENOENT);
2225 ASSERT(activeUrbList[epid] == NULL);
2226 }
2227
2228 /* Finish any queued URBs for this endpoint. There won't be any resubmitions
2229 because epid_disabled causes enqueue() to fail for this endpoint */
2230 while((urb = urb_list_first(epid)) != NULL) {
2231 urb_priv = urb->hcpriv;
2232 ASSERT(urb_priv);
2233
2234 tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
2235 (urb == activeUrbList[epid]) ? "active" : "queued",
2236 (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
2237 str_type(urb->pipe), epid, urb->status,
2238 (urb_priv->later_data) ? "later-sched" : "");
2239
2240 tc_finish_urb(hcd, urb, -ENOENT);
2241 }
2242 epid_state[epid].disabled = 0;
2243 local_irq_restore(flags);
2244 }
2245
2246 /* free resources associated with an endpoint (called from hcd_driver) */
2247 static void tc_endpoint_disable(struct usb_hcd *hcd,
2248 struct usb_host_endpoint *ep) {
2249 DBFENTER;
2250 /* Only free epid if it has been allocated. We get two endpoint_disable
2251 requests for ctrl endpoints so ignore the second one */
2252 if(ep->hcpriv != NULL) {
2253 struct crisv10_ep_priv *ep_priv = ep->hcpriv;
2254 int epid = ep_priv->epid;
2255 tc_warn("endpoint_disable ep:0x%x ep-priv:0x%x (%s) (epid:%d freed)\n",
2256 (unsigned int)ep, (unsigned int)ep->hcpriv,
2257 endpoint_to_str(&(ep->desc)), epid);
2258
2259 tc_sync_finish_epid(hcd, epid);
2260
2261 ASSERT(activeUrbList[epid] == NULL);
2262 ASSERT(list_empty(&urb_list[epid]));
2263
2264 tc_free_epid(ep);
2265 } else {
2266 tc_dbg("endpoint_disable ep:0x%x ep-priv:0x%x (%s)\n", (unsigned int)ep,
2267 (unsigned int)ep->hcpriv, endpoint_to_str(&(ep->desc)));
2268 }
2269 DBFEXIT;
2270 }
2271
/* Workqueue handler for deferred URB completion, scheduled by
   tc_finish_urb_later(). Finishes the URB recorded in the
   urb_later_data item unless the URB was already finished (urb set to
   NULL by tc_finish_urb) or has since been reused for a new submission
   (urb_num mismatch). Always frees the work item. */
static void tc_finish_urb_later_proc(struct work_struct* work) {
  unsigned long flags;
  struct urb_later_data* uld;

  local_irq_save(flags);
  uld = container_of(work, struct urb_later_data, dws.work);
  if(uld->urb == NULL) {
    late_dbg("Later finish of URB = NULL (allready finished)\n");
  } else {
    struct crisv10_urb_priv* urb_priv = uld->urb->hcpriv;
    ASSERT(urb_priv);
    /* urb_num guards against the URB memory having been recycled for a
       newer submission between scheduling and execution. */
    if(urb_priv->urb_num == uld->urb_num) {
      late_dbg("Later finish of URB:0x%x[%d]\n", (unsigned int)(uld->urb),
               urb_priv->urb_num);
      if(uld->status != uld->urb->status) {
        errno_dbg("Later-finish URB with status:%d, later-status:%d\n",
                  uld->urb->status, uld->status);
      }
      /* A scheduled uld must be the one recorded in urb_priv. */
      if(uld != urb_priv->later_data) {
        panic("Scheduled uld not same as URBs uld\n");
      }
      tc_finish_urb(uld->hcd, uld->urb, uld->status);
    } else {
      late_warn("Ignoring later finish of URB:0x%x[%d]"
                ", urb_num doesn't match current URB:0x%x[%d]",
                (unsigned int)(uld->urb), uld->urb_num,
                (unsigned int)(uld->urb), urb_priv->urb_num);
    }
  }
  local_irq_restore(flags);
  kmem_cache_free(later_data_cache, uld);
}
2304
/* Schedule asynchronous completion of an URB with the given status.
   If a later-finish is already pending for this URB, only the status
   to report is updated; otherwise a work item is allocated from
   later_data_cache and queued with LATER_TIMER_DELAY.
   May be called from interrupt context (GFP_ATOMIC allocation).
   NOTE(review): allocation failure is only caught by ASSERT — under
   atomic memory pressure this would dereference NULL. */
static void tc_finish_urb_later(struct usb_hcd *hcd, struct urb *urb,
                                int status) {
  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  struct urb_later_data* uld;

  ASSERT(urb_priv);

  if(urb_priv->later_data != NULL) {
    /* Later-finish allready scheduled for this URB, just update status to
       return when finishing later */
    errno_dbg("Later-finish schedule change URB status:%d with new"
              " status:%d\n", urb_priv->later_data->status, status);

    urb_priv->later_data->status = status;
    return;
  }

  uld = kmem_cache_alloc(later_data_cache, GFP_ATOMIC);
  ASSERT(uld);

  uld->hcd = hcd;
  uld->urb = urb;
  /* Snapshot of the URB sequence number; checked again in
     tc_finish_urb_later_proc to detect URB reuse. */
  uld->urb_num = urb_priv->urb_num;
  uld->status = status;

  INIT_DELAYED_WORK(&uld->dws, tc_finish_urb_later_proc);
  urb_priv->later_data = uld;

  /* Schedule the finishing of the URB to happen later */
  schedule_delayed_work(&uld->dws, LATER_TIMER_DELAY);
}
2336
2337 static void tc_finish_isoc_urb(struct usb_hcd *hcd, struct urb *urb,
2338 int status);
2339
/* Complete an URB and hand it back to the USB core.
   Performs all per-type bookkeeping: removes the URB from the queue or
   active slot, cancels pending later-finish work, fixes up
   actual_length and status (short-read and async-unlink mangling),
   saves the data toggle, disables/relinks EP descriptors for Bulk/Ctrl,
   releases periodic bandwidth, frees HC-private data, and restarts the
   epid queue when the controller is still running.
   Isoc URBs are delegated entirely to tc_finish_isoc_urb().
   Must be called with interrupts disabled (callers use
   local_irq_save or run in irq context). */
static void tc_finish_urb(struct usb_hcd *hcd, struct urb *urb, int status) {
  struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
  struct crisv10_urb_priv *urb_priv = urb->hcpriv;
  int epid;
  char toggle;
  int urb_num;

  DBFENTER;
  ASSERT(urb_priv != NULL);
  epid = urb_priv->epid;
  /* Remember the sequence number; urb_priv is freed below but the
     number is still needed for logging. */
  urb_num = urb_priv->urb_num;

  if(urb != activeUrbList[epid]) {
    if(urb_list_entry(urb, epid)) {
      /* Remove this URB from the list. Only happens when URB are finished
         before having been processed (dequeing) */
      urb_list_del(urb, epid);
    } else {
      tc_warn("Finishing of URB:0x%x[%d] neither active or in queue for"
              " epid:%d\n", (unsigned int)urb, urb_num, epid);
    }
  }

  /* Cancel any pending later-finish of this URB */
  if(urb_priv->later_data) {
    urb_priv->later_data->urb = NULL;
  }

  /* For an IN pipe, we always set the actual length, regardless of whether
     there was an error or not (which means the device driver can use the data
     if it wants to). */
  if(usb_pipein(urb->pipe)) {
    urb->actual_length = urb_priv->rx_offset;
  } else {
    /* Set actual_length for OUT urbs also; the USB mass storage driver seems
       to want that. */
    if (status == 0 && urb->status == -EINPROGRESS) {
      urb->actual_length = urb->transfer_buffer_length;
    } else {
      /*  We wouldn't know of any partial writes if there was an error. */
      urb->actual_length = 0;
    }
  }


  /* URB status mangling */
  if(urb->status == -EINPROGRESS) {
    /* The USB core hasn't changed the status, let's set our finish status */
    urb->status = status;

    if ((status == 0) && (urb->transfer_flags & URB_SHORT_NOT_OK) &&
        usb_pipein(urb->pipe) &&
        (urb->actual_length != urb->transfer_buffer_length)) {
      /*  URB_SHORT_NOT_OK means that short reads (shorter than the endpoint's
          max length) is to be treated as an error. */
      errno_dbg("Finishing URB:0x%x[%d] with SHORT_NOT_OK flag and short"
                " data:%d\n", (unsigned int)urb, urb_num,
                urb->actual_length);
      urb->status = -EREMOTEIO;
    }

    if(urb_priv->urb_state == UNLINK) {
      /* URB has been requested to be unlinked asynchronously */
      urb->status = -ECONNRESET;
      errno_dbg("Fixing unlink status of URB:0x%x[%d] to:%d\n",
                (unsigned int)urb, urb_num, urb->status);
    }
  } else {
    /* The USB Core wants to signal some error via the URB, pass it through */
  }

  /* use completely different finish function for Isoc URBs */
  if(usb_pipeisoc(urb->pipe)) {
    tc_finish_isoc_urb(hcd, urb, status);
    return;
  }

  /* Do special unlinking of EPs for Intr traffic */
  if(usb_pipeint(urb->pipe)) {
    tc_dma_unlink_intr_urb(urb);
  }

  /* Release allocated bandwidth for periodic transfers */
  if(usb_pipeint(urb->pipe) || usb_pipeisoc(urb->pipe))
    crisv10_usb_release_bandwidth(hcd,
                                  usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS,
                                  urb_priv->bandwidth);

  /* This URB is active on EP */
  if(urb == activeUrbList[epid]) {
    /* We need to fiddle with the toggle bits because the hardware doesn't do
       it for us. */
    toggle = etrax_epid_get_toggle(epid, usb_pipeout(urb->pipe));
    usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                  usb_pipeout(urb->pipe), toggle);

    /* Checks for Ctrl and Bulk EPs */
    switch(usb_pipetype(urb->pipe)) {
    case PIPE_BULK:
      /* Check so Bulk EP realy is disabled before finishing active URB */
      ASSERT((TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) ==
             IO_STATE(USB_EP_command, enable, no));
      /* Disable sub-pointer for EP to avoid next tx_interrupt() to
         process Bulk EP. */
      TxBulkEPList[epid].sub = 0;
      /* No need to wait for the DMA before changing the next pointer.
         The modulo NBR_OF_EPIDS isn't actually necessary, since we will never use
         the last one (INVALID_EPID) for actual traffic. */
      TxBulkEPList[epid].next =
        virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
      break;
    case PIPE_CONTROL:
      /* Check so Ctrl EP realy is disabled before finishing active URB */
      ASSERT((TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) ==
             IO_STATE(USB_EP_command, enable, no));
      /* Disable sub-pointer for EP to avoid next tx_interrupt() to
         process Ctrl EP. */
      TxCtrlEPList[epid].sub = 0;
      break;
    }
  }

  /* Free HC-private URB data*/
  urb_priv_free(hcd, urb);

  if(urb->status) {
    errno_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
              (unsigned int)urb, urb_num, str_dir(urb->pipe),
              str_type(urb->pipe), urb->actual_length, urb->status);
  } else {
    tc_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
           (unsigned int)urb, urb_num, str_dir(urb->pipe),
           str_type(urb->pipe), urb->actual_length, urb->status);
  }

  /* If we just finished an active URB, clear active pointer. */
  if (urb == activeUrbList[epid]) {
    /* Make URB not active on EP anymore */
    activeUrbList[epid] = NULL;

    if(urb->status == 0) {
      /* URB finished sucessfully, process queue to see if there are any more
         URBs waiting before we call completion function.*/
      if(crisv10_hcd->running) {
        /* Only process queue if USB controller is running */
        tc_dma_process_queue(epid);
      } else {
        tc_warn("No processing of queue for epid:%d, USB Controller not"
                " running\n", epid);
      }
    }
  }

  /*  Hand the URB from HCD to its USB device driver, using its completion
      functions */
  usb_hcd_giveback_urb (hcd, urb, status);

  /* Check the queue once more if the URB returned with error, because we
     didn't do it before the completion function because the specification
     states that the queue should not restart until all it's unlinked
     URBs have been fully retired, with the completion functions run */
  if(crisv10_hcd->running) {
    /* Only process queue if USB controller is running */
    tc_dma_process_queue(epid);
  } else {
    tc_warn("No processing of queue for epid:%d, USB Controller not running\n",
            epid);
  }

  DBFEXIT;
}
2511
2512 static void tc_finish_isoc_urb(struct usb_hcd *hcd, struct urb *urb,
2513 int status) {
2514 struct crisv10_urb_priv *urb_priv = urb->hcpriv;
2515 int epid, i;
2516 volatile int timeout = 10000;
2517 int bandwidth = 0;
2518
2519 ASSERT(urb_priv);
2520 epid = urb_priv->epid;
2521
2522 ASSERT(usb_pipeisoc(urb->pipe));
2523
2524 /* Set that all isoc packets have status and length set before
2525 completing the urb. */
2526 for (i = urb_priv->isoc_packet_counter; i < urb->number_of_packets; i++){
2527 urb->iso_frame_desc[i].actual_length = 0;
2528 urb->iso_frame_desc[i].status = -EPROTO;
2529 }
2530
2531 /* Check if the URB is currently active (done or error) */
2532 if(urb == activeUrbList[epid]) {
2533 /* Check if there are another In Isoc URB queued for this epid */
2534 if (!list_empty(&urb_list[epid])&& !epid_state[epid].disabled) {
2535 /* Move it from queue to active and mark it started so Isoc transfers
2536 won't be interrupted.
2537 All Isoc URBs data transfers are already added to DMA lists so we
2538 don't have to insert anything in DMA lists here. */
2539 activeUrbList[epid] = urb_list_first(epid);
2540 ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_state =
2541 STARTED;
2542 urb_list_del(activeUrbList[epid], epid);
2543
2544 if(urb->status) {
2545 errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
2546 " status:%d, new waiting URB:0x%x[%d]\n",
2547 (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
2548 str_type(urb->pipe), urb_priv->isoc_packet_counter,
2549 urb->number_of_packets, urb->status,
2550 (unsigned int)activeUrbList[epid],
2551 ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_num);
2552 }
2553
2554 } else { /* No other URB queued for this epid */
2555 if(urb->status) {
2556 errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
2557 " status:%d, no new URB waiting\n",
2558 (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
2559 str_type(urb->pipe), urb_priv->isoc_packet_counter,
2560 urb->number_of_packets, urb->status);
2561 }
2562
2563 /* Check if EP is still enabled, then shut it down. */
2564 if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
2565 isoc_dbg("Isoc EP enabled for epid:%d, disabling it\n", epid);
2566
2567 /* Should only occur for In Isoc EPs where SB isn't consumed. */
2568 ASSERT(usb_pipein(urb->pipe));
2569
2570 /* Disable it and wait for it to stop */
2571 TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
2572
2573 /* Ah, the luxury of busy-wait. */
2574 while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) &&
2575 (timeout-- > 0));
2576 if(timeout == 0) {
2577 warn("Timeout while waiting for DMA-TX-Isoc to leave EP for epid:%d\n", epid);
2578 }
2579 }
2580
2581 /* Unlink SB to say that epid is finished. */
2582 TxIsocEPList[epid].sub = 0;
2583 TxIsocEPList[epid].hw_len = 0;
2584
2585 /* No URB active for EP anymore */
2586 activeUrbList[epid] = NULL;
2587 }
2588 } else { /* Finishing of not active URB (queued up with SBs thought) */
2589 isoc_warn("finish_isoc_urb (URB:0x%x %s) (%d of %d packets) status:%d,"
2590 " SB queued but not active\n",
2591 (unsigned int)urb, str_dir(urb->pipe),
2592 urb_priv->isoc_packet_counter, urb->number_of_packets,
2593 urb->status);
2594 if(usb_pipeout(urb->pipe)) {
2595 /* Finishing of not yet active Out Isoc URB needs unlinking of SBs. */
2596 struct USB_SB_Desc *iter_sb, *prev_sb, *next_sb;
2597
2598 iter_sb = TxIsocEPList[epid].sub ?
2599 phys_to_virt(TxIsocEPList[epid].sub) : 0;
2600 prev_sb = 0;
2601
2602 /* SB that is linked before this URBs first SB */
2603 while (iter_sb && (iter_sb != urb_priv->first_sb)) {
2604 prev_sb = iter_sb;
2605 iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
2606 }
2607
2608 if (iter_sb == 0) {
2609 /* Unlink of the URB currently being transmitted. */
2610 prev_sb = 0;
2611 iter_sb = TxIsocEPList[epid].sub ? phys_to_virt(TxIsocEPList[epid].sub) : 0;
2612 }
2613
2614 while (iter_sb && (iter_sb != urb_priv->last_sb)) {
2615 iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
2616 }
2617
2618 if (iter_sb) {
2619 next_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
2620 } else {
2621 /* This should only happen if the DMA has completed
2622 processing the SB list for this EP while interrupts
2623 are disabled. */
2624 isoc_dbg("Isoc urb not found, already sent?\n");
2625 next_sb = 0;
2626 }
2627 if (prev_sb) {
2628 prev_sb->next = next_sb ? virt_to_phys(next_sb) : 0;
2629 } else {
2630 TxIsocEPList[epid].sub = next_sb ? virt_to_phys(next_sb) : 0;
2631 }
2632 }
2633 }
2634
2635 /* Free HC-private URB data*/
2636 bandwidth = urb_priv->bandwidth;
2637 urb_priv_free(hcd, urb);
2638
2639 crisv10_usb_release_bandwidth(hcd, usb_pipeisoc(urb->pipe), bandwidth);
2640
2641 /* Hand the URB from HCD to its USB device driver, using its completion
2642 functions */
2643 usb_hcd_giveback_urb (hcd, urb, status);
2644 }
2645
2646 static __u32 urb_num = 0;
2647
2648 /* allocate and initialize URB private data */
2649 static int urb_priv_create(struct usb_hcd *hcd, struct urb *urb, int epid,
2650 int mem_flags) {
2651 struct crisv10_urb_priv *urb_priv;
2652
2653 urb_priv = kmalloc(sizeof *urb_priv, mem_flags);
2654 if (!urb_priv)
2655 return -ENOMEM;
2656 memset(urb_priv, 0, sizeof *urb_priv);
2657
2658 urb_priv->epid = epid;
2659 urb_priv->urb_state = NOT_STARTED;
2660
2661 urb->hcpriv = urb_priv;
2662 /* Assign URB a sequence number, and increment counter */
2663 urb_priv->urb_num = urb_num;
2664 urb_num++;
2665 urb_priv->bandwidth = 0;
2666 return 0;
2667 }
2668
2669 /* free URB private data */
2670 static void urb_priv_free(struct usb_hcd *hcd, struct urb *urb) {
2671 int i;
2672 struct crisv10_urb_priv *urb_priv = urb->hcpriv;
2673 ASSERT(urb_priv != 0);
2674
2675 /* Check it has any SBs linked that needs to be freed*/
2676 if(urb_priv->first_sb != NULL) {
2677 struct USB_SB_Desc *next_sb, *first_sb, *last_sb;
2678 int i = 0;
2679 first_sb = urb_priv->first_sb;
2680 last_sb = urb_priv->last_sb;
2681 ASSERT(last_sb);
2682 while(first_sb != last_sb) {
2683 next_sb = (struct USB_SB_Desc *)phys_to_virt(first_sb->next);
2684 kmem_cache_free(usb_desc_cache, first_sb);
2685 first_sb = next_sb;
2686 i++;
2687 }
2688 kmem_cache_free(usb_desc_cache, last_sb);
2689 i++;
2690 }
2691
2692 /* Check if it has any EPs in its Intr pool that also needs to be freed */
2693 if(urb_priv->intr_ep_pool_length > 0) {
2694 for(i = 0; i < urb_priv->intr_ep_pool_length; i++) {
2695 kfree(urb_priv->intr_ep_pool[i]);
2696 }
2697 /*
2698 tc_dbg("Freed %d EPs from URB:0x%x EP pool\n",
2699 urb_priv->intr_ep_pool_length, (unsigned int)urb);
2700 */
2701 }
2702
2703 kfree(urb_priv);
2704 urb->hcpriv = NULL;
2705 }
2706
2707 static int ep_priv_create(struct usb_host_endpoint *ep, int mem_flags) {
2708 struct crisv10_ep_priv *ep_priv;
2709
2710 ep_priv = kmalloc(sizeof *ep_priv, mem_flags);
2711 if (!ep_priv)
2712 return -ENOMEM;
2713 memset(ep_priv, 0, sizeof *ep_priv);
2714
2715 ep->hcpriv = ep_priv;
2716 return 0;
2717 }
2718
2719 static void ep_priv_free(struct usb_host_endpoint *ep) {
2720 struct crisv10_ep_priv *ep_priv = ep->hcpriv;
2721 ASSERT(ep_priv);
2722 kfree(ep_priv);
2723 ep->hcpriv = NULL;
2724 }
2725
2726 /*
2727 * usb_check_bandwidth():
2728 *
2729 * old_alloc is from host_controller->bandwidth_allocated in microseconds;
2730 * bustime is from calc_bus_time(), but converted to microseconds.
2731 *
2732 * returns <bustime in us> if successful,
2733 * or -ENOSPC if bandwidth request fails.
2734 *
2735 * FIXME:
2736 * This initial implementation does not use Endpoint.bInterval
2737 * in managing bandwidth allocation.
2738 * It probably needs to be expanded to use Endpoint.bInterval.
2739 * This can be done as a later enhancement (correction).
2740 *
2741 * This will also probably require some kind of
2742 * frame allocation tracking...meaning, for example,
2743 * that if multiple drivers request interrupts every 10 USB frames,
2744 * they don't all have to be allocated at
2745 * frame numbers N, N+10, N+20, etc. Some of them could be at
2746 * N+11, N+21, N+31, etc., and others at
2747 * N+12, N+22, N+32, etc.
2748 *
2749 * Similarly for isochronous transfers...
2750 *
2751 * Individual HCDs can schedule more directly ... this logic
2752 * is not correct for high speed transfers.
2753 */
2754 static int crisv10_usb_check_bandwidth(
2755 struct usb_device *dev,
2756 struct urb *urb)
2757 {
2758 unsigned int pipe = urb->pipe;
2759 long bustime;
2760 int is_in = usb_pipein (pipe);
2761 int is_iso = usb_pipeisoc (pipe);
2762 int old_alloc = dev->bus->bandwidth_allocated;
2763 int new_alloc;
2764
2765 bustime = NS_TO_US (usb_calc_bus_time (dev->speed, is_in, is_iso,
2766 usb_maxpacket (dev, pipe, !is_in)));
2767 if (is_iso)
2768 bustime /= urb->number_of_packets;
2769
2770 new_alloc = old_alloc + (int) bustime;
2771 if (new_alloc > FRAME_TIME_MAX_USECS_ALLOC) {
2772 dev_dbg (&dev->dev, "usb_check_bandwidth FAILED: %d + %ld = %d usec\n",
2773 old_alloc, bustime, new_alloc);
2774 bustime = -ENOSPC; /* report error */
2775 }
2776
2777 return bustime;
2778 }
2779
2780 /**
2781 * usb_claim_bandwidth - records bandwidth for a periodic transfer
2782 * @dev: source/target of request
2783 * @urb: request (urb->dev == dev)
2784 * @bustime: bandwidth consumed, in (average) microseconds per frame
2785 * @isoc: true iff the request is isochronous
2786 *
2787 * HCDs are expected not to overcommit periodic bandwidth, and to record such
2788 * reservations whenever endpoints are added to the periodic schedule.
2789 *
2790 * FIXME averaging per-frame is suboptimal. Better to sum over the HCD's
2791 * entire periodic schedule ... 32 frames for OHCI, 1024 for UHCI, settable
2792 * for EHCI (256/512/1024 frames, default 1024) and have the bus expose how
2793 * large its periodic schedule is.
2794 */
2795 static void crisv10_usb_claim_bandwidth(
2796 struct usb_device *dev,
2797 struct urb *urb, int bustime, int isoc)
2798 {
2799 dev->bus->bandwidth_allocated += bustime;
2800 if (isoc)
2801 dev->bus->bandwidth_isoc_reqs++;
2802 else
2803 dev->bus->bandwidth_int_reqs++;
2804 struct crisv10_urb_priv *urb_priv;
2805 urb_priv = urb->hcpriv;
2806 urb_priv->bandwidth = bustime;
2807 }
2808
/**
 * crisv10_usb_release_bandwidth - reverses effect of crisv10_usb_claim_bandwidth()
2811 * @hcd: host controller
2812 * @isoc: true iff the request is isochronous
2813 * @bandwidth: bandwidth returned
2814 *
2815 * This records that previously allocated bandwidth has been released.
2816 * Bandwidth is released when endpoints are removed from the host controller's
2817 * periodic schedule.
2818 */
2819 static void crisv10_usb_release_bandwidth(
2820 struct usb_hcd *hcd,
2821 int isoc,
2822 int bandwidth)
2823 {
2824 hcd_to_bus(hcd)->bandwidth_allocated -= bandwidth;
2825 if (isoc)
2826 hcd_to_bus(hcd)->bandwidth_isoc_reqs--;
2827 else
2828 hcd_to_bus(hcd)->bandwidth_int_reqs--;
2829 }
2830
2831
2832 /* EPID handling functions, managing EP-list in Etrax through wrappers */
2833 /* ------------------------------------------------------------------- */
2834
2835 /* Sets up a new EPID for an endpoint or returns existing if found */
2836 static int tc_setup_epid(struct usb_host_endpoint *ep, struct urb *urb,
2837 int mem_flags) {
2838 int epid;
2839 char devnum, endpoint, out_traffic, slow;
2840 int maxlen;
2841 __u32 epid_data;
2842 struct crisv10_ep_priv *ep_priv = ep->hcpriv;
2843
2844 DBFENTER;
2845
2846 /* Check if a valid epid already is setup for this endpoint */
2847 if(ep_priv != NULL) {
2848 return ep_priv->epid;
2849 }
2850
2851 /* We must find and initiate a new epid for this urb. */
2852 epid = tc_allocate_epid();
2853
2854 if (epid == -1) {
2855 /* Failed to allocate a new epid. */
2856 DBFEXIT;
2857 return epid;
2858 }
2859
2860 /* We now have a new epid to use. Claim it. */
2861 epid_state[epid].inuse = 1;
2862
2863 /* Init private data for new endpoint */
2864 if(ep_priv_create(ep, mem_flags) != 0) {
2865 return -ENOMEM;
2866 }
2867 ep_priv = ep->hcpriv;
2868 ep_priv->epid = epid;
2869
2870 devnum = usb_pipedevice(urb->pipe);
2871 endpoint = usb_pipeendpoint(urb->pipe);
2872 slow = (urb->dev->speed == USB_SPEED_LOW);
2873 maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
2874
2875 if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
2876 /* We want both IN and OUT control traffic to be put on the same
2877 EP/SB list. */
2878 out_traffic = 1;
2879 } else {
2880 out_traffic = usb_pipeout(urb->pipe);
2881 }
2882
2883 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
2884 epid_data = IO_STATE(R_USB_EPT_DATA_ISO, valid, yes) |
2885 /* FIXME: Change any to the actual port? */
2886 IO_STATE(R_USB_EPT_DATA_ISO, port, any) |
2887 IO_FIELD(R_USB_EPT_DATA_ISO, max_len, maxlen) |
2888 IO_FIELD(R_USB_EPT_DATA_ISO, ep, endpoint) |
2889 IO_FIELD(R_USB_EPT_DATA_ISO, dev, devnum);
2890 etrax_epid_iso_set(epid, epid_data);
2891 } else {
2892 epid_data = IO_STATE(R_USB_EPT_DATA, valid, yes) |
2893 IO_FIELD(R_USB_EPT_DATA, low_speed, slow) |
2894 /* FIXME: Change any to the actual port? */
2895 IO_STATE(R_USB_EPT_DATA, port, any) |
2896 IO_FIELD(R_USB_EPT_DATA, max_len, maxlen) |
2897 IO_FIELD(R_USB_EPT_DATA, ep, endpoint) |
2898 IO_FIELD(R_USB_EPT_DATA, dev, devnum);
2899 etrax_epid_set(epid, epid_data);
2900 }
2901
2902 epid_state[epid].out_traffic = out_traffic;
2903 epid_state[epid].type = usb_pipetype(urb->pipe);
2904
2905 tc_warn("Setting up ep:0x%x epid:%d (addr:%d endp:%d max_len:%d %s %s %s)\n",
2906 (unsigned int)ep, epid, devnum, endpoint, maxlen,
2907 str_type(urb->pipe), out_traffic ? "out" : "in",
2908 slow ? "low" : "full");
2909
2910 /* Enable Isoc eof interrupt if we set up the first Isoc epid */
2911 if(usb_pipeisoc(urb->pipe)) {
2912 isoc_epid_counter++;
2913 if(isoc_epid_counter == 1) {
2914 isoc_warn("Enabled Isoc eof interrupt\n");
2915 *R_USB_IRQ_MASK_SET = IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set);
2916 }
2917 }
2918
2919 DBFEXIT;
2920 return epid;
2921 }
2922
2923 static void tc_free_epid(struct usb_host_endpoint *ep) {
2924 unsigned long flags;
2925 struct crisv10_ep_priv *ep_priv = ep->hcpriv;
2926 int epid;
2927 volatile int timeout = 10000;
2928
2929 DBFENTER;
2930
2931 if (ep_priv == NULL) {
2932 tc_warn("Trying to free unused epid on ep:0x%x\n", (unsigned int)ep);
2933 DBFEXIT;
2934 return;
2935 }
2936
2937 epid = ep_priv->epid;
2938
2939 /* Disable Isoc eof interrupt if we free the last Isoc epid */
2940 if(epid_isoc(epid)) {
2941 ASSERT(isoc_epid_counter > 0);
2942 isoc_epid_counter--;
2943 if(isoc_epid_counter == 0) {
2944 *R_USB_IRQ_MASK_CLR = IO_STATE(R_USB_IRQ_MASK_CLR, iso_eof, clr);
2945 isoc_warn("Disabled Isoc eof interrupt\n");
2946 }
2947 }
2948
2949 /* Take lock manualy instead of in epid_x_x wrappers,
2950 because we need to be polling here */
2951 spin_lock_irqsave(&etrax_epid_lock, flags);
2952
2953 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
2954 nop();
2955 while((*R_USB_EPT_DATA & IO_MASK(R_USB_EPT_DATA, hold)) &&
2956 (timeout-- > 0));
2957 if(timeout == 0) {
2958 warn("Timeout while waiting for epid:%d to drop hold\n", epid);
2959 }
2960 /* This will, among other things, set the valid field to 0. */
2961 *R_USB_EPT_DATA = 0;
2962 spin_unlock_irqrestore(&etrax_epid_lock, flags);
2963
2964 /* Free resource in software state info list */
2965 epid_state[epid].inuse = 0;
2966
2967 /* Free private endpoint data */
2968 ep_priv_free(ep);
2969
2970 DBFEXIT;
2971 }
2972
2973 static int tc_allocate_epid(void) {
2974 int i;
2975 DBFENTER;
2976 for (i = 0; i < NBR_OF_EPIDS; i++) {
2977 if (!epid_inuse(i)) {
2978 DBFEXIT;
2979 return i;
2980 }
2981 }
2982
2983 tc_warn("Found no free epids\n");
2984 DBFEXIT;
2985 return -1;
2986 }
2987
2988
2989 /* Wrappers around the list functions (include/linux/list.h). */
2990 /* ---------------------------------------------------------- */
2991 static inline int __urb_list_empty(int epid) {
2992 int retval;
2993 retval = list_empty(&urb_list[epid]);
2994 return retval;
2995 }
2996
2997 /* Returns first urb for this epid, or NULL if list is empty. */
2998 static inline struct urb *urb_list_first(int epid) {
2999 unsigned long flags;
3000 struct urb *first_urb = 0;
3001 spin_lock_irqsave(&urb_list_lock, flags);
3002 if (!__urb_list_empty(epid)) {
3003 /* Get the first urb (i.e. head->next). */
3004 urb_entry_t *urb_entry = list_entry((&urb_list[epid])->next, urb_entry_t, list);
3005 first_urb = urb_entry->urb;
3006 }
3007 spin_unlock_irqrestore(&urb_list_lock, flags);
3008 return first_urb;
3009 }
3010
3011 /* Adds an urb_entry last in the list for this epid. */
3012 static inline void urb_list_add(struct urb *urb, int epid, int mem_flags) {
3013 unsigned long flags;
3014 urb_entry_t *urb_entry = (urb_entry_t *)kmalloc(sizeof(urb_entry_t), mem_flags);
3015 ASSERT(urb_entry);
3016
3017 urb_entry->urb = urb;
3018 spin_lock_irqsave(&urb_list_lock, flags);
3019 list_add_tail(&urb_entry->list, &urb_list[epid]);
3020 spin_unlock_irqrestore(&urb_list_lock, flags);
3021 }
3022
3023 /* Search through the list for an element that contains this urb. (The list
3024 is expected to be short and the one we are about to delete will often be
3025 the first in the list.)
3026 Should be protected by spin_locks in calling function */
3027 static inline urb_entry_t *__urb_list_entry(struct urb *urb, int epid) {
3028 struct list_head *entry;
3029 struct list_head *tmp;
3030 urb_entry_t *urb_entry;
3031
3032 list_for_each_safe(entry, tmp, &urb_list[epid]) {
3033 urb_entry = list_entry(entry, urb_entry_t, list);
3034 ASSERT(urb_entry);
3035 ASSERT(urb_entry->urb);
3036
3037 if (urb_entry->urb == urb) {
3038 return urb_entry;
3039 }
3040 }
3041 return 0;
3042 }
3043
3044 /* Same function as above but for global use. Protects list by spinlock */
3045 static inline urb_entry_t *urb_list_entry(struct urb *urb, int epid) {
3046 unsigned long flags;
3047 urb_entry_t *urb_entry;
3048 spin_lock_irqsave(&urb_list_lock, flags);
3049 urb_entry = __urb_list_entry(urb, epid);
3050 spin_unlock_irqrestore(&urb_list_lock, flags);
3051 return (urb_entry);
3052 }
3053
3054 /* Delete an urb from the list. */
3055 static inline void urb_list_del(struct urb *urb, int epid) {
3056 unsigned long flags;
3057 urb_entry_t *urb_entry;
3058
3059 /* Delete entry and free. */
3060 spin_lock_irqsave(&urb_list_lock, flags);
3061 urb_entry = __urb_list_entry(urb, epid);
3062 ASSERT(urb_entry);
3063
3064 list_del(&urb_entry->list);
3065 spin_unlock_irqrestore(&urb_list_lock, flags);
3066 kfree(urb_entry);
3067 }
3068
3069 /* Move an urb to the end of the list. */
3070 static inline void urb_list_move_last(struct urb *urb, int epid) {
3071 unsigned long flags;
3072 urb_entry_t *urb_entry;
3073
3074 spin_lock_irqsave(&urb_list_lock, flags);
3075 urb_entry = __urb_list_entry(urb, epid);
3076 ASSERT(urb_entry);
3077
3078 list_del(&urb_entry->list);
3079 list_add_tail(&urb_entry->list, &urb_list[epid]);
3080 spin_unlock_irqrestore(&urb_list_lock, flags);
3081 }
3082
3083 /* Get the next urb in the list. */
3084 static inline struct urb *urb_list_next(struct urb *urb, int epid) {
3085 unsigned long flags;
3086 urb_entry_t *urb_entry;
3087
3088 spin_lock_irqsave(&urb_list_lock, flags);
3089 urb_entry = __urb_list_entry(urb, epid);
3090 ASSERT(urb_entry);
3091
3092 if (urb_entry->list.next != &urb_list[epid]) {
3093 struct list_head *elem = urb_entry->list.next;
3094 urb_entry = list_entry(elem, urb_entry_t, list);
3095 spin_unlock_irqrestore(&urb_list_lock, flags);
3096 return urb_entry->urb;
3097 } else {
3098 spin_unlock_irqrestore(&urb_list_lock, flags);
3099 return NULL;
3100 }
3101 }
3102
3103 struct USB_EP_Desc* create_ep(int epid, struct USB_SB_Desc* sb_desc,
3104 int mem_flags) {
3105 struct USB_EP_Desc *ep_desc;
3106 ep_desc = (struct USB_EP_Desc *) kmem_cache_alloc(usb_desc_cache, mem_flags);
3107 if(ep_desc == NULL)
3108 return NULL;
3109 memset(ep_desc, 0, sizeof(struct USB_EP_Desc));
3110
3111 ep_desc->hw_len = 0;
3112 ep_desc->command = (IO_FIELD(USB_EP_command, epid, epid) |
3113 IO_STATE(USB_EP_command, enable, yes));
3114 if(sb_desc == NULL) {
3115 ep_desc->sub = 0;
3116 } else {
3117 ep_desc->sub = virt_to_phys(sb_desc);
3118 }
3119 return ep_desc;
3120 }
3121
3122 #define TT_ZOUT 0
3123 #define TT_IN 1
3124 #define TT_OUT 2
3125 #define TT_SETUP 3
3126
3127 #define CMD_EOL IO_STATE(USB_SB_command, eol, yes)
3128 #define CMD_INTR IO_STATE(USB_SB_command, intr, yes)
3129 #define CMD_FULL IO_STATE(USB_SB_command, full, yes)
3130
3131 /* Allocation and setup of a generic SB. Used to create SETUP, OUT and ZOUT
3132 SBs. Also used by create_sb_in() to avoid same allocation procedure at two
3133 places */
3134 s