Port the usb host driver and install some missing headers

[openwrt/openwrt.git] target/linux/etrax/files-2.6.30/drivers/usb/host/hc-crisv10.c
/*
 *
 * ETRAX 100LX USB Host Controller Driver
 *
 * Copyright (C) 2005 - 2008 Axis Communications AB
 *
 * Author: Konrad Eriksson <konrad.eriksson@axis.se>
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/usb.h>
#include <linux/platform_device.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/arch/dma.h>
#include <asm/arch/io_interface_mux.h>

#include "../core/hcd.h"
#include "../core/hub.h"
#include "hc-crisv10.h"
#include "hc-cris-dbg.h"


/***************************************************************************/
/***************************************************************************/
/*                      Host Controller settings                           */
/***************************************************************************/
/***************************************************************************/

#define VERSION     "1.00-openwrt_diff"
#define COPYRIGHT   "(c) 2005, 2006 Axis Communications AB"
#define DESCRIPTION "ETRAX 100LX USB Host Controller"

#define ETRAX_USB_HC_IRQ USB_HC_IRQ_NBR
#define ETRAX_USB_RX_IRQ USB_DMA_RX_IRQ_NBR
#define ETRAX_USB_TX_IRQ USB_DMA_TX_IRQ_NBR

/* Number of physical ports in Etrax 100LX */
#define USB_ROOT_HUB_PORTS 2

const char hc_name[] = "hc-crisv10";
const char product_desc[] = DESCRIPTION;

/* The number of epids is, among other things, used for pre-allocating
   ctrl, bulk and isoc EP descriptors (one for each epid).
   Assumed to be > 1 when initializing the DMA lists. */
#define NBR_OF_EPIDS 32

/* Support interrupt traffic intervals up to 128 ms. */
#define MAX_INTR_INTERVAL 128

/* If periodic traffic (intr or isoc) is to be used, then one entry in the EP
   table must be "invalid". By this we mean that we shouldn't care about epid
   attentions for this epid, or at least handle them differently from epid
   attentions for "valid" epids. This define determines which one to use
   (don't change it). */
#define INVALID_EPID 31
/* A special epid for the bulk dummies. */
#define DUMMY_EPID 30
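
/* Illustrative summary (added, not from the original sources): with the
   defines above the 32 hardware epids end up partitioned as

     epid  0 .. 29  usable for real endpoints (handed out by tc_allocate_epid())
     epid  30       DUMMY_EPID, reserved for the bulk dummy EP descriptors
     epid  31       INVALID_EPID, the reserved "invalid" entry for periodic lists

   so at most 30 endpoints can be active at the same time. */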

/* Module settings */

MODULE_DESCRIPTION(DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Konrad Eriksson <konrad.eriksson@axis.se>");


/* Module parameters */

/* 0 = No ports enabled
   1 = Only port 1 enabled (on board ethernet on devboard)
   2 = Only port 2 enabled (external connector on devboard)
   3 = Both ports enabled
*/
static unsigned int ports = 3;
module_param(ports, uint, S_IRUGO);
MODULE_PARM_DESC(ports, "Bitmask indicating USB ports to use");
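
/* Usage sketch (added; the module file name is assumed from hc_name):
     insmod hc-crisv10.ko ports=2     # enable only the external connector port
   or, when the driver is built in, on the kernel command line:
     hc-crisv10.ports=2 */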


/***************************************************************************/
/***************************************************************************/
/*                 Shared global variables for this module                 */
/***************************************************************************/
/***************************************************************************/

/* EP descriptor lists for non-periodic transfers. Must be 32-bit aligned. */
static volatile struct USB_EP_Desc TxBulkEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));

static volatile struct USB_EP_Desc TxCtrlEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));

/* EP descriptor lists for periodic transfers. Must be 32-bit aligned. */
static volatile struct USB_EP_Desc TxIntrEPList[MAX_INTR_INTERVAL] __attribute__ ((aligned (4)));
static volatile struct USB_SB_Desc TxIntrSB_zout __attribute__ ((aligned (4)));

static volatile struct USB_EP_Desc TxIsocEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
static volatile struct USB_SB_Desc TxIsocSB_zout __attribute__ ((aligned (4)));

static volatile struct USB_SB_Desc TxIsocSBList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));

/* After each enabled bulk EP IN we put two disabled EP descriptors with the
   eol flag set, causing the DMA to stop the channel. The first of these two
   has the intr flag set, which gives us a dma8_sub0_descr interrupt. When we
   receive this, we advance the DMA one step in the EP list and then restart
   the bulk channel, thus forcing a switch between bulk EP descriptors in each
   frame. */
static volatile struct USB_EP_Desc TxBulkDummyEPList[NBR_OF_EPIDS][2] __attribute__ ((aligned (4)));
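
/* Layout sketch for one bulk epid's slice of the list (added illustration;
   it only restates the comment above, the actual linking is set up elsewhere
   in this driver):

     TxBulkEPList[epid]                 enabled EP descriptor, real traffic
       -> TxBulkDummyEPList[epid][0]    disabled, eol + intr (fires dma8_sub0)
       -> TxBulkDummyEPList[epid][1]    disabled, eol
       -> next epid's EP descriptor ... */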

/* List of URB pointers, where each entry points to the active URB for that
   epid. For Bulk, Ctrl and Intr this is the URB that currently is added to
   the DMA lists (Isoc URBs are all added directly to the DMA lists). As soon
   as a URB has completed, the queue is examined and the first URB in the
   queue is removed and moved to activeUrbList; its state changes to STARTED
   and its transfer(s) get added to the DMA list (except for Isoc, where URBs
   enter state STARTED directly and their transfers are added to the DMA
   lists immediately). */
static struct urb *activeUrbList[NBR_OF_EPIDS];

/* Additional software state info for each epid */
static struct etrax_epid epid_state[NBR_OF_EPIDS];

/* Timers for bulk traffic, used to work around a DMA bug where the DMA stops
   even if there is new data waiting to be processed */
static struct timer_list bulk_start_timer = TIMER_INITIALIZER(NULL, 0, 0);
static struct timer_list bulk_eot_timer = TIMER_INITIALIZER(NULL, 0, 0);

/* We want the start timer to expire before the eot timer, because the former
   might start traffic, thus making it unnecessary for the latter to time
   out. */
#define BULK_START_TIMER_INTERVAL (HZ/50) /* 20 ms */
#define BULK_EOT_TIMER_INTERVAL   (HZ/16) /* ~60 ms */

/* Delay before a URB completion happens when it is scheduled to be delayed */
#define LATER_TIMER_DELAY (HZ/50) /* 20 ms */

/* Simplifying macros for checking software state info of an epid */
/* ----------------------------------------------------------------------- */
#define epid_inuse(epid)       epid_state[epid].inuse
#define epid_out_traffic(epid) epid_state[epid].out_traffic
#define epid_isoc(epid)        (epid_state[epid].type == PIPE_ISOCHRONOUS ? 1 : 0)
#define epid_intr(epid)        (epid_state[epid].type == PIPE_INTERRUPT ? 1 : 0)

/***************************************************************************/
/***************************************************************************/
/*                         DEBUG FUNCTIONS                                 */
/***************************************************************************/
/***************************************************************************/
/* Note that these functions are always available in their "__" variants,
   for use in error situations. The variants without "__" are controlled by
   the USB_DEBUG_DESC/USB_DEBUG_URB macros. */
static void __dump_urb(struct urb *purb)
{
  struct crisv10_urb_priv *urb_priv = purb->hcpriv;
  int urb_num = -1;
  if (urb_priv) {
    urb_num = urb_priv->urb_num;
  }
  printk("\nURB:0x%x[%d]\n", (unsigned int)purb, urb_num);
  printk("dev                   :0x%08lx\n", (unsigned long)purb->dev);
  printk("pipe                  :0x%08x\n", purb->pipe);
  printk("status                :%d\n", purb->status);
  printk("transfer_flags        :0x%08x\n", purb->transfer_flags);
  printk("transfer_buffer       :0x%08lx\n", (unsigned long)purb->transfer_buffer);
  printk("transfer_buffer_length:%d\n", purb->transfer_buffer_length);
  printk("actual_length         :%d\n", purb->actual_length);
  printk("setup_packet          :0x%08lx\n", (unsigned long)purb->setup_packet);
  printk("start_frame           :%d\n", purb->start_frame);
  printk("number_of_packets     :%d\n", purb->number_of_packets);
  printk("interval              :%d\n", purb->interval);
  printk("error_count           :%d\n", purb->error_count);
  printk("context               :0x%08lx\n", (unsigned long)purb->context);
  printk("complete              :0x%08lx\n\n", (unsigned long)purb->complete);
}

static void __dump_in_desc(volatile struct USB_IN_Desc *in)
{
  printk("\nUSB_IN_Desc at 0x%08lx\n", (unsigned long)in);
  printk("  sw_len  : 0x%04x (%d)\n", in->sw_len, in->sw_len);
  printk("  command : 0x%04x\n", in->command);
  printk("  next    : 0x%08lx\n", in->next);
  printk("  buf     : 0x%08lx\n", in->buf);
  printk("  hw_len  : 0x%04x (%d)\n", in->hw_len, in->hw_len);
  printk("  status  : 0x%04x\n\n", in->status);
}

static void __dump_sb_desc(volatile struct USB_SB_Desc *sb)
{
  char tt = (sb->command & 0x30) >> 4;
  char *tt_string;

  switch (tt) {
  case 0:
    tt_string = "zout";
    break;
  case 1:
    tt_string = "in";
    break;
  case 2:
    tt_string = "out";
    break;
  case 3:
    tt_string = "setup";
    break;
  default:
    tt_string = "unknown (weird)";
  }

  printk(" USB_SB_Desc at 0x%08lx ", (unsigned long)sb);
  printk(" command:0x%04x (", sb->command);
  printk("rem:%d ", (sb->command & 0x3f00) >> 8);
  printk("full:%d ", (sb->command & 0x40) >> 6);
  printk("tt:%d(%s) ", tt, tt_string);
  printk("intr:%d ", (sb->command & 0x8) >> 3);
  printk("eot:%d ", (sb->command & 0x2) >> 1);
  printk("eol:%d)", sb->command & 0x1);
  printk(" sw_len:0x%04x(%d)", sb->sw_len, sb->sw_len);
  printk(" next:0x%08lx", sb->next);
  printk(" buf:0x%08lx\n", sb->buf);
}


static void __dump_ep_desc(volatile struct USB_EP_Desc *ep)
{
  printk("USB_EP_Desc at 0x%08lx ", (unsigned long)ep);
  printk(" command:0x%04x (", ep->command);
  printk("ep_id:%d ", (ep->command & 0x1f00) >> 8);
  printk("enable:%d ", (ep->command & 0x10) >> 4);
  printk("intr:%d ", (ep->command & 0x8) >> 3);
  printk("eof:%d ", (ep->command & 0x2) >> 1);
  printk("eol:%d)", ep->command & 0x1);
  printk(" hw_len:0x%04x(%d)", ep->hw_len, ep->hw_len);
  printk(" next:0x%08lx", ep->next);
  printk(" sub:0x%08lx\n", ep->sub);
}

static inline void __dump_ep_list(int pipe_type)
{
  volatile struct USB_EP_Desc *ep;
  volatile struct USB_EP_Desc *first_ep;
  volatile struct USB_SB_Desc *sb;

  switch (pipe_type)
  {
  case PIPE_BULK:
    first_ep = &TxBulkEPList[0];
    break;
  case PIPE_CONTROL:
    first_ep = &TxCtrlEPList[0];
    break;
  case PIPE_INTERRUPT:
    first_ep = &TxIntrEPList[0];
    break;
  case PIPE_ISOCHRONOUS:
    first_ep = &TxIsocEPList[0];
    break;
  default:
    return;
  }
  ep = first_ep;

  printk("\n\nDumping EP list...\n\n");

  do {
    __dump_ep_desc(ep);
    /* Cannot phys_to_virt on 0 as it turns into 0x80000000, which is != 0. */
    sb = ep->sub ? phys_to_virt(ep->sub) : 0;
    while (sb) {
      __dump_sb_desc(sb);
      sb = sb->next ? phys_to_virt(sb->next) : 0;
    }
    ep = (volatile struct USB_EP_Desc *)(phys_to_virt(ep->next));

  } while (ep != first_ep);
}

static inline void __dump_ept_data(int epid)
{
  unsigned long flags;
  __u32 r_usb_ept_data;

  if (epid < 0 || epid > 31) {
    printk("Cannot dump ept data for invalid epid %d\n", epid);
    return;
  }

  local_irq_save(flags);
  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
  nop();
  r_usb_ept_data = *R_USB_EPT_DATA;
  local_irq_restore(flags);

  printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", r_usb_ept_data, epid);
  if (r_usb_ept_data == 0) {
    /* No need for more detailed printing. */
    return;
  }
  printk("  valid           : %d\n", (r_usb_ept_data & 0x80000000) >> 31);
  printk("  hold            : %d\n", (r_usb_ept_data & 0x40000000) >> 30);
  printk("  error_count_in  : %d\n", (r_usb_ept_data & 0x30000000) >> 28);
  printk("  t_in            : %d\n", (r_usb_ept_data & 0x08000000) >> 27);
  printk("  low_speed       : %d\n", (r_usb_ept_data & 0x04000000) >> 26);
  printk("  port            : %d\n", (r_usb_ept_data & 0x03000000) >> 24);
  printk("  error_code      : %d\n", (r_usb_ept_data & 0x00c00000) >> 22);
  printk("  t_out           : %d\n", (r_usb_ept_data & 0x00200000) >> 21);
  printk("  error_count_out : %d\n", (r_usb_ept_data & 0x00180000) >> 19);
  printk("  max_len         : %d\n", (r_usb_ept_data & 0x0003f800) >> 11);
  printk("  ep              : %d\n", (r_usb_ept_data & 0x00000780) >> 7);
  printk("  dev             : %d\n", (r_usb_ept_data & 0x0000003f));
}

static inline void __dump_ept_data_iso(int epid)
{
  unsigned long flags;
  __u32 ept_data;

  if (epid < 0 || epid > 31) {
    printk("Cannot dump ept data for invalid epid %d\n", epid);
    return;
  }

  local_irq_save(flags);
  *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
  nop();
  ept_data = *R_USB_EPT_DATA_ISO;
  local_irq_restore(flags);

  printk(" R_USB_EPT_DATA_ISO = 0x%x for epid %d :\n", ept_data, epid);
  if (ept_data == 0) {
    /* No need for more detailed printing. */
    return;
  }
  printk("  valid      : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, valid,
                                           ept_data));
  printk("  port       : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, port,
                                           ept_data));
  printk("  error_code : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code,
                                           ept_data));
  printk("  max_len    : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, max_len,
                                           ept_data));
  printk("  ep         : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, ep,
                                           ept_data));
  printk("  dev        : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, dev,
                                           ept_data));
}

static inline void __dump_ept_data_list(void)
{
  int i;

  printk("Dumping the whole R_USB_EPT_DATA list\n");

  for (i = 0; i < 32; i++) {
    __dump_ept_data(i);
  }
}

static void debug_epid(int epid) {
  int i;

  if (epid_isoc(epid)) {
    __dump_ept_data_iso(epid);
  } else {
    __dump_ept_data(epid);
  }

  printk("Bulk:\n");
  for (i = 0; i < 32; i++) {
    if (IO_EXTRACT(USB_EP_command, epid, TxBulkEPList[i].command) ==
        epid) {
      printk("%d: ", i); __dump_ep_desc(&(TxBulkEPList[i]));
    }
  }

  printk("Ctrl:\n");
  for (i = 0; i < 32; i++) {
    if (IO_EXTRACT(USB_EP_command, epid, TxCtrlEPList[i].command) ==
        epid) {
      printk("%d: ", i); __dump_ep_desc(&(TxCtrlEPList[i]));
    }
  }

  printk("Intr:\n");
  for (i = 0; i < MAX_INTR_INTERVAL; i++) {
    if (IO_EXTRACT(USB_EP_command, epid, TxIntrEPList[i].command) ==
        epid) {
      printk("%d: ", i); __dump_ep_desc(&(TxIntrEPList[i]));
    }
  }

  printk("Isoc:\n");
  for (i = 0; i < 32; i++) {
    if (IO_EXTRACT(USB_EP_command, epid, TxIsocEPList[i].command) ==
        epid) {
      printk("%d: ", i); __dump_ep_desc(&(TxIsocEPList[i]));
    }
  }

  __dump_ept_data_list();
  __dump_ep_list(PIPE_INTERRUPT);
  printk("\n\n");
}



char* hcd_status_to_str(__u8 bUsbStatus) {
  static char hcd_status_str[128];
  hcd_status_str[0] = '\0';
  if (bUsbStatus & IO_STATE(R_USB_STATUS, ourun, yes)) {
    strcat(hcd_status_str, "ourun ");
  }
  if (bUsbStatus & IO_STATE(R_USB_STATUS, perror, yes)) {
    strcat(hcd_status_str, "perror ");
  }
  if (bUsbStatus & IO_STATE(R_USB_STATUS, device_mode, yes)) {
    strcat(hcd_status_str, "device_mode ");
  }
  if (bUsbStatus & IO_STATE(R_USB_STATUS, host_mode, yes)) {
    strcat(hcd_status_str, "host_mode ");
  }
  if (bUsbStatus & IO_STATE(R_USB_STATUS, started, yes)) {
    strcat(hcd_status_str, "started ");
  }
  if (bUsbStatus & IO_STATE(R_USB_STATUS, running, yes)) {
    strcat(hcd_status_str, "running ");
  }
  return hcd_status_str;
}


char* sblist_to_str(struct USB_SB_Desc* sb_desc) {
  static char sblist_to_str_buff[128];
  char tmp[32], tmp2[32];
  sblist_to_str_buff[0] = '\0';
  while (sb_desc != NULL) {
    switch (IO_EXTRACT(USB_SB_command, tt, sb_desc->command)) {
    case 0: sprintf(tmp, "zout");  break;
    case 1: sprintf(tmp, "in");    break;
    case 2: sprintf(tmp, "out");   break;
    case 3: sprintf(tmp, "setup"); break;
    }
    sprintf(tmp2, "(%s %d)", tmp, sb_desc->sw_len);
    strcat(sblist_to_str_buff, tmp2);
    if (sb_desc->next != 0) {
      sb_desc = phys_to_virt(sb_desc->next);
    } else {
      sb_desc = NULL;
    }
  }
  return sblist_to_str_buff;
}

char* port_status_to_str(__u16 wPortStatus) {
  static char port_status_str[128];
  port_status_str[0] = '\0';
  if (wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, connected, yes)) {
    strcat(port_status_str, "connected ");
  }
  if (wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) {
    strcat(port_status_str, "enabled ");
  }
  if (wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, suspended, yes)) {
    strcat(port_status_str, "suspended ");
  }
  if (wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, reset, yes)) {
    strcat(port_status_str, "reset ");
  }
  if (wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, speed, full)) {
    strcat(port_status_str, "full-speed ");
  } else {
    strcat(port_status_str, "low-speed ");
  }
  return port_status_str;
}


char* endpoint_to_str(struct usb_endpoint_descriptor *ed) {
  static char endpoint_to_str_buff[128];
  char tmp[32];
  int epnum = ed->bEndpointAddress & 0x0F;
  int dir = ed->bEndpointAddress & 0x80;
  int type = ed->bmAttributes & 0x03;
  endpoint_to_str_buff[0] = '\0';
  sprintf(endpoint_to_str_buff, "ep:%d ", epnum);
  switch (type) {
  case 0:
    sprintf(tmp, " ctrl");
    break;
  case 1:
    sprintf(tmp, " isoc");
    break;
  case 2:
    sprintf(tmp, " bulk");
    break;
  case 3:
    sprintf(tmp, " intr");
    break;
  }
  strcat(endpoint_to_str_buff, tmp);
  if (dir) {
    sprintf(tmp, " in");
  } else {
    sprintf(tmp, " out");
  }
  strcat(endpoint_to_str_buff, tmp);

  return endpoint_to_str_buff;
}

/* Debug helper functions for Transfer Controller */
char* pipe_to_str(unsigned int pipe) {
  static char pipe_to_str_buff[128];
  char tmp[64];
  sprintf(pipe_to_str_buff, "dir:%s", str_dir(pipe));
  sprintf(tmp, " type:%s", str_type(pipe));
  strcat(pipe_to_str_buff, tmp);

  sprintf(tmp, " dev:%d", usb_pipedevice(pipe));
  strcat(pipe_to_str_buff, tmp);
  sprintf(tmp, " ep:%d", usb_pipeendpoint(pipe));
  strcat(pipe_to_str_buff, tmp);
  return pipe_to_str_buff;
}
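
/* Example output (added illustration): for a bulk IN pipe to device 3,
   endpoint 2, pipe_to_str() returns something like
     "dir:in type:bulk dev:3 ep:2"
   where the exact dir/type words are whatever str_dir()/str_type() from the
   driver's debug helpers produce. */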


#define USB_DEBUG_DESC 1

#ifdef USB_DEBUG_DESC
#define dump_in_desc(x)    __dump_in_desc(x)
#define dump_sb_desc(...)  __dump_sb_desc(__VA_ARGS__)
#define dump_ep_desc(x)    __dump_ep_desc(x)
#define dump_ept_data(x)   __dump_ept_data(x)
#else
#define dump_in_desc(...)  do {} while (0)
#define dump_sb_desc(...)  do {} while (0)
#define dump_ep_desc(...)  do {} while (0)
#define dump_ept_data(...) do {} while (0)
#endif


/* Uncomment this to enable massive function call trace
#define USB_DEBUG_TRACE */

#ifdef USB_DEBUG_TRACE
#define DBFENTER (printk(": Entering: %s\n", __FUNCTION__))
#define DBFEXIT  (printk(": Exiting:  %s\n", __FUNCTION__))
#else
#define DBFENTER do {} while (0)
#define DBFEXIT  do {} while (0)
#endif
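
/* Illustrative note (added): with USB_DEBUG_TRACE defined, every function
   that brackets its body with DBFENTER/DBFEXIT logs lines such as
     : Entering: crisv10_hcd_top_irq
     : Exiting:  crisv10_hcd_top_irq
   which helps when chasing interrupt ordering problems. */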

#define CHECK_ALIGN(x) if (((__u32)(x)) & 0x00000003) \
{panic("Alignment check (DWORD) failed at %s:%s:%d\n", __FILE__, __FUNCTION__, __LINE__);}

/* Most helpful debugging aid */
#define ASSERT(expr) ((void) ((expr) ? 0 : (err("assert failed at: %s %d", __FUNCTION__, __LINE__))))

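/* Usage sketch (added illustration):
     CHECK_ALIGN(&TxBulkEPList[0]);   // panics unless the pointer is DWORD aligned
     ASSERT(urb_priv != NULL);        // logs function and line instead of crashing */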

/***************************************************************************/
/***************************************************************************/
/*                         Forward declarations                            */
/***************************************************************************/
/***************************************************************************/
void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg *reg);
void crisv10_hcd_port_status_irq(struct crisv10_irq_reg *reg);
void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg *reg);
void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg *reg);

void rh_port_status_change(__u16[]);
int  rh_clear_port_feature(__u8, __u16);
int  rh_set_port_feature(__u8, __u16);
static void rh_disable_port(unsigned int port);

static void check_finished_bulk_tx_epids(struct usb_hcd *hcd,
                                         int timer);

static int  tc_setup_epid(struct usb_host_endpoint *ep, struct urb *urb,
                          int mem_flags);
static void tc_free_epid(struct usb_host_endpoint *ep);
static int  tc_allocate_epid(void);
static void tc_finish_urb(struct usb_hcd *hcd, struct urb *urb, int status);
static void tc_finish_urb_later(struct usb_hcd *hcd, struct urb *urb,
                                int status);

static int  urb_priv_create(struct usb_hcd *hcd, struct urb *urb, int epid,
                            int mem_flags);
static void urb_priv_free(struct usb_hcd *hcd, struct urb *urb);

static int  crisv10_usb_check_bandwidth(struct usb_device *dev, struct urb *urb);
static void crisv10_usb_claim_bandwidth(
  struct usb_device *dev, struct urb *urb, int bustime, int isoc);
static void crisv10_usb_release_bandwidth(
  struct usb_hcd *hcd, int isoc, int bandwidth);

static inline struct urb *urb_list_first(int epid);
static inline void urb_list_add(struct urb *urb, int epid,
                                int mem_flags);
static inline urb_entry_t *urb_list_entry(struct urb *urb, int epid);
static inline void urb_list_del(struct urb *urb, int epid);
static inline void urb_list_move_last(struct urb *urb, int epid);
static inline struct urb *urb_list_next(struct urb *urb, int epid);

int create_sb_for_urb(struct urb *urb, int mem_flags);
int init_intr_urb(struct urb *urb, int mem_flags);

static inline void  etrax_epid_set(__u8 index, __u32 data);
static inline void  etrax_epid_clear_error(__u8 index);
static inline void  etrax_epid_set_toggle(__u8 index, __u8 dirout,
                                          __u8 toggle);
static inline __u8  etrax_epid_get_toggle(__u8 index, __u8 dirout);
static inline __u32 etrax_epid_get(__u8 index);

/* R_USB_EPT_DATA and R_USB_EPT_DATA_ISO occupy the same register position in
   Etrax, so for full-word accesses the internal difference doesn't matter. */
#define etrax_epid_iso_set(index, data) etrax_epid_set(index, data)
#define etrax_epid_iso_get(index) etrax_epid_get(index)


static void tc_dma_process_isoc_urb(struct urb *urb);
static void tc_dma_process_queue(int epid);
static void tc_dma_unlink_intr_urb(struct urb *urb);
static irqreturn_t tc_dma_tx_interrupt(int irq, void *vhc);
static irqreturn_t tc_dma_rx_interrupt(int irq, void *vhc);

static void tc_bulk_start_timer_func(unsigned long dummy);
static void tc_bulk_eot_timer_func(unsigned long dummy);


/*************************************************************/
/*************************************************************/
/*                Host Controller Driver block               */
/*************************************************************/
/*************************************************************/

/* HCD operations */
static irqreturn_t crisv10_hcd_top_irq(int irq, void*);
static int crisv10_hcd_reset(struct usb_hcd *);
static int crisv10_hcd_start(struct usb_hcd *);
static void crisv10_hcd_stop(struct usb_hcd *);
#ifdef CONFIG_PM
static int crisv10_hcd_suspend(struct device *, u32, u32);
static int crisv10_hcd_resume(struct device *, u32);
#endif /* CONFIG_PM */
static int crisv10_hcd_get_frame(struct usb_hcd *);

static int tc_urb_enqueue(struct usb_hcd *, struct urb *, gfp_t mem_flags);
static int tc_urb_dequeue(struct usb_hcd *, struct urb *, int);
static void tc_endpoint_disable(struct usb_hcd *, struct usb_host_endpoint *ep);

static int rh_status_data_request(struct usb_hcd *, char *);
static int rh_control_request(struct usb_hcd *, u16, u16, u16, char*, u16);

#ifdef CONFIG_PM
static int crisv10_hcd_hub_suspend(struct usb_hcd *);
static int crisv10_hcd_hub_resume(struct usb_hcd *);
/* Declared here because crisv10_hc_driver below references them; they are
   defined in the root hub section further down. */
static int rh_suspend_request(struct usb_hcd *hcd);
static int rh_resume_request(struct usb_hcd *hcd);
#endif /* CONFIG_PM */
#ifdef CONFIG_USB_OTG
static int crisv10_hcd_start_port_reset(struct usb_hcd *, unsigned);
#endif /* CONFIG_USB_OTG */

/* host controller driver interface */
static const struct hc_driver crisv10_hc_driver =
{
  .description        = hc_name,
  .product_desc       = product_desc,
  .hcd_priv_size      = sizeof(struct crisv10_hcd),

  /* Attaching IRQ handler manually in probe() */
  /* .irq             = crisv10_hcd_irq, */

  .flags              = HCD_USB11,

  /* called to init HCD and root hub */
  .reset              = crisv10_hcd_reset,
  .start              = crisv10_hcd_start,

  /* cleanly make HCD stop writing memory and doing I/O */
  .stop               = crisv10_hcd_stop,

  /* return current frame number */
  .get_frame_number   = crisv10_hcd_get_frame,


  /* Manage i/o requests via the Transfer Controller */
  .urb_enqueue        = tc_urb_enqueue,
  .urb_dequeue        = tc_urb_dequeue,

  /* hw sync, freeing endpoint resources that urb_dequeue can't */
  .endpoint_disable   = tc_endpoint_disable,


  /* Root Hub support */
  .hub_status_data    = rh_status_data_request,
  .hub_control        = rh_control_request,
#ifdef CONFIG_PM
  .hub_suspend        = rh_suspend_request,
  .hub_resume         = rh_resume_request,
#endif /* CONFIG_PM */
#ifdef CONFIG_USB_OTG
  .start_port_reset   = crisv10_hcd_start_port_reset,
#endif /* CONFIG_USB_OTG */
};


/*
 * conversion between pointers to a hcd and the corresponding
 * crisv10_hcd
 */

static inline struct crisv10_hcd *hcd_to_crisv10_hcd(struct usb_hcd *hcd)
{
  return (struct crisv10_hcd *) hcd->hcd_priv;
}

static inline struct usb_hcd *crisv10_hcd_to_hcd(struct crisv10_hcd *hcd)
{
  return container_of((void *) hcd, struct usb_hcd, hcd_priv);
}

/* check if specified port is in use */
static inline int port_in_use(unsigned int port)
{
  return ports & (1 << port);
}

/* number of ports in use */
static inline unsigned int num_ports(void)
{
  unsigned int i, num = 0;
  for (i = 0; i < USB_ROOT_HUB_PORTS; i++)
    if (port_in_use(i))
      num++;
  return num;
}

/* map hub port number to the port number used internally by the HC */
static inline unsigned int map_port(unsigned int port)
{
  unsigned int i, num = 0;
  for (i = 0; i < USB_ROOT_HUB_PORTS; i++)
    if (port_in_use(i))
      if (++num == port)
        return i;
  return -1;
}
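
/* Worked example (added illustration): hub port numbers are 1-based while
   physical port indexes are 0-based. With ports=3, map_port(1) == 0 and
   map_port(2) == 1. With ports=2 (only the second physical port enabled) the
   root hub exposes a single port and map_port(1) == 1. */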

/* size of descriptors in slab cache */
#ifndef MAX
#define MAX(x, y) ((x) > (y) ? (x) : (y))
#endif


/******************************************************************/
/*               Hardware Interrupt functions                     */
/******************************************************************/

/* Fast interrupt handler for HC */
static irqreturn_t crisv10_hcd_top_irq(int irq, void *vcd)
{
  struct usb_hcd *hcd = vcd;
  struct crisv10_irq_reg reg;
  __u32 irq_mask;
  unsigned long flags;

  DBFENTER;

  ASSERT(hcd != NULL);
  reg.hcd = hcd;

  /* Turn off other interrupts while handling these sensitive cases */
  local_irq_save(flags);

  /* Read out which interrupts are flagged */
  irq_mask = *R_USB_IRQ_MASK_READ;
  reg.r_usb_irq_mask_read = irq_mask;

  /* Reading R_USB_STATUS clears the ctl_status interrupt. Note that
     R_USB_STATUS must be read before R_USB_EPID_ATTN since reading the latter
     clears the ourun and perror fields of R_USB_STATUS. */
  reg.r_usb_status = *R_USB_STATUS;

  /* Reading R_USB_EPID_ATTN clears the iso_eof, bulk_eot and epid_attn
     interrupts. */
  reg.r_usb_epid_attn = *R_USB_EPID_ATTN;

  /* Reading R_USB_RH_PORT_STATUS_1 and R_USB_RH_PORT_STATUS_2 clears the
     port_status interrupt. */
  reg.r_usb_rh_port_status_1 = *R_USB_RH_PORT_STATUS_1;
  reg.r_usb_rh_port_status_2 = *R_USB_RH_PORT_STATUS_2;

  /* Reading R_USB_FM_NUMBER clears the sof interrupt. */
  /* Note: the lower 11 bits contain the actual frame number, sent with each
     sof. */
  reg.r_usb_fm_number = *R_USB_FM_NUMBER;

  /* Interrupts are handled in order of priority. */
  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, port_status)) {
    crisv10_hcd_port_status_irq(&reg);
  }
  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, epid_attn)) {
    crisv10_hcd_epid_attn_irq(&reg);
  }
  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, ctl_status)) {
    crisv10_hcd_ctl_status_irq(&reg);
  }
  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, iso_eof)) {
    crisv10_hcd_isoc_eof_irq(&reg);
  }
  if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, bulk_eot)) {
    /* Update/restart the bulk start timer since obviously the channel is
       running. */
    mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
    /* Update/restart the bulk eot timer since we just received a bulk eot
       interrupt. */
    mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);

    /* Check for finished bulk transfers on epids */
    check_finished_bulk_tx_epids(hcd, 0);
  }
  local_irq_restore(flags);

  DBFEXIT;
  return IRQ_HANDLED;
}


void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg *reg) {
  struct usb_hcd *hcd = reg->hcd;
  struct crisv10_urb_priv *urb_priv;
  int epid;
  DBFENTER;

  for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
    if (test_bit(epid, (void *)&reg->r_usb_epid_attn)) {
      struct urb *urb;
      __u32 ept_data;
      int error_code;

      if (epid == DUMMY_EPID || epid == INVALID_EPID) {
        /* We definitely don't care about these ones. Besides, they are
           always disabled, so any possible disabling caused by the
           epid attention interrupt is irrelevant. */
        continue;
      }

      if (!epid_inuse(epid)) {
        irq_err("Epid attention on epid:%d that isn't in use\n", epid);
        printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
        debug_epid(epid);
        continue;
      }

      /* Note that although there are separate R_USB_EPT_DATA and
         R_USB_EPT_DATA_ISO registers, they are located at the same address
         and are of the same size. In other words, this read should be ok for
         isoc also. */
      ept_data = etrax_epid_get(epid);
      error_code = IO_EXTRACT(R_USB_EPT_DATA, error_code, ept_data);

      /* Get the active URB for this epid. We blatantly assume
         that only this URB could have caused the epid attention. */
      urb = activeUrbList[epid];
      if (urb == NULL) {
        irq_err("Attention on epid:%d error:%d with no active URB.\n",
                epid, error_code);
        printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
        debug_epid(epid);
        continue;
      }

      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
      ASSERT(urb_priv);

      /* Using IO_STATE_VALUE on R_USB_EPT_DATA should be ok for isoc also. */
      if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {

        /* Isoc traffic doesn't have error_count_in/error_count_out. */
        if ((usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS) &&
            (IO_EXTRACT(R_USB_EPT_DATA, error_count_in, ept_data) == 3 ||
             IO_EXTRACT(R_USB_EPT_DATA, error_count_out, ept_data) == 3)) {
          /* Check if the URB is already marked for late finish; we can get
             several third errors for Intr traffic when a device is
             unplugged */
          if (urb_priv->later_data == NULL) {
            /* 3rd error. */
            irq_warn("3rd error for epid:%d (%s %s) URB:0x%x[%d]\n", epid,
                     str_dir(urb->pipe), str_type(urb->pipe),
                     (unsigned int)urb, urb_priv->urb_num);

            tc_finish_urb_later(hcd, urb, -EPROTO);
          }

        } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {
          irq_warn("Perror for epid:%d\n", epid);
          printk("FM_NUMBER: %d\n", reg->r_usb_fm_number & 0x7ff);
          printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
          __dump_urb(urb);
          debug_epid(epid);

          if (!(ept_data & IO_MASK(R_USB_EPT_DATA, valid))) {
            /* invalid ep_id */
            panic("Perror because of invalid epid."
                  " Deconfigured too early?");
          } else {
            /* past eof1, near eof, zout transfer, setup transfer */
            /* Dump the urb and the relevant EP descriptor. */
            panic("Something wrong with DMA descriptor contents."
                  " Too much traffic inserted?");
          }
        } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
          /* buffer ourun */
          printk("FM_NUMBER: %d\n", reg->r_usb_fm_number & 0x7ff);
          printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
          __dump_urb(urb);
          debug_epid(epid);

          panic("Buffer overrun/underrun for epid:%d. DMA too busy?", epid);
        } else {
          irq_warn("Attention on epid:%d (%s %s) with no error code\n", epid,
                   str_dir(urb->pipe), str_type(urb->pipe));
          printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
          __dump_urb(urb);
          debug_epid(epid);
        }

      } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
                                              stall)) {
        /* Not really a protocol error, just says that the endpoint gave
           a stall response. Note that error_code cannot be stall for isoc. */
        if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
          panic("Isoc traffic cannot stall");
        }

        tc_dbg("Stall for epid:%d (%s %s) URB:0x%x\n", epid,
               str_dir(urb->pipe), str_type(urb->pipe), (unsigned int)urb);
        tc_finish_urb(hcd, urb, -EPIPE);

      } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
                                              bus_error)) {
        /* Two devices responded to a transaction request. Must be resolved
           by software. FIXME: Reset ports? */
        panic("Bus error for epid %d."
              " Two devices responded to transaction request\n",
              epid);

      } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
                                              buffer_error)) {
        /* DMA overrun or underrun. */
        irq_warn("Buffer overrun/underrun for epid:%d (%s %s)\n", epid,
                 str_dir(urb->pipe), str_type(urb->pipe));

        /* It seems that error_code = buffer_error in
           R_USB_EPT_DATA/R_USB_EPT_DATA_ISO and ourun = yes in R_USB_STATUS
           are the same error. */
        tc_finish_urb(hcd, urb, -EPROTO);
      } else {
        irq_warn("Unknown attention on epid:%d (%s %s)\n", epid,
                 str_dir(urb->pipe), str_type(urb->pipe));
        dump_ept_data(epid);
      }
    }
  }
  DBFEXIT;
}

void crisv10_hcd_port_status_irq(struct crisv10_irq_reg *reg)
{
  __u16 port_reg[USB_ROOT_HUB_PORTS];
  DBFENTER;
  port_reg[0] = reg->r_usb_rh_port_status_1;
  port_reg[1] = reg->r_usb_rh_port_status_2;
  rh_port_status_change(port_reg);
  DBFEXIT;
}

void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg *reg)
{
  int epid;
  struct urb *urb;
  struct crisv10_urb_priv *urb_priv;

  DBFENTER;

  for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {

    /* Only check epids that are in use, are valid and have an SB list */
    if (!epid_inuse(epid) || epid == INVALID_EPID ||
        TxIsocEPList[epid].sub == 0 || epid == DUMMY_EPID) {
      /* Nothing here to see. */
      continue;
    }
    ASSERT(epid_isoc(epid));

    /* Get the active URB for this epid (if any). */
    urb = activeUrbList[epid];
    if (urb == 0) {
      isoc_warn("Ignoring NULL urb for epid:%d\n", epid);
      continue;
    }
    if (!epid_out_traffic(epid)) {
      /* Sanity check. */
      ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);

      urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
      ASSERT(urb_priv);

      if (urb_priv->urb_state == NOT_STARTED) {
        /* If ASAP is not set and urb->start_frame is the current frame,
           start the transfer. */
        if (!(urb->transfer_flags & URB_ISO_ASAP) &&
            (urb->start_frame == (*R_USB_FM_NUMBER & 0x7ff))) {
          /* EP should not be enabled if we're waiting for start_frame */
          ASSERT((TxIsocEPList[epid].command &
                  IO_STATE(USB_EP_command, enable, yes)) == 0);

          isoc_warn("Enabling isoc IN EP descr for epid %d\n", epid);
          TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);

          /* This urb is now active. */
          urb_priv->urb_state = STARTED;
          continue;
        }
      }
    }
  }

  DBFEXIT;
}

void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg *reg)
{
  struct crisv10_hcd *crisv10_hcd = hcd_to_crisv10_hcd(reg->hcd);

  DBFENTER;
  ASSERT(crisv10_hcd);

  /* irq_dbg("ctr_status_irq, controller status: %s\n",
             hcd_status_to_str(reg->r_usb_status)); */

  /* FIXME: What should we do if we get ourun or perror? Dump the EP and SB
     list for the corresponding epid? */
  if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
    panic("USB controller got ourun.");
  }
  if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {

    /* Before, etrax_usb_do_intr_recover was called on this epid if it was
       an interrupt pipe. I don't see how re-enabling all EP descriptors
       will help if there was a programming error. */
    panic("USB controller got perror.");
  }

  /* Keep track of whether the USB controller is running or not */
  if (reg->r_usb_status & IO_STATE(R_USB_STATUS, running, yes)) {
    crisv10_hcd->running = 1;
  } else {
    crisv10_hcd->running = 0;
  }

  if (reg->r_usb_status & IO_MASK(R_USB_STATUS, device_mode)) {
    /* We should never operate in device mode. */
    panic("USB controller in device mode.");
  }

  /* Set the flag to avoid getting "Unlink after no-IRQ? Controller is probably
     using the wrong IRQ" from hcd_unlink_urb() in drivers/usb/core/hcd.c */
  set_bit(HCD_FLAG_SAW_IRQ, &reg->hcd->flags);

  DBFEXIT;
}


/******************************************************************/
/*            Host Controller interface functions                 */
/******************************************************************/

static inline void crisv10_ready_wait(void) {
  volatile int timeout = 10000;
  /* Check the busy bit of USB controller in Etrax */
  while ((*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy)) &&
         (timeout-- > 0));
}
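
/* Note (added commentary): the poll above is bounded to ~10000 iterations, so
   a wedged busy bit cannot hang the caller forever; callers simply issue the
   next R_USB_COMMAND write regardless of whether the bit actually cleared. */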

/* reset host controller */
static int crisv10_hcd_reset(struct usb_hcd *hcd)
{
  DBFENTER;
  hcd_dbg(hcd, "reset\n");


  /* Reset the USB interface. */
  /*
  *R_USB_COMMAND =
    IO_STATE(R_USB_COMMAND, port_sel, nop) |
    IO_STATE(R_USB_COMMAND, port_cmd, reset) |
    IO_STATE(R_USB_COMMAND, ctrl_cmd, reset);
  nop();
  */
  DBFEXIT;
  return 0;
}

/* start host controller */
static int crisv10_hcd_start(struct usb_hcd *hcd)
{
  DBFENTER;
  hcd_dbg(hcd, "start\n");

  crisv10_ready_wait();

  /* Start processing of USB traffic. */
  *R_USB_COMMAND =
    IO_STATE(R_USB_COMMAND, port_sel, nop) |
    IO_STATE(R_USB_COMMAND, port_cmd, reset) |
    IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);

  nop();

  hcd->state = HC_STATE_RUNNING;

  DBFEXIT;
  return 0;
}

/* stop host controller */
static void crisv10_hcd_stop(struct usb_hcd *hcd)
{
  DBFENTER;
  hcd_dbg(hcd, "stop\n");
  crisv10_hcd_reset(hcd);
  DBFEXIT;
}

/* return the current frame number */
static int crisv10_hcd_get_frame(struct usb_hcd *hcd)
{
  DBFENTER;
  DBFEXIT;
  return (*R_USB_FM_NUMBER & 0x7ff);
}

#ifdef CONFIG_USB_OTG

static int crisv10_hcd_start_port_reset(struct usb_hcd *hcd, unsigned port)
{
  return 0; /* no-op for now */
}

#endif /* CONFIG_USB_OTG */


/******************************************************************/
/*                     Root Hub functions                         */
/******************************************************************/

/* root hub status */
static const struct usb_hub_status rh_hub_status =
{
  .wHubStatus = 0,
  .wHubChange = 0,
};

/* root hub descriptor */
static const u8 rh_hub_descr[] =
{
  0x09,               /* bDescLength */
  0x29,               /* bDescriptorType */
  USB_ROOT_HUB_PORTS, /* bNbrPorts */
  0x00,               /* wHubCharacteristics */
  0x00,
  0x01,               /* bPwrOn2pwrGood */
  0x00,               /* bHubContrCurrent */
  0x00,               /* DeviceRemovable */
  0xff                /* PortPwrCtrlMask */
};

/* Actual holder of root hub status */
struct crisv10_rh rh;

/* Initialize root hub data structures (called from dvdrv_hcd_probe()) */
int rh_init(void) {
  int i;
  /* Reset port status flags */
  for (i = 0; i < USB_ROOT_HUB_PORTS; i++) {
    rh.wPortChange[i] = 0;
    rh.wPortStatusPrev[i] = 0;
  }
  return 0;
}

#define RH_FEAT_MASK ((1<<USB_PORT_FEAT_CONNECTION)|\
                      (1<<USB_PORT_FEAT_ENABLE)|\
                      (1<<USB_PORT_FEAT_SUSPEND)|\
                      (1<<USB_PORT_FEAT_RESET))

/* Handle port status change interrupt (called from bottom part interrupt) */
void rh_port_status_change(__u16 port_reg[]) {
  int i;
  __u16 wChange;

  for (i = 0; i < USB_ROOT_HUB_PORTS; i++) {
    /* XOR out changes since last read, masked for important flags */
    wChange = (port_reg[i] & RH_FEAT_MASK) ^ rh.wPortStatusPrev[i];
    /* OR the changes together with any previously saved changes */
    rh.wPortChange[i] |= wChange;
    /* Save new status */
    rh.wPortStatusPrev[i] = port_reg[i];

    if (wChange) {
      rh_dbg("Interrupt port_status change port%d: %s Current-status:%s\n", i+1,
             port_status_to_str(wChange),
             port_status_to_str(port_reg[i]));
    }
  }
}
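
/* Worked example (added illustration): if a device is plugged into an idle
   port whose previous status was 0, the connected bit goes 0 -> 1, so
     wChange = (new_status & RH_FEAT_MASK) ^ 0 = connected
   and that change bit stays set in wPortChange until the hub driver clears
   it with ClearPortFeature(C_CONNECTION). */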

/* Construct port status change bitmap for the root hub */
static int rh_status_data_request(struct usb_hcd *hcd, char *buf)
{
  struct crisv10_hcd *crisv10_hcd = hcd_to_crisv10_hcd(hcd);
  unsigned int i;

  DBFENTER;
  /*
   * corresponds to hub status change EP (USB 2.0 spec section 11.13.4)
   * return bitmap indicating ports with status change
   */
  *buf = 0;
  spin_lock(&crisv10_hcd->lock);
  for (i = 1; i <= crisv10_hcd->num_ports; i++) {
    if (rh.wPortChange[map_port(i)]) {
      *buf |= (1 << i);
      rh_dbg("rh_status_data_request, change on port %d: %s Current Status: %s\n", i,
             port_status_to_str(rh.wPortChange[map_port(i)]),
             port_status_to_str(rh.wPortStatusPrev[map_port(i)]));
    }
  }
  spin_unlock(&crisv10_hcd->lock);
  DBFEXIT;
  return *buf == 0 ? 0 : 1;
}

/* Handle a control request for the root hub (called from hcd_driver) */
static int rh_control_request(struct usb_hcd *hcd,
                              u16 typeReq,
                              u16 wValue,
                              u16 wIndex,
                              char *buf,
                              u16 wLength) {

  struct crisv10_hcd *crisv10_hcd = hcd_to_crisv10_hcd(hcd);
  int retval = 0;
  int len;
  DBFENTER;

  switch (typeReq) {
  case GetHubDescriptor:
    rh_dbg("GetHubDescriptor\n");
    len = min_t(unsigned int, sizeof rh_hub_descr, wLength);
    memcpy(buf, rh_hub_descr, len);
    buf[2] = crisv10_hcd->num_ports;
    break;
  case GetHubStatus:
    rh_dbg("GetHubStatus\n");
    len = min_t(unsigned int, sizeof rh_hub_status, wLength);
    memcpy(buf, &rh_hub_status, len);
    break;
  case GetPortStatus:
    if (!wIndex || wIndex > crisv10_hcd->num_ports)
      goto error;
    rh_dbg("GetPortStatus, port:%d change:%s status:%s\n", wIndex,
           port_status_to_str(rh.wPortChange[map_port(wIndex)]),
           port_status_to_str(rh.wPortStatusPrev[map_port(wIndex)]));
    *(u16 *) buf = cpu_to_le16(rh.wPortStatusPrev[map_port(wIndex)]);
    *(u16 *) (buf + 2) = cpu_to_le16(rh.wPortChange[map_port(wIndex)]);
    break;
  case SetHubFeature:
    rh_dbg("SetHubFeature\n");
    /* fall through */
  case ClearHubFeature:
    rh_dbg("ClearHubFeature\n");
    switch (wValue) {
    case C_HUB_OVER_CURRENT:
    case C_HUB_LOCAL_POWER:
      rh_warn("Not implemented hub request:%d\n", typeReq);
      /* not implemented */
      break;
    default:
      goto error;
    }
    break;
  case SetPortFeature:
    if (!wIndex || wIndex > crisv10_hcd->num_ports)
      goto error;
    if (rh_set_port_feature(map_port(wIndex), wValue))
      goto error;
    break;
  case ClearPortFeature:
    if (!wIndex || wIndex > crisv10_hcd->num_ports)
      goto error;
    if (rh_clear_port_feature(map_port(wIndex), wValue))
      goto error;
    break;
  default:
    rh_warn("Unknown hub request: %d\n", typeReq);
  error:
    retval = -EPIPE;
  }
  DBFEXIT;
  return retval;
}

int rh_set_port_feature(__u8 bPort, __u16 wFeature) {
  __u8 bUsbCommand = 0;
  __u8 reset_cnt;
  switch (wFeature) {
  case USB_PORT_FEAT_RESET:
    rh_dbg("SetPortFeature: reset\n");

    if (rh.wPortStatusPrev[bPort] &
        IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes))
    {
      __u8 restart_controller = 0;

      if ( (rh.wPortStatusPrev[0] &
            IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) &&
           (rh.wPortStatusPrev[1] &
            IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, yes)) )
      {
        /* Both ports are enabled. The USB controller will not change state. */
        restart_controller = 0;
      }
      else
      {
        /* Only one port is enabled. The USB controller will change state and
           must be restarted. */
        restart_controller = 1;
      }
      /*
        In ETRAX 100LX it's not possible to reset an enabled root hub port.
        The workaround is to disable and enable the port before resetting it.
        Disabling the port can, if both ports are disabled at once, cause the
        USB controller to change state to HOST_MODE state.
        The USB controller state transition causes a lot of unwanted
        interrupts that must be avoided.
        Disabling the USB controller status and port status interrupts before
        disabling/resetting the port stops these interrupts.

        These actions are performed:
        1.  Disable USB controller status and port status interrupts.
        2.  Disable the port.
        3.  Wait for the port to be disabled.
        4.  Enable the port.
        5.  Wait for the port to be enabled.
        6.  Reset the port.
        7.  Wait for the reset to end.
        8.  Wait for the USB controller to enter the started state.
        9.  Order the USB controller to the running state.
        10. Wait for the USB controller to reach the running state.
        11. Clear all interrupts generated during the disable/enable/reset
            procedure.
        12. Enable the USB controller status and port status interrupts.
      */

      /* 1. Disable USB controller status and USB port status interrupts. */
      *R_USB_IRQ_MASK_CLR = IO_STATE(R_USB_IRQ_MASK_CLR, ctl_status, clr);
      __asm__ __volatile__ ("  nop");
      *R_USB_IRQ_MASK_CLR = IO_STATE(R_USB_IRQ_MASK_CLR, port_status, clr);
      __asm__ __volatile__ ("  nop");

      {

        /* Since a root hub port reset shall be 50 ms and the ETRAX 100LX
           root hub port reset is 10 ms, we must perform 5 port resets to
           achieve a proper root hub port reset. */
        for (reset_cnt = 0; reset_cnt < 5; reset_cnt++)
        {
          rh_dbg("Disable Port %d\n", bPort + 1);

          /* 2. Disable the port */
          if (bPort == 0)
          {
            *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
          }
          else
          {
            *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, yes);
          }

          /* 3. Wait for the port to be disabled. */
          while ( (bPort == 0) ?
                  *R_USB_RH_PORT_STATUS_1 &
                    IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes) :
                  *R_USB_RH_PORT_STATUS_2 &
                    IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, yes) ) {}

          rh_dbg("Port %d is disabled. Enable it!\n", bPort + 1);

          /* 4. Enable the port. */
          if (bPort == 0)
          {
            *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
          }
          else
          {
            *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, no);
          }

          /* 5. Wait for the port to be enabled again. */
          while (!( (bPort == 0) ?
                    *R_USB_RH_PORT_STATUS_1 &
                      IO_STATE(R_USB_RH_PORT_STATUS_1, connected, yes) :
                    *R_USB_RH_PORT_STATUS_2 &
                      IO_STATE(R_USB_RH_PORT_STATUS_2, connected, yes) ) ) {}

          rh_dbg("Port %d is enabled.\n", bPort + 1);

          /* 6. Reset the port */
          crisv10_ready_wait();
          *R_USB_COMMAND =
            ( (bPort == 0) ?
              IO_STATE(R_USB_COMMAND, port_sel, port1) :
              IO_STATE(R_USB_COMMAND, port_sel, port2) ) |
            IO_STATE(R_USB_COMMAND, port_cmd, reset) |
            IO_STATE(R_USB_COMMAND, busy, no) |
            IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
          rh_dbg("Port %d is resetting.\n", bPort + 1);

          /* 7. The USB specification says that we should wait for at least
             10 ms for the device to recover */
          udelay(10500); /* 10.5 ms blocking wait */

          crisv10_ready_wait();
        }
      }


      /* Check if the USB controller needs to be restarted. */
      if (restart_controller)
      {
        /* 8. Wait for the USB controller to enter the started state. */
        while (!(*R_USB_STATUS & IO_STATE(R_USB_STATUS, started, yes))) {}

        /* 9. Order the USB controller to the running state. */
        crisv10_ready_wait();
        *R_USB_COMMAND =
          IO_STATE(R_USB_COMMAND, port_sel, nop) |
          IO_STATE(R_USB_COMMAND, port_cmd, reset) |
          IO_STATE(R_USB_COMMAND, busy, no) |
          IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);

        /* 10. Wait for the USB controller to reach the running state. */
        while (!(*R_USB_STATUS & IO_STATE(R_USB_STATUS, running, yes))) {}
      }

      /* 11. Clear any controller or port status interrupts before enabling
         the interrupts. */
      {
        u16 dummy;

        /* Clear the port status interrupt of the reset port. */
        if (bPort == 0)
        {
          rh_dbg("Clearing port 1 interrupts\n");
          dummy = *R_USB_RH_PORT_STATUS_1;
        }
        else
        {
          rh_dbg("Clearing port 2 interrupts\n");
          dummy = *R_USB_RH_PORT_STATUS_2;
        }

        if (restart_controller)
        {
          /* The USB controller was restarted. Clear all interrupts. */
          rh_dbg("Clearing all interrupts\n");
          dummy = *R_USB_STATUS;
          dummy = *R_USB_RH_PORT_STATUS_1;
          dummy = *R_USB_RH_PORT_STATUS_2;
        }
      }

      /* 12. Enable USB controller status and USB port status interrupts. */
      *R_USB_IRQ_MASK_SET = IO_STATE(R_USB_IRQ_MASK_SET, ctl_status, set);
      __asm__ __volatile__ ("  nop");
      *R_USB_IRQ_MASK_SET = IO_STATE(R_USB_IRQ_MASK_SET, port_status, set);
      __asm__ __volatile__ ("  nop");

    }
    else
    {

      bUsbCommand |= IO_STATE(R_USB_COMMAND, port_cmd, reset);
      /* Select which port via the port_sel field */
      bUsbCommand |= IO_FIELD(R_USB_COMMAND, port_sel, bPort + 1);

      /* Make sure the controller isn't busy. */
      crisv10_ready_wait();
      /* Send out the actual command to the USB controller */
      *R_USB_COMMAND = bUsbCommand;

      /* Wait a while for the controller to become started again after the
         port reset */
      udelay(12000); /* 12 ms blocking wait */

      /* Make sure the controller isn't busy. */
      crisv10_ready_wait();

      /* If all enabled ports were disabled the host controller goes down into
         started mode, so we need to bring it back into the running state.
         (This is safe even if it's already in the running state.) */
      *R_USB_COMMAND =
        IO_STATE(R_USB_COMMAND, port_sel, nop) |
        IO_STATE(R_USB_COMMAND, port_cmd, reset) |
        IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
    }

    break;
  case USB_PORT_FEAT_SUSPEND:
    rh_dbg("SetPortFeature: suspend\n");
    bUsbCommand |= IO_STATE(R_USB_COMMAND, port_cmd, suspend);
    goto set;
    break;
  case USB_PORT_FEAT_POWER:
    rh_dbg("SetPortFeature: power\n");
    break;
  case USB_PORT_FEAT_C_CONNECTION:
    rh_dbg("SetPortFeature: c_connection\n");
    break;
  case USB_PORT_FEAT_C_RESET:
    rh_dbg("SetPortFeature: c_reset\n");
    break;
  case USB_PORT_FEAT_C_OVER_CURRENT:
    rh_dbg("SetPortFeature: c_over_current\n");
    break;

  set:
    /* Select which port via the port_sel field */
    bUsbCommand |= IO_FIELD(R_USB_COMMAND, port_sel, bPort + 1);

    /* Make sure the controller isn't busy. */
    crisv10_ready_wait();
    /* Send out the actual command to the USB controller */
    *R_USB_COMMAND = bUsbCommand;
    break;
  default:
    rh_dbg("SetPortFeature: unknown feature\n");
    return -1;
  }
  return 0;
}

int rh_clear_port_feature(__u8 bPort, __u16 wFeature) {
  switch (wFeature) {
  case USB_PORT_FEAT_ENABLE:
    rh_dbg("ClearPortFeature: enable\n");
    rh_disable_port(bPort);
    break;
  case USB_PORT_FEAT_SUSPEND:
    rh_dbg("ClearPortFeature: suspend\n");
    break;
  case USB_PORT_FEAT_POWER:
    rh_dbg("ClearPortFeature: power\n");
    break;

  case USB_PORT_FEAT_C_ENABLE:
    rh_dbg("ClearPortFeature: c_enable\n");
    goto clear;
  case USB_PORT_FEAT_C_SUSPEND:
    rh_dbg("ClearPortFeature: c_suspend\n");
    goto clear;
  case USB_PORT_FEAT_C_CONNECTION:
    rh_dbg("ClearPortFeature: c_connection\n");
    goto clear;
  case USB_PORT_FEAT_C_OVER_CURRENT:
    rh_dbg("ClearPortFeature: c_over_current\n");
    goto clear;
  case USB_PORT_FEAT_C_RESET:
    rh_dbg("ClearPortFeature: c_reset\n");
    goto clear;
  clear:
    rh.wPortChange[bPort] &= ~(1 << (wFeature - 16));
    break;
  default:
    rh_dbg("ClearPortFeature: unknown feature\n");
    return -1;
  }
  return 0;
}


#ifdef CONFIG_PM
/* Handle a suspend request for the root hub (called from hcd_driver) */
static int rh_suspend_request(struct usb_hcd *hcd)
{
  return 0; /* no-op for now */
}

/* Handle a resume request for the root hub (called from hcd_driver) */
static int rh_resume_request(struct usb_hcd *hcd)
{
  return 0; /* no-op for now */
}
#endif /* CONFIG_PM */



/* Wrapper for the workaround port disable registers in the USB controller */
static void rh_disable_port(unsigned int port) {
  volatile int timeout = 10000;
  volatile char *usb_portx_disable;
  switch (port) {
  case 0:
    usb_portx_disable = R_USB_PORT1_DISABLE;
    break;
  case 1:
    usb_portx_disable = R_USB_PORT2_DISABLE;
    break;
  default:
    /* Invalid port index */
    return;
  }
  /* Set disable flag in special register */
  *usb_portx_disable = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
  /* Wait until not enabled anymore */
  while ((rh.wPortStatusPrev[port] &
          IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) &&
         (timeout-- > 0));

  /* Clear disable flag in special register */
  *usb_portx_disable = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
  rh_info("Physical port %d disabled\n", port + 1);
}


/******************************************************************/
/*            Transfer Controller (TC) functions                  */
/******************************************************************/

/* FIXME: Should RX_BUF_SIZE be a config option, or maybe we should adjust it
   dynamically?
   To adjust it dynamically we would have to get an interrupt when we reach
   the end of the rx descriptor list, or when we get close to the end, and
   then allocate more descriptors. */
#define NBR_OF_RX_DESC   512
#define RX_DESC_BUF_SIZE 1024
#define RX_BUF_SIZE      (NBR_OF_RX_DESC * RX_DESC_BUF_SIZE)
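
/* Added note: with the values above the statically allocated RX buffer is
   512 * 1024 bytes = 512 KiB (see RxBuf below). */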


/* Local variables for Transfer Controller */
/* --------------------------------------- */

/* This is a circular (doubly-linked) list of the active urbs for each epid.
   The head is never removed, and new urbs are linked onto the list as
   urb_entry_t elements. Don't reference urb_list directly; use the wrapper
   functions instead (which include the spin locks). */
static struct list_head urb_list[NBR_OF_EPIDS];

/* Read about the need and usage of this lock in submit_ctrl_urb. */
/* Lock for URB lists for each EPID */
static spinlock_t urb_list_lock;

/* Lock for EPID array register (R_USB_EPT_x) in Etrax */
static spinlock_t etrax_epid_lock;

/* Lock for dma8 sub0 handling */
static spinlock_t etrax_dma8_sub0_lock;

/* DMA IN cache bug. Align the DMA IN buffers to 32 bytes, i.e. a cache line.
   Since RX_DESC_BUF_SIZE (1024) is a multiple of 32, all rx buffers will be
   cache aligned. */
static volatile unsigned char RxBuf[RX_BUF_SIZE] __attribute__ ((aligned (32)));
static volatile struct USB_IN_Desc RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned (4)));

/* Pointers into RxDescList. */
static volatile struct USB_IN_Desc *myNextRxDesc;
static volatile struct USB_IN_Desc *myLastRxDesc;

/* A zout transfer makes a memory access at the address of its buf pointer,
   which means that setting this buf pointer to 0 will cause an access to the
   flash. In addition to this, setting sw_len to 0 results in a 16/32 byte
   (depending on DMA burst size) transfer.
   Instead, we set sw_len to 1 and point buf at this buffer. */
static int zout_buffer[4] __attribute__ ((aligned (4)));

/* Cache for allocating new EP and SB descriptors. */
static struct kmem_cache *usb_desc_cache;

/* Cache for the data allocated in the isoc descr top half. */
static struct kmem_cache *isoc_compl_cache;

/* Cache for the data allocated when delayed finishing of URBs */
static struct kmem_cache *later_data_cache;

1694
/* Counter to keep track of how many Isoc EPs we have set up. Used to enable
   and disable the iso_eof interrupt. We only need this interrupt when we have
   Isoc data endpoints (it consumes CPU cycles).
   FIXME: This could be more fine-grained, so the interrupt is only enabled
   when we have an In Isoc URB without the URB_ISO_ASAP flag queued. */
1700 static int isoc_epid_counter;
1701
1702 /* Protecting wrapper functions for R_USB_EPT_x */
1703 /* -------------------------------------------- */
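/* The endpoint table is accessed indirectly: write the epid to
   R_USB_EPT_INDEX, wait one cycle (the nop()), then read or write
   R_USB_EPT_DATA. etrax_epid_lock serializes the index/data pair so
   concurrent users can't interleave their accesses. */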
1704 static inline void etrax_epid_set(__u8 index, __u32 data) {
1705 unsigned long flags;
1706 spin_lock_irqsave(&etrax_epid_lock, flags);
1707 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
1708 nop();
1709 *R_USB_EPT_DATA = data;
1710 spin_unlock_irqrestore(&etrax_epid_lock, flags);
1711 }
1712
1713 static inline void etrax_epid_clear_error(__u8 index) {
1714 unsigned long flags;
1715 spin_lock_irqsave(&etrax_epid_lock, flags);
1716 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
1717 nop();
1718 *R_USB_EPT_DATA &=
1719 ~(IO_MASK(R_USB_EPT_DATA, error_count_in) |
1720 IO_MASK(R_USB_EPT_DATA, error_count_out) |
1721 IO_MASK(R_USB_EPT_DATA, error_code));
1722 spin_unlock_irqrestore(&etrax_epid_lock, flags);
1723 }
1724
1725 static inline void etrax_epid_set_toggle(__u8 index, __u8 dirout,
1726 __u8 toggle) {
1727 unsigned long flags;
1728 spin_lock_irqsave(&etrax_epid_lock, flags);
1729 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
1730 nop();
1731 if(dirout) {
1732 *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_out);
1733 *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_out, toggle);
1734 } else {
1735 *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_in);
1736 *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_in, toggle);
1737 }
1738 spin_unlock_irqrestore(&etrax_epid_lock, flags);
1739 }
1740
1741 static inline __u8 etrax_epid_get_toggle(__u8 index, __u8 dirout) {
1742 unsigned long flags;
1743 __u8 toggle;
1744 spin_lock_irqsave(&etrax_epid_lock, flags);
1745 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
1746 nop();
1747 if (dirout) {
1748 toggle = IO_EXTRACT(R_USB_EPT_DATA, t_out, *R_USB_EPT_DATA);
1749 } else {
1750 toggle = IO_EXTRACT(R_USB_EPT_DATA, t_in, *R_USB_EPT_DATA);
1751 }
1752 spin_unlock_irqrestore(&etrax_epid_lock, flags);
1753 return toggle;
1754 }
1755
1756
1757 static inline __u32 etrax_epid_get(__u8 index) {
1758 unsigned long flags;
1759 __u32 data;
1760 spin_lock_irqsave(&etrax_epid_lock, flags);
1761 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
1762 nop();
1763 data = *R_USB_EPT_DATA;
1764 spin_unlock_irqrestore(&etrax_epid_lock, flags);
1765 return data;
1766 }
1767
1768
1769
1770
1771 /* Main functions for Transfer Controller */
1772 /* -------------------------------------- */
1773
1774 /* Init structs, memories and lists used by Transfer Controller */
1775 int tc_init(struct usb_hcd *hcd) {
1776 int i;
1777 /* Clear software state info for all epids */
1778 memset(epid_state, 0, sizeof(struct etrax_epid) * NBR_OF_EPIDS);
1779
1780 /* Set Invalid and Dummy as being in use and disabled */
1781 epid_state[INVALID_EPID].inuse = 1;
1782 epid_state[DUMMY_EPID].inuse = 1;
1783 epid_state[INVALID_EPID].disabled = 1;
1784 epid_state[DUMMY_EPID].disabled = 1;
1785
  /* Clear counter for how many Isoc epids we have set up */
1787 isoc_epid_counter = 0;
1788
  /* Initialize the urb list by initializing a head for each list.
     Also reset the list holding the active URB for each epid */
1791 for (i = 0; i < NBR_OF_EPIDS; i++) {
1792 INIT_LIST_HEAD(&urb_list[i]);
1793 activeUrbList[i] = NULL;
1794 }
1795
1796 /* Init lock for URB lists */
1797 spin_lock_init(&urb_list_lock);
1798 /* Init lock for Etrax R_USB_EPT register */
1799 spin_lock_init(&etrax_epid_lock);
1800 /* Init lock for Etrax dma8 sub0 handling */
1801 spin_lock_init(&etrax_dma8_sub0_lock);
1802
1803 /* We use kmem_cache_* to make sure that all DMA desc. are dword aligned */
1804
1805 /* Note that we specify sizeof(struct USB_EP_Desc) as the size, but also
1806 allocate SB descriptors from this cache. This is ok since
1807 sizeof(struct USB_EP_Desc) == sizeof(struct USB_SB_Desc). */
1808 usb_desc_cache = kmem_cache_create("usb_desc_cache",
1809 sizeof(struct USB_EP_Desc), 0,
1810 SLAB_HWCACHE_ALIGN, 0);
1811 if(usb_desc_cache == NULL) {
1812 return -ENOMEM;
1813 }
1814
1815 /* Create slab cache for speedy allocation of memory for isoc bottom-half
1816 interrupt handling */
1817 isoc_compl_cache =
1818 kmem_cache_create("isoc_compl_cache",
1819 sizeof(struct crisv10_isoc_complete_data),
1820 0, SLAB_HWCACHE_ALIGN, 0);
1821 if(isoc_compl_cache == NULL) {
1822 return -ENOMEM;
1823 }
1824
1825 /* Create slab cache for speedy allocation of memory for later URB finish
1826 struct */
1827 later_data_cache =
1828 kmem_cache_create("later_data_cache",
1829 sizeof(struct urb_later_data),
1830 0, SLAB_HWCACHE_ALIGN, 0);
1831 if(later_data_cache == NULL) {
1832 return -ENOMEM;
1833 }
1834
1835
1836 /* Initiate the bulk start timer. */
1837 init_timer(&bulk_start_timer);
1838 bulk_start_timer.expires = jiffies + BULK_START_TIMER_INTERVAL;
1839 bulk_start_timer.function = tc_bulk_start_timer_func;
1840 add_timer(&bulk_start_timer);
1841
1842
1843 /* Initiate the bulk eot timer. */
1844 init_timer(&bulk_eot_timer);
1845 bulk_eot_timer.expires = jiffies + BULK_EOT_TIMER_INTERVAL;
1846 bulk_eot_timer.function = tc_bulk_eot_timer_func;
1847 bulk_eot_timer.data = (unsigned long)hcd;
1848 add_timer(&bulk_eot_timer);
1849
1850 return 0;
1851 }
1852
1853 /* Uninitialize all resources used by Transfer Controller */
1854 void tc_destroy(void) {
1855
1856 /* Destroy all slab cache */
1857 kmem_cache_destroy(usb_desc_cache);
1858 kmem_cache_destroy(isoc_compl_cache);
1859 kmem_cache_destroy(later_data_cache);
1860
1861 /* Remove timers */
1862 del_timer(&bulk_start_timer);
1863 del_timer(&bulk_eot_timer);
1864 }
1865
1866 static void restart_dma8_sub0(void) {
1867 unsigned long flags;
1868 spin_lock_irqsave(&etrax_dma8_sub0_lock, flags);
1869 /* Verify that the dma is not running */
1870 if ((*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd)) == 0) {
1871 struct USB_EP_Desc *ep = (struct USB_EP_Desc *)phys_to_virt(*R_DMA_CH8_SUB0_EP);
1872 while (DUMMY_EPID == IO_EXTRACT(USB_EP_command, epid, ep->command)) {
1873 ep = (struct USB_EP_Desc *)phys_to_virt(ep->next);
1874 }
1875 /* Advance the DMA to the next EP descriptor that is not a DUMMY_EPID. */
1876 *R_DMA_CH8_SUB0_EP = virt_to_phys(ep);
1877 /* Restart the DMA */
1878 *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start);
1879 }
1880 spin_unlock_irqrestore(&etrax_dma8_sub0_lock, flags);
1881 }
1882
1883 /* queue an URB with the transfer controller (called from hcd_driver) */
1884 static int tc_urb_enqueue(struct usb_hcd *hcd,
1885 struct urb *urb,
1886 gfp_t mem_flags) {
1887 int epid;
1888 int retval;
1889 int bustime = 0;
1890 int maxpacket;
1891 unsigned long flags;
1892 struct crisv10_urb_priv *urb_priv;
1893 struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
1894 DBFENTER;
1895
1896 if(!(crisv10_hcd->running)) {
    /* The USB Controller is not running, probably because no device is
       attached. No point in enqueueing URBs then */
1899 tc_warn("Rejected enqueueing of URB:0x%x because no dev attached\n",
1900 (unsigned int)urb);
1901 return -ENOENT;
1902 }
1903
1904 maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
  /* Special-case check for In Isoc transfers. The specification states that
     each In Isoc transfer consists of one packet and therefore it should fit
     into the transfer buffer of an URB.
     We do the check here to be sure (an invalid scenario can be produced with
     parameters to the usbtest suite) */
1910 if(usb_pipeisoc(urb->pipe) && usb_pipein(urb->pipe) &&
1911 (urb->transfer_buffer_length < maxpacket)) {
1912 tc_err("Submit In Isoc URB with buffer length:%d to pipe with maxpacketlen: %d\n", urb->transfer_buffer_length, maxpacket);
1913 return -EMSGSIZE;
1914 }
1915
  /* Check if there is an epid for the URB's destination; if not, set
     one up. */
1918 epid = tc_setup_epid(urb->ep, urb, mem_flags);
1919 if (epid < 0) {
1920 tc_err("Failed setup epid:%d for URB:0x%x\n", epid, (unsigned int)urb);
1921 DBFEXIT;
1922 return -ENOMEM;
1923 }
1924
1925 if(urb == activeUrbList[epid]) {
1926 tc_err("Resubmition of allready active URB:0x%x\n", (unsigned int)urb);
1927 return -ENXIO;
1928 }
1929
1930 if(urb_list_entry(urb, epid)) {
1931 tc_err("Resubmition of allready queued URB:0x%x\n", (unsigned int)urb);
1932 return -ENXIO;
1933 }
1934
  /* If we have actively flagged the endpoint as disabled then refuse submission */
1936 if(epid_state[epid].disabled) {
1937 return -ENOENT;
1938 }
1939
1940 /* Allocate and init HC-private data for URB */
1941 if(urb_priv_create(hcd, urb, epid, mem_flags) != 0) {
1942 DBFEXIT;
1943 return -ENOMEM;
1944 }
1945 urb_priv = urb->hcpriv;
1946
1947 /* Check if there is enough bandwidth for periodic transfer */
1948 if(usb_pipeint(urb->pipe) || usb_pipeisoc(urb->pipe)) {
1949 /* only check (and later claim) if not already claimed */
1950 if (urb_priv->bandwidth == 0) {
1951 bustime = crisv10_usb_check_bandwidth(urb->dev, urb);
1952 if (bustime < 0) {
1953 tc_err("Not enough periodic bandwidth\n");
1954 urb_priv_free(hcd, urb);
1955 DBFEXIT;
1956 return -ENOSPC;
1957 }
1958 }
1959 }
1960
1961 tc_dbg("Enqueue URB:0x%x[%d] epid:%d (%s) bufflen:%d\n",
1962 (unsigned int)urb, urb_priv->urb_num, epid,
1963 pipe_to_str(urb->pipe), urb->transfer_buffer_length);
1964
1965 /* Create and link SBs required for this URB */
1966 retval = create_sb_for_urb(urb, mem_flags);
1967 if(retval != 0) {
1968 tc_err("Failed to create SBs for URB:0x%x[%d]\n", (unsigned int)urb,
1969 urb_priv->urb_num);
1970 urb_priv_free(hcd, urb);
1971 DBFEXIT;
1972 return retval;
1973 }
1974
  /* Init the intr EP pool if this URB is an INTR transfer. This pool is later
     used when inserting EPs in the TxIntrEPList. We do the allocation here
     so we can't run out of memory later */
1978 if(usb_pipeint(urb->pipe)) {
1979 retval = init_intr_urb(urb, mem_flags);
1980 if(retval != 0) {
1981 tc_warn("Failed to init Intr URB\n");
1982 urb_priv_free(hcd, urb);
1983 DBFEXIT;
1984 return retval;
1985 }
1986 }
1987
  /* Disable other access while inserting the URB */
1989 local_irq_save(flags);
1990
1991 /* Claim bandwidth, if needed */
1992 if(bustime) {
1993 crisv10_usb_claim_bandwidth(urb->dev,
1994 urb,
1995 bustime,
1996 (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS));
1997 }
1998
1999 /* Add URB to EP queue */
2000 urb_list_add(urb, epid, mem_flags);
2001
2002 if(usb_pipeisoc(urb->pipe)) {
2003 /* Special processing of Isoc URBs. */
2004 tc_dma_process_isoc_urb(urb);
2005 } else {
2006 /* Process EP queue for rest of the URB types (Bulk, Ctrl, Intr) */
2007 tc_dma_process_queue(epid);
2008 }
2009
2010 local_irq_restore(flags);
2011
2012 DBFEXIT;
2013 return 0;
2014 }
2015
2016 /* remove an URB from the transfer controller queues (called from hcd_driver)*/
2017 static int tc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) {
2018 struct crisv10_urb_priv *urb_priv;
2019 unsigned long flags;
2020 int epid;
2021
2022 DBFENTER;
2023 /* Disable interrupts here since a descriptor interrupt for the isoc epid
2024 will modify the sb list. This could possibly be done more granular, but
2025 urb_dequeue should not be used frequently anyway.
2026 */
2027 local_irq_save(flags);
2028
2029 urb->status = status;
2030 urb_priv = urb->hcpriv;
2031
2032 if (!urb_priv) {
2033 /* This happens if a device driver calls unlink on an urb that
2034 was never submitted (lazy driver) or if the urb was completed
2035 while dequeue was being called. */
2036 tc_warn("Dequeing of not enqueued URB:0x%x\n", (unsigned int)urb);
2037 local_irq_restore(flags);
2038 return 0;
2039 }
2040 epid = urb_priv->epid;
2041
2042 tc_warn("Dequeing %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
2043 (urb == activeUrbList[epid]) ? "active" : "queued",
2044 (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
2045 str_type(urb->pipe), epid, urb->status,
2046 (urb_priv->later_data) ? "later-sched" : "");
2047
  /* For Bulk, Ctrl and Intr only one URB is active at a time. So any URB
     that isn't active can be dequeued by just removing it from the queue */
2050 if(usb_pipebulk(urb->pipe) || usb_pipecontrol(urb->pipe) ||
2051 usb_pipeint(urb->pipe)) {
2052
    /* Check if the URB hasn't gone further than the queue */
2054 if(urb != activeUrbList[epid]) {
2055 ASSERT(urb_priv->later_data == NULL);
2056 tc_warn("Dequeing URB:0x%x[%d] (%s %s epid:%d) from queue"
2057 " (not active)\n", (unsigned int)urb, urb_priv->urb_num,
2058 str_dir(urb->pipe), str_type(urb->pipe), epid);
2059
2060 /* Finish the URB with error status from USB core */
2061 tc_finish_urb(hcd, urb, urb->status);
2062 local_irq_restore(flags);
2063 return 0;
2064 }
2065 }
2066
2067 /* Set URB status to Unlink for handling when interrupt comes. */
2068 urb_priv->urb_state = UNLINK;
2069
  /* Differentiate dequeueing of Bulk and Ctrl from Isoc and Intr */
2071 switch(usb_pipetype(urb->pipe)) {
2072 case PIPE_BULK:
2073 /* Check if EP still is enabled */
2074 if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
2075 /* The EP was enabled, disable it. */
2076 TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
2077 }
    /* Kick the dummy EP out of the list by pointing next past it. */
2079 TxBulkEPList[epid].next = virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
2080 break;
2081 case PIPE_CONTROL:
2082 /* Check if EP still is enabled */
2083 if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
2084 /* The EP was enabled, disable it. */
2085 TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
2086 }
2087 break;
2088 case PIPE_ISOCHRONOUS:
    /* Disabling, busy-waiting and unlinking of Isoc SBs will be done in
       finish_isoc_urb(), because this URB may be dequeued while other valid
       URBs are still waiting */
2092
2093 /* Check if In Isoc EP still is enabled */
2094 if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
2095 /* The EP was enabled, disable it. */
2096 TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
2097 }
2098 break;
2099 case PIPE_INTERRUPT:
2100 /* Special care is taken for interrupt URBs. EPs are unlinked in
2101 tc_finish_urb */
2102 break;
2103 default:
2104 break;
2105 }
2106
2107 /* Asynchronous unlink, finish the URB later from scheduled or other
2108 event (data finished, error) */
2109 tc_finish_urb_later(hcd, urb, urb->status);
2110
2111 local_irq_restore(flags);
2112 DBFEXIT;
2113 return 0;
2114 }
2115
2116
2117 static void tc_sync_finish_epid(struct usb_hcd *hcd, int epid) {
2118 volatile int timeout = 10000;
2119 struct urb* urb;
2120 struct crisv10_urb_priv* urb_priv;
2121 unsigned long flags;
2122
2123 volatile struct USB_EP_Desc *first_ep; /* First EP in the list. */
2124 volatile struct USB_EP_Desc *curr_ep; /* Current EP, the iterator. */
2125 volatile struct USB_EP_Desc *next_ep; /* The EP after current. */
2126
2127 int type = epid_state[epid].type;
2128
  /* Setting this flag will cause enqueue() to return -ENOENT for new
     submissions on this endpoint and finish_urb() won't process the queue
     further */
2131 epid_state[epid].disabled = 1;
2132
2133 switch(type) {
2134 case PIPE_BULK:
2135 /* Check if EP still is enabled */
2136 if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
2137 /* The EP was enabled, disable it. */
2138 TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
2139 tc_warn("sync_finish: Disabling EP for epid:%d\n", epid);
2140
      /* Busy-wait until the DMA is not using this EP descriptor anymore */
2142 while((*R_DMA_CH8_SUB0_EP ==
2143 virt_to_phys(&TxBulkEPList[epid])) &&
2144 (timeout-- > 0));
2145
2146 }
2147 break;
2148
2149 case PIPE_CONTROL:
2150 /* Check if EP still is enabled */
2151 if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
2152 /* The EP was enabled, disable it. */
2153 TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
2154 tc_warn("sync_finish: Disabling EP for epid:%d\n", epid);
2155
      /* Busy-wait until the DMA is not using this EP descriptor anymore */
2157 while((*R_DMA_CH8_SUB1_EP ==
2158 virt_to_phys(&TxCtrlEPList[epid])) &&
2159 (timeout-- > 0));
2160 }
2161 break;
2162
2163 case PIPE_INTERRUPT:
2164 local_irq_save(flags);
2165 /* Disable all Intr EPs belonging to epid */
2166 first_ep = &TxIntrEPList[0];
2167 curr_ep = first_ep;
2168 do {
2169 next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
2170 if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
2171 /* Disable EP */
2172 next_ep->command &= ~IO_MASK(USB_EP_command, enable);
2173 }
2174 curr_ep = phys_to_virt(curr_ep->next);
2175 } while (curr_ep != first_ep);
2176
2177 local_irq_restore(flags);
2178 break;
2179
2180 case PIPE_ISOCHRONOUS:
2181 /* Check if EP still is enabled */
2182 if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
2183 tc_warn("sync_finish: Disabling Isoc EP for epid:%d\n", epid);
2184 /* The EP was enabled, disable it. */
2185 TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
2186
2187 while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) &&
2188 (timeout-- > 0));
2189 }
2190 break;
2191 }
2192
2193 local_irq_save(flags);
2194
2195 /* Finish if there is active URB for this endpoint */
2196 if(activeUrbList[epid] != NULL) {
2197 urb = activeUrbList[epid];
2198 urb_priv = urb->hcpriv;
2199 ASSERT(urb_priv);
2200 tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
2201 (urb == activeUrbList[epid]) ? "active" : "queued",
2202 (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
2203 str_type(urb->pipe), epid, urb->status,
2204 (urb_priv->later_data) ? "later-sched" : "");
2205
2206 tc_finish_urb(hcd, activeUrbList[epid], -ENOENT);
2207 ASSERT(activeUrbList[epid] == NULL);
2208 }
2209
  /* Finish any queued URBs for this endpoint. There won't be any resubmissions
     because epid_disabled causes enqueue() to fail for this endpoint */
2212 while((urb = urb_list_first(epid)) != NULL) {
2213 urb_priv = urb->hcpriv;
2214 ASSERT(urb_priv);
2215
2216 tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
2217 (urb == activeUrbList[epid]) ? "active" : "queued",
2218 (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
2219 str_type(urb->pipe), epid, urb->status,
2220 (urb_priv->later_data) ? "later-sched" : "");
2221
2222 tc_finish_urb(hcd, urb, -ENOENT);
2223 }
2224 epid_state[epid].disabled = 0;
2225 local_irq_restore(flags);
2226 }
2227
2228 /* free resources associated with an endpoint (called from hcd_driver) */
2229 static void tc_endpoint_disable(struct usb_hcd *hcd,
2230 struct usb_host_endpoint *ep) {
2231 DBFENTER;
2232 /* Only free epid if it has been allocated. We get two endpoint_disable
2233 requests for ctrl endpoints so ignore the second one */
2234 if(ep->hcpriv != NULL) {
2235 struct crisv10_ep_priv *ep_priv = ep->hcpriv;
2236 int epid = ep_priv->epid;
2237 tc_warn("endpoint_disable ep:0x%x ep-priv:0x%x (%s) (epid:%d freed)\n",
2238 (unsigned int)ep, (unsigned int)ep->hcpriv,
2239 endpoint_to_str(&(ep->desc)), epid);
2240
2241 tc_sync_finish_epid(hcd, epid);
2242
2243 ASSERT(activeUrbList[epid] == NULL);
2244 ASSERT(list_empty(&urb_list[epid]));
2245
2246 tc_free_epid(ep);
2247 } else {
2248 tc_dbg("endpoint_disable ep:0x%x ep-priv:0x%x (%s)\n", (unsigned int)ep,
2249 (unsigned int)ep->hcpriv, endpoint_to_str(&(ep->desc)));
2250 }
2251 DBFEXIT;
2252 }
2253
2254 static void tc_finish_urb_later_proc(struct work_struct* work) {
2255 unsigned long flags;
2256 struct urb_later_data* uld;
2257
2258 local_irq_save(flags);
2259 uld = container_of(work, struct urb_later_data, dws.work);
2260 if(uld->urb == NULL) {
2261 late_dbg("Later finish of URB = NULL (allready finished)\n");
2262 } else {
2263 struct crisv10_urb_priv* urb_priv = uld->urb->hcpriv;
2264 ASSERT(urb_priv);
2265 if(urb_priv->urb_num == uld->urb_num) {
2266 late_dbg("Later finish of URB:0x%x[%d]\n", (unsigned int)(uld->urb),
2267 urb_priv->urb_num);
2268 if(uld->status != uld->urb->status) {
2269 errno_dbg("Later-finish URB with status:%d, later-status:%d\n",
2270 uld->urb->status, uld->status);
2271 }
2272 if(uld != urb_priv->later_data) {
2273 panic("Scheduled uld not same as URBs uld\n");
2274 }
2275 tc_finish_urb(uld->hcd, uld->urb, uld->status);
2276 } else {
2277 late_warn("Ignoring later finish of URB:0x%x[%d]"
2278 ", urb_num doesn't match current URB:0x%x[%d]",
2279 (unsigned int)(uld->urb), uld->urb_num,
2280 (unsigned int)(uld->urb), urb_priv->urb_num);
2281 }
2282 }
2283 local_irq_restore(flags);
2284 kmem_cache_free(later_data_cache, uld);
2285 }
2286
2287 static void tc_finish_urb_later(struct usb_hcd *hcd, struct urb *urb,
2288 int status) {
2289 struct crisv10_urb_priv *urb_priv = urb->hcpriv;
2290 struct urb_later_data* uld;
2291
2292 ASSERT(urb_priv);
2293
2294 if(urb_priv->later_data != NULL) {
    /* Later-finish already scheduled for this URB, just update the status to
       return when finishing later */
2297 errno_dbg("Later-finish schedule change URB status:%d with new"
2298 " status:%d\n", urb_priv->later_data->status, status);
2299
2300 urb_priv->later_data->status = status;
2301 return;
2302 }
2303
2304 uld = kmem_cache_alloc(later_data_cache, GFP_ATOMIC);
2305 ASSERT(uld);
2306
2307 uld->hcd = hcd;
2308 uld->urb = urb;
2309 uld->urb_num = urb_priv->urb_num;
2310 uld->status = status;
2311
2312 INIT_DELAYED_WORK(&uld->dws, tc_finish_urb_later_proc);
2313 urb_priv->later_data = uld;
2314
2315 /* Schedule the finishing of the URB to happen later */
2316 schedule_delayed_work(&uld->dws, LATER_TIMER_DELAY);
2317 }
2318
2319 static void tc_finish_isoc_urb(struct usb_hcd *hcd, struct urb *urb,
2320 int status);
2321
2322 static void tc_finish_urb(struct usb_hcd *hcd, struct urb *urb, int status) {
2323 struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
2324 struct crisv10_urb_priv *urb_priv = urb->hcpriv;
2325 int epid;
2326 char toggle;
2327 int urb_num;
2328
2329 DBFENTER;
2330 ASSERT(urb_priv != NULL);
2331 epid = urb_priv->epid;
2332 urb_num = urb_priv->urb_num;
2333
2334 if(urb != activeUrbList[epid]) {
2335 if(urb_list_entry(urb, epid)) {
      /* Remove this URB from the list. Only happens when a URB is finished
	 before having been processed (dequeueing) */
2338 urb_list_del(urb, epid);
2339 } else {
2340 tc_warn("Finishing of URB:0x%x[%d] neither active or in queue for"
2341 " epid:%d\n", (unsigned int)urb, urb_num, epid);
2342 }
2343 }
2344
2345 /* Cancel any pending later-finish of this URB */
2346 if(urb_priv->later_data) {
2347 urb_priv->later_data->urb = NULL;
2348 }
2349
2350 /* For an IN pipe, we always set the actual length, regardless of whether
2351 there was an error or not (which means the device driver can use the data
2352 if it wants to). */
2353 if(usb_pipein(urb->pipe)) {
2354 urb->actual_length = urb_priv->rx_offset;
2355 } else {
2356 /* Set actual_length for OUT urbs also; the USB mass storage driver seems
2357 to want that. */
2358 if (status == 0 && urb->status == -EINPROGRESS) {
2359 urb->actual_length = urb->transfer_buffer_length;
2360 } else {
2361 /* We wouldn't know of any partial writes if there was an error. */
2362 urb->actual_length = 0;
2363 }
2364 }
2365
2366
2367 /* URB status mangling */
2368 if(urb->status == -EINPROGRESS) {
2369 /* The USB core hasn't changed the status, let's set our finish status */
2370 urb->status = status;
2371
2372 if ((status == 0) && (urb->transfer_flags & URB_SHORT_NOT_OK) &&
2373 usb_pipein(urb->pipe) &&
2374 (urb->actual_length != urb->transfer_buffer_length)) {
      /* URB_SHORT_NOT_OK means that short reads (shorter than the endpoint's
	 max length) are to be treated as an error. */
2377 errno_dbg("Finishing URB:0x%x[%d] with SHORT_NOT_OK flag and short"
2378 " data:%d\n", (unsigned int)urb, urb_num,
2379 urb->actual_length);
2380 urb->status = -EREMOTEIO;
2381 }
2382
2383 if(urb_priv->urb_state == UNLINK) {
2384 /* URB has been requested to be unlinked asynchronously */
2385 urb->status = -ECONNRESET;
2386 errno_dbg("Fixing unlink status of URB:0x%x[%d] to:%d\n",
2387 (unsigned int)urb, urb_num, urb->status);
2388 }
2389 } else {
2390 /* The USB Core wants to signal some error via the URB, pass it through */
2391 }
2392
2393 /* use completely different finish function for Isoc URBs */
2394 if(usb_pipeisoc(urb->pipe)) {
2395 tc_finish_isoc_urb(hcd, urb, status);
2396 return;
2397 }
2398
2399 /* Do special unlinking of EPs for Intr traffic */
2400 if(usb_pipeint(urb->pipe)) {
2401 tc_dma_unlink_intr_urb(urb);
2402 }
2403
2404 /* Release allocated bandwidth for periodic transfers */
2405 if(usb_pipeint(urb->pipe) || usb_pipeisoc(urb->pipe))
2406 crisv10_usb_release_bandwidth(hcd,
2407 usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS,
2408 urb_priv->bandwidth);
2409
2410 /* This URB is active on EP */
2411 if(urb == activeUrbList[epid]) {
2412 /* We need to fiddle with the toggle bits because the hardware doesn't do
2413 it for us. */
2414 toggle = etrax_epid_get_toggle(epid, usb_pipeout(urb->pipe));
2415 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
2416 usb_pipeout(urb->pipe), toggle);
2417
2418 /* Checks for Ctrl and Bulk EPs */
2419 switch(usb_pipetype(urb->pipe)) {
2420 case PIPE_BULK:
      /* Check that the Bulk EP really is disabled before finishing active URB */
2422 ASSERT((TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) ==
2423 IO_STATE(USB_EP_command, enable, no));
      /* Disable sub-pointer for EP to prevent the next tx_interrupt() from
	 processing this Bulk EP. */
2426 TxBulkEPList[epid].sub = 0;
2427 /* No need to wait for the DMA before changing the next pointer.
2428 The modulo NBR_OF_EPIDS isn't actually necessary, since we will never use
2429 the last one (INVALID_EPID) for actual traffic. */
2430 TxBulkEPList[epid].next =
2431 virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
2432 break;
2433 case PIPE_CONTROL:
      /* Check that the Ctrl EP really is disabled before finishing active URB */
2435 ASSERT((TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) ==
2436 IO_STATE(USB_EP_command, enable, no));
      /* Disable sub-pointer for EP to prevent the next tx_interrupt() from
	 processing this Ctrl EP. */
2439 TxCtrlEPList[epid].sub = 0;
2440 break;
2441 }
2442 }
2443
2444 /* Free HC-private URB data*/
2445 urb_priv_free(hcd, urb);
2446
2447 if(urb->status) {
2448 errno_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
2449 (unsigned int)urb, urb_num, str_dir(urb->pipe),
2450 str_type(urb->pipe), urb->actual_length, urb->status);
2451 } else {
2452 tc_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
2453 (unsigned int)urb, urb_num, str_dir(urb->pipe),
2454 str_type(urb->pipe), urb->actual_length, urb->status);
2455 }
2456
2457 /* If we just finished an active URB, clear active pointer. */
2458 if (urb == activeUrbList[epid]) {
2459 /* Make URB not active on EP anymore */
2460 activeUrbList[epid] = NULL;
2461
2462 if(urb->status == 0) {
      /* URB finished successfully, process the queue to see if there are any
	 more URBs waiting before we call the completion function. */
2465 if(crisv10_hcd->running) {
2466 /* Only process queue if USB controller is running */
2467 tc_dma_process_queue(epid);
2468 } else {
2469 tc_warn("No processing of queue for epid:%d, USB Controller not"
2470 " running\n", epid);
2471 }
2472 }
2473 }
2474
2475 /* Hand the URB from HCD to its USB device driver, using its completion
2476 functions */
2477 usb_hcd_giveback_urb (hcd, urb, status);
2478
  /* Check the queue once more if the URB returned with error. We didn't do
     it before the completion function, because the specification states that
     the queue should not restart until all its unlinked URBs have been fully
     retired, i.e. had their completion functions run */
2483 if(crisv10_hcd->running) {
2484 /* Only process queue if USB controller is running */
2485 tc_dma_process_queue(epid);
2486 } else {
2487 tc_warn("No processing of queue for epid:%d, USB Controller not running\n",
2488 epid);
2489 }
2490
2491 DBFEXIT;
2492 }
2493
2494 static void tc_finish_isoc_urb(struct usb_hcd *hcd, struct urb *urb,
2495 int status) {
2496 struct crisv10_urb_priv *urb_priv = urb->hcpriv;
2497 int epid, i;
2498 volatile int timeout = 10000;
2499 int bandwidth = 0;
2500
2501 ASSERT(urb_priv);
2502 epid = urb_priv->epid;
2503
2504 ASSERT(usb_pipeisoc(urb->pipe));
2505
  /* Make sure all isoc packets have status and length set before
     completing the urb. */
2508 for (i = urb_priv->isoc_packet_counter; i < urb->number_of_packets; i++){
2509 urb->iso_frame_desc[i].actual_length = 0;
2510 urb->iso_frame_desc[i].status = -EPROTO;
2511 }
2512
2513 /* Check if the URB is currently active (done or error) */
2514 if(urb == activeUrbList[epid]) {
    /* Check if there is another In Isoc URB queued for this epid */
2516 if (!list_empty(&urb_list[epid])&& !epid_state[epid].disabled) {
2517 /* Move it from queue to active and mark it started so Isoc transfers
2518 won't be interrupted.
2519 All Isoc URBs data transfers are already added to DMA lists so we
2520 don't have to insert anything in DMA lists here. */
2521 activeUrbList[epid] = urb_list_first(epid);
2522 ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_state =
2523 STARTED;
2524 urb_list_del(activeUrbList[epid], epid);
2525
2526 if(urb->status) {
2527 errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
2528 " status:%d, new waiting URB:0x%x[%d]\n",
2529 (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
2530 str_type(urb->pipe), urb_priv->isoc_packet_counter,
2531 urb->number_of_packets, urb->status,
2532 (unsigned int)activeUrbList[epid],
2533 ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_num);
2534 }
2535
2536 } else { /* No other URB queued for this epid */
2537 if(urb->status) {
2538 errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
2539 " status:%d, no new URB waiting\n",
2540 (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
2541 str_type(urb->pipe), urb_priv->isoc_packet_counter,
2542 urb->number_of_packets, urb->status);
2543 }
2544
2545 /* Check if EP is still enabled, then shut it down. */
2546 if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
2547 isoc_dbg("Isoc EP enabled for epid:%d, disabling it\n", epid);
2548
2549 /* Should only occur for In Isoc EPs where SB isn't consumed. */
2550 ASSERT(usb_pipein(urb->pipe));
2551
2552 /* Disable it and wait for it to stop */
2553 TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
2554
2555 /* Ah, the luxury of busy-wait. */
2556 while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) &&
2557 (timeout-- > 0));
2558 }
2559
2560 /* Unlink SB to say that epid is finished. */
2561 TxIsocEPList[epid].sub = 0;
2562 TxIsocEPList[epid].hw_len = 0;
2563
2564 /* No URB active for EP anymore */
2565 activeUrbList[epid] = NULL;
2566 }
  } else { /* Finishing of a non-active URB (queued up with SBs though) */
2568 isoc_warn("finish_isoc_urb (URB:0x%x %s) (%d of %d packets) status:%d,"
2569 " SB queued but not active\n",
2570 (unsigned int)urb, str_dir(urb->pipe),
2571 urb_priv->isoc_packet_counter, urb->number_of_packets,
2572 urb->status);
2573 if(usb_pipeout(urb->pipe)) {
2574 /* Finishing of not yet active Out Isoc URB needs unlinking of SBs. */
2575 struct USB_SB_Desc *iter_sb, *prev_sb, *next_sb;
2576
2577 iter_sb = TxIsocEPList[epid].sub ?
2578 phys_to_virt(TxIsocEPList[epid].sub) : 0;
2579 prev_sb = 0;
2580
      /* Find the SB that is linked before this URB's first SB */
2582 while (iter_sb && (iter_sb != urb_priv->first_sb)) {
2583 prev_sb = iter_sb;
2584 iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
2585 }
2586
2587 if (iter_sb == 0) {
2588 /* Unlink of the URB currently being transmitted. */
2589 prev_sb = 0;
2590 iter_sb = TxIsocEPList[epid].sub ? phys_to_virt(TxIsocEPList[epid].sub) : 0;
2591 }
2592
2593 while (iter_sb && (iter_sb != urb_priv->last_sb)) {
2594 iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
2595 }
2596
2597 if (iter_sb) {
2598 next_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
2599 } else {
2600 /* This should only happen if the DMA has completed
2601 processing the SB list for this EP while interrupts
2602 are disabled. */
2603 isoc_dbg("Isoc urb not found, already sent?\n");
2604 next_sb = 0;
2605 }
2606 if (prev_sb) {
2607 prev_sb->next = next_sb ? virt_to_phys(next_sb) : 0;
2608 } else {
2609 TxIsocEPList[epid].sub = next_sb ? virt_to_phys(next_sb) : 0;
2610 }
2611 }
2612 }
2613
2614 /* Free HC-private URB data*/
2615 bandwidth = urb_priv->bandwidth;
2616 urb_priv_free(hcd, urb);
2617
2618 crisv10_usb_release_bandwidth(hcd, usb_pipeisoc(urb->pipe), bandwidth);
2619
2620 /* Hand the URB from HCD to its USB device driver, using its completion
2621 functions */
2622 usb_hcd_giveback_urb (hcd, urb, status);
2623 }
2624
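/* Global URB sequence number; each urb_priv gets the next value. It is used
   to detect stale later-finish work in tc_finish_urb_later_proc(), and
   wraparound of the 32-bit counter should be harmless. */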
2625 static __u32 urb_num = 0;
2626
2627 /* allocate and initialize URB private data */
2628 static int urb_priv_create(struct usb_hcd *hcd, struct urb *urb, int epid,
2629 int mem_flags) {
2630 struct crisv10_urb_priv *urb_priv;
2631
2632 urb_priv = kmalloc(sizeof *urb_priv, mem_flags);
2633 if (!urb_priv)
2634 return -ENOMEM;
2635 memset(urb_priv, 0, sizeof *urb_priv);
2636
2637 urb_priv->epid = epid;
2638 urb_priv->urb_state = NOT_STARTED;
2639
2640 urb->hcpriv = urb_priv;
2641 /* Assign URB a sequence number, and increment counter */
2642 urb_priv->urb_num = urb_num;
2643 urb_num++;
2644 urb_priv->bandwidth = 0;
2645 return 0;
2646 }
2647
2648 /* free URB private data */
2649 static void urb_priv_free(struct usb_hcd *hcd, struct urb *urb) {
2650 int i;
2651 struct crisv10_urb_priv *urb_priv = urb->hcpriv;
2652 ASSERT(urb_priv != 0);
2653
  /* Check if it has any linked SBs that need to be freed */
2655 if(urb_priv->first_sb != NULL) {
    struct USB_SB_Desc *next_sb, *first_sb, *last_sb;
    first_sb = urb_priv->first_sb;
    last_sb = urb_priv->last_sb;
    ASSERT(last_sb);
    while(first_sb != last_sb) {
      next_sb = (struct USB_SB_Desc *)phys_to_virt(first_sb->next);
      kmem_cache_free(usb_desc_cache, first_sb);
      first_sb = next_sb;
    }
    kmem_cache_free(usb_desc_cache, last_sb);
2669 }
2670
  /* Check if it has any EPs in its Intr pool that also need to be freed */
2672 if(urb_priv->intr_ep_pool_length > 0) {
2673 for(i = 0; i < urb_priv->intr_ep_pool_length; i++) {
2674 kfree(urb_priv->intr_ep_pool[i]);
2675 }
2676 /*
2677 tc_dbg("Freed %d EPs from URB:0x%x EP pool\n",
2678 urb_priv->intr_ep_pool_length, (unsigned int)urb);
2679 */
2680 }
2681
2682 kfree(urb_priv);
2683 urb->hcpriv = NULL;
2684 }
2685
2686 static int ep_priv_create(struct usb_host_endpoint *ep, int mem_flags) {
2687 struct crisv10_ep_priv *ep_priv;
2688
2689 ep_priv = kmalloc(sizeof *ep_priv, mem_flags);
2690 if (!ep_priv)
2691 return -ENOMEM;
2692 memset(ep_priv, 0, sizeof *ep_priv);
2693
2694 ep->hcpriv = ep_priv;
2695 return 0;
2696 }
2697
2698 static void ep_priv_free(struct usb_host_endpoint *ep) {
2699 struct crisv10_ep_priv *ep_priv = ep->hcpriv;
2700 ASSERT(ep_priv);
2701 kfree(ep_priv);
2702 ep->hcpriv = NULL;
2703 }
2704
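/* The three bandwidth helpers below appear to be local copies of the old
   usbcore usb_check_bandwidth()/usb_claim_bandwidth()/usb_release_bandwidth()
   helpers, kept here since this HCD does its own periodic-bandwidth
   bookkeeping. */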
2705 /*
2706 * usb_check_bandwidth():
2707 *
2708 * old_alloc is from host_controller->bandwidth_allocated in microseconds;
2709 * bustime is from calc_bus_time(), but converted to microseconds.
2710 *
2711 * returns <bustime in us> if successful,
2712 * or -ENOSPC if bandwidth request fails.
2713 *
2714 * FIXME:
2715 * This initial implementation does not use Endpoint.bInterval
2716 * in managing bandwidth allocation.
2717 * It probably needs to be expanded to use Endpoint.bInterval.
2718 * This can be done as a later enhancement (correction).
2719 *
2720 * This will also probably require some kind of
2721 * frame allocation tracking...meaning, for example,
2722 * that if multiple drivers request interrupts every 10 USB frames,
2723 * they don't all have to be allocated at
2724 * frame numbers N, N+10, N+20, etc. Some of them could be at
2725 * N+11, N+21, N+31, etc., and others at
2726 * N+12, N+22, N+32, etc.
2727 *
2728 * Similarly for isochronous transfers...
2729 *
2730 * Individual HCDs can schedule more directly ... this logic
2731 * is not correct for high speed transfers.
2732 */
2733 static int crisv10_usb_check_bandwidth(
2734 struct usb_device *dev,
2735 struct urb *urb)
2736 {
2737 unsigned int pipe = urb->pipe;
2738 long bustime;
2739 int is_in = usb_pipein (pipe);
2740 int is_iso = usb_pipeisoc (pipe);
2741 int old_alloc = dev->bus->bandwidth_allocated;
2742 int new_alloc;
2743
2744 bustime = NS_TO_US (usb_calc_bus_time (dev->speed, is_in, is_iso,
2745 usb_maxpacket (dev, pipe, !is_in)));
2746 if (is_iso)
2747 bustime /= urb->number_of_packets;
2748
2749 new_alloc = old_alloc + (int) bustime;
2750 if (new_alloc > FRAME_TIME_MAX_USECS_ALLOC) {
2751 dev_dbg (&dev->dev, "usb_check_bandwidth FAILED: %d + %ld = %d usec\n",
2752 old_alloc, bustime, new_alloc);
2753 bustime = -ENOSPC; /* report error */
2754 }
2755
2756 return bustime;
2757 }
2758
2759 /**
2760 * usb_claim_bandwidth - records bandwidth for a periodic transfer
2761 * @dev: source/target of request
2762 * @urb: request (urb->dev == dev)
2763 * @bustime: bandwidth consumed, in (average) microseconds per frame
2764 * @isoc: true iff the request is isochronous
2765 *
2766 * HCDs are expected not to overcommit periodic bandwidth, and to record such
2767 * reservations whenever endpoints are added to the periodic schedule.
2768 *
2769 * FIXME averaging per-frame is suboptimal. Better to sum over the HCD's
2770 * entire periodic schedule ... 32 frames for OHCI, 1024 for UHCI, settable
2771 * for EHCI (256/512/1024 frames, default 1024) and have the bus expose how
2772 * large its periodic schedule is.
2773 */
2774 static void crisv10_usb_claim_bandwidth(
2775 struct usb_device *dev,
2776 struct urb *urb, int bustime, int isoc)
{
  struct crisv10_urb_priv *urb_priv = urb->hcpriv;

  dev->bus->bandwidth_allocated += bustime;
  if (isoc)
    dev->bus->bandwidth_isoc_reqs++;
  else
    dev->bus->bandwidth_int_reqs++;
  urb_priv->bandwidth = bustime;
}
2787
2788 /**
2789 * usb_release_bandwidth - reverses effect of usb_claim_bandwidth()
2790 * @hcd: host controller
2791 * @isoc: true iff the request is isochronous
2792 * @bandwidth: bandwidth returned
2793 *
2794 * This records that previously allocated bandwidth has been released.
2795 * Bandwidth is released when endpoints are removed from the host controller's
2796 * periodic schedule.
2797 */
2798 static void crisv10_usb_release_bandwidth(
2799 struct usb_hcd *hcd,
2800 int isoc,
2801 int bandwidth)
2802 {
2803 hcd_to_bus(hcd)->bandwidth_allocated -= bandwidth;
2804 if (isoc)
2805 hcd_to_bus(hcd)->bandwidth_isoc_reqs--;
2806 else
2807 hcd_to_bus(hcd)->bandwidth_int_reqs--;
2808 }
2809
2810
2811 /* EPID handling functions, managing EP-list in Etrax through wrappers */
2812 /* ------------------------------------------------------------------- */
2813
2814 /* Sets up a new EPID for an endpoint or returns existing if found */
2815 static int tc_setup_epid(struct usb_host_endpoint *ep, struct urb *urb,
2816 int mem_flags) {
2817 int epid;
2818 char devnum, endpoint, out_traffic, slow;
2819 int maxlen;
2820 __u32 epid_data;
2821 struct crisv10_ep_priv *ep_priv = ep->hcpriv;
2822
2823 DBFENTER;
2824
2825 /* Check if a valid epid already is setup for this endpoint */
2826 if(ep_priv != NULL) {
2827 return ep_priv->epid;
2828 }
2829
  /* We must find and initialize a new epid for this urb. */
2831 epid = tc_allocate_epid();
2832
2833 if (epid == -1) {
2834 /* Failed to allocate a new epid. */
2835 DBFEXIT;
2836 return epid;
2837 }
2838
2839 /* We now have a new epid to use. Claim it. */
2840 epid_state[epid].inuse = 1;
2841
2842 /* Init private data for new endpoint */
2843 if(ep_priv_create(ep, mem_flags) != 0) {
2844 return -ENOMEM;
2845 }
2846 ep_priv = ep->hcpriv;
2847 ep_priv->epid = epid;
2848
2849 devnum = usb_pipedevice(urb->pipe);
2850 endpoint = usb_pipeendpoint(urb->pipe);
2851 slow = (urb->dev->speed == USB_SPEED_LOW);
2852 maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
2853
2854 if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
2855 /* We want both IN and OUT control traffic to be put on the same
2856 EP/SB list. */
2857 out_traffic = 1;
2858 } else {
2859 out_traffic = usb_pipeout(urb->pipe);
2860 }
2861
2862 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
2863 epid_data = IO_STATE(R_USB_EPT_DATA_ISO, valid, yes) |
2864 /* FIXME: Change any to the actual port? */
2865 IO_STATE(R_USB_EPT_DATA_ISO, port, any) |
2866 IO_FIELD(R_USB_EPT_DATA_ISO, max_len, maxlen) |
2867 IO_FIELD(R_USB_EPT_DATA_ISO, ep, endpoint) |
2868 IO_FIELD(R_USB_EPT_DATA_ISO, dev, devnum);
2869 etrax_epid_iso_set(epid, epid_data);
2870 } else {
2871 epid_data = IO_STATE(R_USB_EPT_DATA, valid, yes) |
2872 IO_FIELD(R_USB_EPT_DATA, low_speed, slow) |
2873 /* FIXME: Change any to the actual port? */
2874 IO_STATE(R_USB_EPT_DATA, port, any) |
2875 IO_FIELD(R_USB_EPT_DATA, max_len, maxlen) |
2876 IO_FIELD(R_USB_EPT_DATA, ep, endpoint) |
2877 IO_FIELD(R_USB_EPT_DATA, dev, devnum);
2878 etrax_epid_set(epid, epid_data);
2879 }
2880
2881 epid_state[epid].out_traffic = out_traffic;
2882 epid_state[epid].type = usb_pipetype(urb->pipe);
2883
2884 tc_warn("Setting up ep:0x%x epid:%d (addr:%d endp:%d max_len:%d %s %s %s)\n",
2885 (unsigned int)ep, epid, devnum, endpoint, maxlen,
2886 str_type(urb->pipe), out_traffic ? "out" : "in",
2887 slow ? "low" : "full");
2888
2889 /* Enable Isoc eof interrupt if we set up the first Isoc epid */
2890 if(usb_pipeisoc(urb->pipe)) {
2891 isoc_epid_counter++;
2892 if(isoc_epid_counter == 1) {
2893 isoc_warn("Enabled Isoc eof interrupt\n");
2894 *R_USB_IRQ_MASK_SET = IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set);
2895 }
2896 }
2897
2898 DBFEXIT;
2899 return epid;
2900 }
2901
2902 static void tc_free_epid(struct usb_host_endpoint *ep) {
2903 unsigned long flags;
2904 struct crisv10_ep_priv *ep_priv = ep->hcpriv;
2905 int epid;
2906 volatile int timeout = 10000;
2907
2908 DBFENTER;
2909
2910 if (ep_priv == NULL) {
2911 tc_warn("Trying to free unused epid on ep:0x%x\n", (unsigned int)ep);
2912 DBFEXIT;
2913 return;
2914 }
2915
2916 epid = ep_priv->epid;
2917
2918 /* Disable Isoc eof interrupt if we free the last Isoc epid */
2919 if(epid_isoc(epid)) {
2920 ASSERT(isoc_epid_counter > 0);
2921 isoc_epid_counter--;
2922 if(isoc_epid_counter == 0) {
2923 *R_USB_IRQ_MASK_CLR = IO_STATE(R_USB_IRQ_MASK_CLR, iso_eof, clr);
2924 isoc_warn("Disabled Isoc eof interrupt\n");
2925 }
2926 }
2927
  /* Take the lock manually instead of via the epid_x_x wrappers,
     because we need to poll the hold bit here */
2930 spin_lock_irqsave(&etrax_epid_lock, flags);
2931
2932 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
2933 nop();
2934 while((*R_USB_EPT_DATA & IO_MASK(R_USB_EPT_DATA, hold)) &&
2935 (timeout-- > 0));
2936 /* This will, among other things, set the valid field to 0. */
2937 *R_USB_EPT_DATA = 0;
2938 spin_unlock_irqrestore(&etrax_epid_lock, flags);
2939
2940 /* Free resource in software state info list */
2941 epid_state[epid].inuse = 0;
2942
2943 /* Free private endpoint data */
2944 ep_priv_free(ep);
2945
2946 DBFEXIT;
2947 }
2948
2949 static int tc_allocate_epid(void) {
2950 int i;
2951 DBFENTER;
2952 for (i = 0; i < NBR_OF_EPIDS; i++) {
2953 if (!epid_inuse(i)) {
2954 DBFEXIT;
2955 return i;
2956 }
2957 }
2958
2959 tc_warn("Found no free epids\n");
2960 DBFEXIT;
2961 return -1;
2962 }
2963
2964
2965 /* Wrappers around the list functions (include/linux/list.h). */
2966 /* ---------------------------------------------------------- */
2967 static inline int __urb_list_empty(int epid) {
2968 int retval;
2969 retval = list_empty(&urb_list[epid]);
2970 return retval;
2971 }
2972
2973 /* Returns first urb for this epid, or NULL if list is empty. */
2974 static inline struct urb *urb_list_first(int epid) {
2975 unsigned long flags;
2976 struct urb *first_urb = 0;
2977 spin_lock_irqsave(&urb_list_lock, flags);
2978 if (!__urb_list_empty(epid)) {
2979 /* Get the first urb (i.e. head->next). */
2980 urb_entry_t *urb_entry = list_entry((&urb_list[epid])->next, urb_entry_t, list);
2981 first_urb = urb_entry->urb;
2982 }
2983 spin_unlock_irqrestore(&urb_list_lock, flags);
2984 return first_urb;
2985 }
2986
2987 /* Adds an urb_entry last in the list for this epid. */
2988 static inline void urb_list_add(struct urb *urb, int epid, int mem_flags) {
2989 unsigned long flags;
2990 urb_entry_t *urb_entry = (urb_entry_t *)kmalloc(sizeof(urb_entry_t), mem_flags);
2991 ASSERT(urb_entry);
2992
2993 urb_entry->urb = urb;
2994 spin_lock_irqsave(&urb_list_lock, flags);
2995 list_add_tail(&urb_entry->list, &urb_list[epid]);
2996 spin_unlock_irqrestore(&urb_list_lock, flags);
2997 }
2998
2999 /* Search through the list for an element that contains this urb. (The list
3000 is expected to be short and the one we are about to delete will often be
3001 the first in the list.)
   Should be protected by spinlocks in the calling function */
3003 static inline urb_entry_t *__urb_list_entry(struct urb *urb, int epid) {
3004 struct list_head *entry;
3005 struct list_head *tmp;
3006 urb_entry_t *urb_entry;
3007
3008 list_for_each_safe(entry, tmp, &urb_list[epid]) {
3009 urb_entry = list_entry(entry, urb_entry_t, list);
3010 ASSERT(urb_entry);
3011 ASSERT(urb_entry->urb);
3012
3013 if (urb_entry->urb == urb) {
3014 return urb_entry;
3015 }
3016 }
3017 return 0;
3018 }
3019
3020 /* Same function as above but for global use. Protects list by spinlock */
3021 static inline urb_entry_t *urb_list_entry(struct urb *urb, int epid) {
3022 unsigned long flags;
3023 urb_entry_t *urb_entry;
3024 spin_lock_irqsave(&urb_list_lock, flags);
3025 urb_entry = __urb_list_entry(urb, epid);
3026 spin_unlock_irqrestore(&urb_list_lock, flags);
3027 return (urb_entry);
3028 }
3029
3030 /* Delete an urb from the list. */
3031 static inline void urb_list_del(struct urb *urb, int epid) {
3032 unsigned long flags;
3033 urb_entry_t *urb_entry;
3034
3035 /* Delete entry and free. */
3036 spin_lock_irqsave(&urb_list_lock, flags);
3037 urb_entry = __urb_list_entry(urb, epid);
3038 ASSERT(urb_entry);
3039
3040 list_del(&urb_entry->list);
3041 spin_unlock_irqrestore(&urb_list_lock, flags);
3042 kfree(urb_entry);
3043 }
3044
3045 /* Move an urb to the end of the list. */
3046 static inline void urb_list_move_last(struct urb *urb, int epid) {
3047 unsigned long flags;
3048 urb_entry_t *urb_entry;
3049
3050 spin_lock_irqsave(&urb_list_lock, flags);
3051 urb_entry = __urb_list_entry(urb, epid);
3052 ASSERT(urb_entry);
3053
3054 list_del(&urb_entry->list);
3055 list_add_tail(&urb_entry->list, &urb_list[epid]);
3056 spin_unlock_irqrestore(&urb_list_lock, flags);
3057 }
3058
3059 /* Get the next urb in the list. */
3060 static inline struct urb *urb_list_next(struct urb *urb, int epid) {
3061 unsigned long flags;
3062 urb_entry_t *urb_entry;
3063
3064 spin_lock_irqsave(&urb_list_lock, flags);
3065 urb_entry = __urb_list_entry(urb, epid);
3066 ASSERT(urb_entry);
3067
3068 if (urb_entry->list.next != &urb_list[epid]) {
3069 struct list_head *elem = urb_entry->list.next;
3070 urb_entry = list_entry(elem, urb_entry_t, list);
3071 spin_unlock_irqrestore(&urb_list_lock, flags);
3072 return urb_entry->urb;
3073 } else {
3074 spin_unlock_irqrestore(&urb_list_lock, flags);
3075 return NULL;
3076 }
3077 }
3078
3079 struct USB_EP_Desc* create_ep(int epid, struct USB_SB_Desc* sb_desc,
3080 int mem_flags) {
3081 struct USB_EP_Desc *ep_desc;
3082 ep_desc = (struct USB_EP_Desc *) kmem_cache_alloc(usb_desc_cache, mem_flags);
3083 if(ep_desc == NULL)
3084 return NULL;
3085 memset(ep_desc, 0, sizeof(struct USB_EP_Desc));
3086
3087 ep_desc->hw_len = 0;
3088 ep_desc->command = (IO_FIELD(USB_EP_command, epid, epid) |
3089 IO_STATE(USB_EP_command, enable, yes));
3090 if(sb_desc == NULL) {
3091 ep_desc->sub = 0;
3092 } else {
3093 ep_desc->sub = virt_to_phys(sb_desc);
3094 }
3095 return ep_desc;
3096 }
3097
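/* Token types for the SB command tt field (the values match how they are
   used with IO_FIELD(USB_SB_command, tt, ...) below). */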
3098 #define TT_ZOUT 0
3099 #define TT_IN 1
3100 #define TT_OUT 2
3101 #define TT_SETUP 3
3102
3103 #define CMD_EOL IO_STATE(USB_SB_command, eol, yes)
3104 #define CMD_INTR IO_STATE(USB_SB_command, intr, yes)
3105 #define CMD_FULL IO_STATE(USB_SB_command, full, yes)
3106
/* Allocation and setup of a generic SB. Used to create SETUP, OUT and ZOUT
   SBs. Also used by create_sb_in() to avoid duplicating the allocation
   procedure in two places */
3110 struct USB_SB_Desc* create_sb(struct USB_SB_Desc* sb_prev, int tt, void* data,
3111 int datalen, int mem_flags) {
3112 struct USB_SB_Desc *sb_desc;
3113 sb_desc = (struct USB_SB_Desc*)kmem_cache_alloc(usb_desc_cache, mem_flags);
3114 if(sb_desc == NULL)
3115 return NULL;
3116 memset(sb_desc, 0, sizeof(struct USB_SB_Desc));
3117
3118 sb_desc->command = IO_FIELD(USB_SB_command, tt, tt) |
3119 IO_STATE(USB_SB_command, eot, yes);
3120
3121 sb_desc->sw_len = datalen;
3122 if(data != NULL) {
3123 sb_desc->buf = virt_to_phys(data);
3124 } else {
3125 sb_desc->buf = 0;
3126 }
3127 if(sb_prev != NULL) {
3128 sb_prev->next = virt_to_phys(sb_desc);
3129 }
3130 return sb_desc;
3131 }
3132
/* Creates a copy of an existing SB by allocating space for it and copying
   the settings */
3135 struct USB_SB_Desc* create_sb_copy(struct USB_SB_Desc* sb_orig, int mem_flags) {
3136 struct USB_SB_Desc *sb_desc;
3137 sb_desc = (struct USB_SB_Desc*)kmem_cache_alloc(usb_desc_cache, mem_flags);
3138 if(sb_desc == NULL)
3139 return NULL;
3140
3141 memcpy(sb_desc, sb_orig, sizeof(struct USB_SB_Desc));
3142 return sb_desc;
3143 }
3144
/* A specific create_sb function for creation of In SBs. This is because
   datalen in In SBs denotes how many packets we are expecting. It also
   sets up the rem field to show how many bytes we expect in the last
   packet if it's not a full one */
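/* Worked example (illustrative): datalen = 1000 and maxlen = 64 gives
   sw_len = (1000 - 1) / 64 + 1 = 16 expected packets and
   rem = 1000 % 64 = 40, i.e. the last packet should carry 40 bytes. */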
3149 struct USB_SB_Desc* create_sb_in(struct USB_SB_Desc* sb_prev, int datalen,
3150 int maxlen, int mem_flags) {
3151 struct USB_SB_Desc *sb_desc;
3152 sb_desc = create_sb(sb_prev, TT_IN, NULL,
3153 datalen ? (datalen - 1) / maxlen + 1 : 0, mem_flags);
3154 if(sb_desc == NULL)
3155 return NULL;
3156 sb_desc->command |= IO_FIELD(USB_SB_command, rem, datalen % maxlen);
3157 return sb_desc;
3158 }
3159
3160 void set_sb_cmds(struct USB_SB_Desc *sb_desc, __u16 flags) {
3161 sb_desc->command |= flags;
3162 }
3163
3164 int create_sb_for_urb(struct urb *urb, int mem_flags) {
3165 int is_out = !usb_pipein(urb->pipe);
3166 int type = usb_pipetype(urb->pipe);
3167 int maxlen = usb_maxpacket(urb->dev, urb->pipe, is_out);
3168 int buf_len = urb->transfer_buffer_length;
3169 void *buf = buf_len > 0 ? urb->transfer_buffer : NULL;
3170 struct USB_SB_Desc *sb_desc = NULL;
3171
3172 struct crisv10_urb_priv *urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
3173 ASSERT(urb_priv != NULL);
3174
3175 switch(type) {
3176 case PIPE_CONTROL:
3177 /* Setup stage */
3178 sb_desc = create_sb(NULL, TT_SETUP, urb->setup_packet, 8, mem_flags);
3179 if(sb_desc == NULL)
3180 return -ENOMEM;
3181 set_sb_cmds(sb_desc, CMD_FULL);
3182
3183 /* Attach first SB to URB */
3184 urb_priv->first_sb = sb_desc;
3185
3186 if (is_out) { /* Out Control URB */
3187 /* If this Control OUT transfer has an optional data stage we add
3188 an OUT token before the mandatory IN (status) token */
3189 if ((buf_len > 0) && buf) {
3190 sb_desc = create_sb(sb_desc, TT_OUT, buf, buf_len, mem_flags);
3191 if(sb_desc == NULL)
3192 return -ENOMEM;
3193 set_sb_cmds(sb_desc, CMD_FULL);
3194 }
3195
3196 /* Status stage */
3197 /* The data length has to be exactly 1. This is due to a requirement
3198 of the USB specification that a host must be prepared to receive
3199 data in the status phase */
3200 sb_desc = create_sb(sb_desc, TT_IN, NULL, 1, mem_flags);
3201 if(sb_desc == NULL)
3202 return -ENOMEM;
3203 } else { /* In control URB */
3204 /* Data stage */
3205 sb_desc = create_sb_in(sb_desc, buf_len, maxlen, mem_flags);
3206 if(sb_desc == NULL)
3207 return -ENOMEM;
3208
3209 /* Status stage */
3210 /* Read comment at zout_buffer declaration for an explanation to this. */
3211 sb_desc = create_sb(sb_desc, TT_ZOUT, &zout_buffer[0], 1, mem_flags);
3212 if(sb_desc == NULL)
3213 return -ENOMEM;
3214 /* Set descriptor interrupt flag for in URBs so we can finish URB after
3215 zout-packet has been sent */
3216 set_sb_cmds(sb_desc, CMD_INTR | CMD_FULL);
3217 }
3218 /* Set end-of-list flag in last SB */
3219 set_sb_cmds(sb_desc, CMD_EOL);
3220 /* Attach last SB to URB */
3221 urb_priv->last_sb = sb_desc;
3222 break;
3223
3224 case PIPE_BULK:
3225 if (is_out) { /* Out Bulk URB */
3226 sb_desc = create_sb(NULL, TT_OUT, buf, buf_len, mem_flags);
3227 if(sb_desc == NULL)
3228 return -ENOMEM;
      /* The full field is set to yes, even if we don't actually check that
	 this is a full-length transfer (i.e., that transfer_buffer_length %
	 maxlen == 0).
	 Setting full prevents the USB controller from sending an empty packet
	 in that case. However, if URB_ZERO_PACKET was set we want that. */
3234 if (!(urb->transfer_flags & URB_ZERO_PACKET)) {
3235 set_sb_cmds(sb_desc, CMD_FULL);
3236 }
3237 } else { /* In Bulk URB */
3238 sb_desc = create_sb_in(NULL, buf_len, maxlen, mem_flags);
3239 if(sb_desc == NULL)
3240 return -ENOMEM;
3241 }
3242 /* Set end-of-list flag for last SB */
3243 set_sb_cmds(sb_desc, CMD_EOL);
3244
3245 /* Attach SB to URB */
3246 urb_priv->first_sb = sb_desc;
3247 urb_priv->last_sb = sb_desc;
3248 break;
3249
3250 case PIPE_INTERRUPT:
3251 if(is_out) { /* Out Intr URB */
3252 sb_desc = create_sb(NULL, TT_OUT, buf, buf_len, mem_flags);
3253 if(sb_desc == NULL)
3254 return -ENOMEM;
3255
      /* The full field is set to yes, even if we don't actually check that
	 this is a full-length transfer (i.e., that transfer_buffer_length %
	 maxlen == 0).
	 Setting full prevents the USB controller from sending an empty packet
	 in that case. However, if URB_ZERO_PACKET was set we want that. */
3261 if (!(urb->transfer_flags & URB_ZERO_PACKET)) {
3262 set_sb_cmds(sb_desc, CMD_FULL);
3263 }
      /* Only generate a TX interrupt if it's an Out URB */
3265 set_sb_cmds(sb_desc, CMD_INTR);
3266
3267 } else { /* In Intr URB */
3268 sb_desc = create_sb_in(NULL, buf_len, maxlen, mem_flags);
3269 if(sb_desc == NULL)
3270 return -ENOMEM;
3271 }
3272 /* Set end-of-list flag for last SB */
3273 set_sb_cmds(sb_desc, CMD_EOL);
3274
3275 /* Attach SB to URB */
3276 urb_priv->first_sb = sb_desc;
3277 urb_priv->last_sb = sb_desc;
3278
3279 break;
3280 case PIPE_ISOCHRONOUS:
3281 if(is_out) { /* Out Isoc URB */
3282 int i;
3283 if(urb->number_of_packets == 0) {
3284 tc_err("Can't create SBs for Isoc URB with zero packets\n");
3285 return -EPIPE;
3286 }
3287 /* Create one SB descriptor for each packet and link them together. */
3288 for(i = 0; i < urb->number_of_packets; i++) {
3289 if (urb->iso_frame_desc[i].length > 0) {
3290
3291 sb_desc = create_sb(sb_desc, TT_OUT, urb->transfer_buffer +
3292 urb->iso_frame_desc[i].offset,
3293 urb->iso_frame_desc[i].length, mem_flags);
3294 if(sb_desc == NULL)
3295 return -ENOMEM;
3296
3297 /* Check if it's a full length packet */
3298 if (urb->iso_frame_desc[i].length ==
3299 usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))) {
3300 set_sb_cmds(sb_desc, CMD_FULL);
3301 }
3302
3303 } else { /* zero length packet */
3304 sb_desc = create_sb(sb_desc, TT_ZOUT, &zout_buffer[0], 1, mem_flags);
3305 if(sb_desc == NULL)
3306 return -ENOMEM;
3307 set_sb_cmds(sb_desc, CMD_FULL);
3308 }
3309 /* Attach first SB descriptor to URB */
3310 if (i == 0) {
3311 urb_priv->first_sb = sb_desc;
3312 }
3313 }
3314 /* Set interrupt and end-of-list flags in last SB */
3315 set_sb_cmds(sb_desc, CMD_INTR | CMD_EOL);
3316 /* Attach last SB descriptor to URB */
3317 urb_priv->last_sb = sb_desc;
3318 tc_dbg("Created %d out SBs for Isoc URB:0x%x\n",
3319 urb->number_of_packets, (unsigned int)urb);
3320 } else { /* In Isoc URB */
3321 /* Actual number of packets is not relevant for periodic in traffic as
3322 long as it is more than zero. Set to 1 always. */
3323 sb_desc = create_sb(sb_desc, TT_IN, NULL, 1, mem_flags);
3324 if(sb_desc == NULL)
3325 return -ENOMEM;
3326 /* Set end-of-list flags for SB */
3327 set_sb_cmds(sb_desc, CMD_EOL);
3328
3329 /* Attach SB to URB */
3330 urb_priv->first_sb = sb_desc;
3331 urb_priv->last_sb = sb_desc;
3332 }
3333 break;
3334 default:
3335 tc_err("Unknown pipe-type\n");
3336 return -EPIPE;
3337 break;
3338 }
3339 return 0;
3340 }
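/* Illustration of the CMD_FULL / URB_ZERO_PACKET interplay above. This is a
   sketch only; the helper below is hypothetical and compiled out. A
   terminating zero-length packet is wanted exactly when the submitter set
   URB_ZERO_PACKET and the transfer length is a whole number of max-size
   packets (otherwise CMD_FULL is set to suppress it). */
#if 0
static int wants_terminating_zlp(struct urb *urb, unsigned int maxlen)
{
	/* Hypothetical helper, not part of the driver. */
	return (urb->transfer_flags & URB_ZERO_PACKET) &&
	       urb->transfer_buffer_length != 0 &&
	       (urb->transfer_buffer_length % maxlen) == 0;
}
#endif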
3341
3342 int init_intr_urb(struct urb *urb, int mem_flags) {
3343 struct crisv10_urb_priv *urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
3344 struct USB_EP_Desc* ep_desc;
3345 int interval;
3346 int i;
3347 int ep_count;
3348
3349 ASSERT(urb_priv != NULL);
3350 ASSERT(usb_pipeint(urb->pipe));
3351 /* We can't support an interval longer than the number of eof descriptors in
3352 TxIntrEPList */
3353 if(urb->interval > MAX_INTR_INTERVAL) {
3354 tc_err("Interrupt interval %dms too big (max: %dms)\n", urb->interval,
3355 MAX_INTR_INTERVAL);
3356 return -EINVAL;
3357 }
3358
3359 /* We assume that the SB descriptors already have been setup */
3360 ASSERT(urb_priv->first_sb != NULL);
3361
3362 /* Round the interval down to the nearest power of two. This code obviously
3363 favours smaller numbers, but that is actually a good thing */
3364 /* FIXME: The "rounding error" for larger intervals will be quite
3365 large. For in traffic this shouldn't be a problem since it will only
3366 mean that we "poll" more often. */
3367 interval = urb->interval;
3368 for (i = 0; interval; i++) {
3369 interval = interval >> 1;
3370 }
3371 urb_priv->interval = 1 << (i - 1);
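/* Worked example (illustrative): urb->interval = 10 (binary 1010) makes the
   loop above run four times, so i = 4 and urb_priv->interval = 1 << 3 = 8 ms,
   i.e. 10 is rounded down to the nearest power of two. */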
3372
3373 /* We can only use the max interval for Out Interrupt because we can only
3374 handle one linked-in EP for a certain epid in the Intr descr array at a
3375 time. The USB Controller in the Etrax 100LX continues to process Intr EPs
3376 so we have no way of knowing which one caused the actual transfer if
3377 we have several linked in. */
3378 if(usb_pipeout(urb->pipe)) {
3379 urb_priv->interval = MAX_INTR_INTERVAL;
3380 }
3381
3382 /* Calculate amount of EPs needed */
3383 ep_count = MAX_INTR_INTERVAL / urb_priv->interval;
3384
3385 for(i = 0; i < ep_count; i++) {
3386 ep_desc = create_ep(urb_priv->epid, urb_priv->first_sb, mem_flags);
3387 if(ep_desc == NULL) {
3388 /* Free any descriptors that we may have allocated before failure */
3389 while(i > 0) {
3390 i--;
3391 kfree(urb_priv->intr_ep_pool[i]);
3392 }
3393 return -ENOMEM;
3394 }
3395 urb_priv->intr_ep_pool[i] = ep_desc;
3396 }
3397 urb_priv->intr_ep_pool_length = ep_count;
3398 return 0;
3399 }
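/* Sizing example for the EP pool above (illustrative): an In Intr URB whose
   interval was rounded down to 8 ms gets ep_count = 128 / 8 = 16 descriptors,
   one for each 8 ms slot in the 128-entry TxIntrEPList. An Out Intr URB is
   forced to the 128 ms interval and therefore gets exactly one. */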
3400
3401 /* DMA RX/TX functions */
3402 /* ----------------------- */
3403
3404 static void tc_dma_init_rx_list(void) {
3405 int i;
3406
3407 /* Setup descriptor list except last one */
3408 for (i = 0; i < (NBR_OF_RX_DESC - 1); i++) {
3409 RxDescList[i].sw_len = RX_DESC_BUF_SIZE;
3410 RxDescList[i].command = 0;
3411 RxDescList[i].next = virt_to_phys(&RxDescList[i + 1]);
3412 RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE));
3413 RxDescList[i].hw_len = 0;
3414 RxDescList[i].status = 0;
3415
3416 /* DMA IN cache bug. (struct etrax_dma_descr has the same layout as
3417 USB_IN_Desc for the relevant fields.) */
3418 prepare_rx_descriptor((struct etrax_dma_descr*)&RxDescList[i]);
3419
3420 }
3421 /* Special handling of last descriptor */
3422 RxDescList[i].sw_len = RX_DESC_BUF_SIZE;
3423 RxDescList[i].command = IO_STATE(USB_IN_command, eol, yes);
3424 RxDescList[i].next = virt_to_phys(&RxDescList[0]);
3425 RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE));
3426 RxDescList[i].hw_len = 0;
3427 RxDescList[i].status = 0;
3428
3429 /* Setup list pointers that show progress in list */
3430 myNextRxDesc = &RxDescList[0];
3431 myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
3432
3433 flush_etrax_cache();
3434 /* Point DMA to first descriptor in list and start it */
3435 *R_DMA_CH9_FIRST = virt_to_phys(myNextRxDesc);
3436 *R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, start);
3437 }
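/* The loop above leaves the RX descriptors in a fixed circular layout. A
   minimal sketch of that invariant, assuming nothing beyond what
   tc_dma_init_rx_list() sets up (hypothetical helper, compiled out): */
#if 0
static void rx_ring_check(void)
{
	int i;
	for (i = 0; i < NBR_OF_RX_DESC; i++) {
		/* Each descriptor points at the next, the last wraps to the
		   first, and each owns one RX_DESC_BUF_SIZE chunk of RxBuf. */
		int next = (i + 1) % NBR_OF_RX_DESC;
		ASSERT(RxDescList[i].next == virt_to_phys(&RxDescList[next]));
		ASSERT(RxDescList[i].buf ==
		       virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE)));
	}
}
#endif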
3438
3439
3440 static void tc_dma_init_tx_bulk_list(void) {
3441 int i;
3442 volatile struct USB_EP_Desc *epDescr;
3443
3444 for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
3445 epDescr = &(TxBulkEPList[i]);
3446 CHECK_ALIGN(epDescr);
3447 epDescr->hw_len = 0;
3448 epDescr->command = IO_FIELD(USB_EP_command, epid, i);
3449 epDescr->sub = 0;
3450 epDescr->next = virt_to_phys(&TxBulkEPList[i + 1]);
3451
3452 /* Initialize two EPs, disabled and with the eol flag set. No need for any
3453 preserved epid. */
3454
3455 /* The first one has the intr flag set so we get an interrupt when the DMA
3456 channel is about to become disabled. */
3457 CHECK_ALIGN(&TxBulkDummyEPList[i][0]);
3458 TxBulkDummyEPList[i][0].hw_len = 0;
3459 TxBulkDummyEPList[i][0].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
3460 IO_STATE(USB_EP_command, eol, yes) |
3461 IO_STATE(USB_EP_command, intr, yes));
3462 TxBulkDummyEPList[i][0].sub = 0;
3463 TxBulkDummyEPList[i][0].next = virt_to_phys(&TxBulkDummyEPList[i][1]);
3464
3465 /* The second one. */
3466 CHECK_ALIGN(&TxBulkDummyEPList[i][1]);
3467 TxBulkDummyEPList[i][1].hw_len = 0;
3468 TxBulkDummyEPList[i][1].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
3469 IO_STATE(USB_EP_command, eol, yes));
3470 TxBulkDummyEPList[i][1].sub = 0;
3471 /* The last dummy's next pointer is the same as the current EP's next pointer. */
3472 TxBulkDummyEPList[i][1].next = virt_to_phys(&TxBulkEPList[i + 1]);
3473 }
3474
3475 /* Special handling of last descr in list, make list circular */
3476 epDescr = &TxBulkEPList[i];
3477 CHECK_ALIGN(epDescr);
3478 epDescr->hw_len = 0;
3479 epDescr->command = IO_STATE(USB_EP_command, eol, yes) |
3480 IO_FIELD(USB_EP_command, epid, i);
3481 epDescr->sub = 0;
3482 epDescr->next = virt_to_phys(&TxBulkEPList[0]);
3483
3484 /* Init DMA sub-channel pointers to last item in each list */
3485 *R_DMA_CH8_SUB0_EP = virt_to_phys(&TxBulkEPList[i]);
3486 /* No point in starting the bulk channel yet.
3487 *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start); */
3488 }
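/* Usage note (see tc_dma_process_queue() below): when an In Bulk URB is
   started on an epid, TxBulkEPList[epid].next is re-pointed at
   TxBulkDummyEPList[epid][0], so the two dummy EPs set up above follow the
   real EP. The first dummy's intr flag gives us an interrupt just before
   the DMA channel disables itself. */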
3489
3490 static void tc_dma_init_tx_ctrl_list(void) {
3491 int i;
3492 volatile struct USB_EP_Desc *epDescr;
3493
3494 for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
3495 epDescr = &(TxCtrlEPList[i]);
3496 CHECK_ALIGN(epDescr);
3497 epDescr->hw_len = 0;
3498 epDescr->command = IO_FIELD(USB_EP_command, epid, i);
3499 epDescr->sub = 0;
3500 epDescr->next = virt_to_phys(&TxCtrlEPList[i + 1]);
3501 }
3502 /* Special handling of last descr in list, make list circular */
3503 epDescr = &TxCtrlEPList[i];
3504 CHECK_ALIGN(epDescr);
3505 epDescr->hw_len = 0;
3506 epDescr->command = IO_STATE(USB_EP_command, eol, yes) |
3507 IO_FIELD(USB_EP_command, epid, i);
3508 epDescr->sub = 0;
3509 epDescr->next = virt_to_phys(&TxCtrlEPList[0]);
3510
3511 /* Init DMA sub-channel pointers to last item in each list */
3512 *R_DMA_CH8_SUB1_EP = virt_to_phys(&TxCtrlEPList[i]);
3513 /* No point in starting the ctrl channel yet.
3514 *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB1_CMD, cmd, start); */
3515 }
3516
3517
3518 static void tc_dma_init_tx_intr_list(void) {
3519 int i;
3520
3521 TxIntrSB_zout.sw_len = 1;
3522 TxIntrSB_zout.next = 0;
3523 TxIntrSB_zout.buf = virt_to_phys(&zout_buffer[0]);
3524 TxIntrSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
3525 IO_STATE(USB_SB_command, tt, zout) |
3526 IO_STATE(USB_SB_command, full, yes) |
3527 IO_STATE(USB_SB_command, eot, yes) |
3528 IO_STATE(USB_SB_command, eol, yes));
3529
3530 for (i = 0; i < (MAX_INTR_INTERVAL - 1); i++) {
3531 CHECK_ALIGN(&TxIntrEPList[i]);
3532 TxIntrEPList[i].hw_len = 0;
3533 TxIntrEPList[i].command =
3534 (IO_STATE(USB_EP_command, eof, yes) |
3535 IO_STATE(USB_EP_command, enable, yes) |
3536 IO_FIELD(USB_EP_command, epid, INVALID_EPID));
3537 TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
3538 TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[i + 1]);
3539 }
3540
3541 /* Special handling of last descr in list, make list circular */
3542 CHECK_ALIGN(&TxIntrEPList[i]);
3543 TxIntrEPList[i].hw_len = 0;
3544 TxIntrEPList[i].command =
3545 (IO_STATE(USB_EP_command, eof, yes) |
3546 IO_STATE(USB_EP_command, eol, yes) |
3547 IO_STATE(USB_EP_command, enable, yes) |
3548 IO_FIELD(USB_EP_command, epid, INVALID_EPID));
3549 TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
3550 TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[0]);
3551
3552 intr_dbg("Initiated Intr EP descriptor list\n");
3553
3554
3555 /* Connect DMA 8 sub-channel 2 to first in list */
3556 *R_DMA_CH8_SUB2_EP = virt_to_phys(&TxIntrEPList[0]);
3557 }
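/* The list built above has MAX_INTR_INTERVAL (128) entries, each carrying
   the eof flag, so every entry corresponds to one 1 ms frame. Example
   (illustrative): an In Intr URB with an interval of 32 ms later gets
   128 / 32 = 4 of its own EPs linked in by tc_dma_link_intr_urb(), one
   after every 32nd entry. */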
3558
3559 static void tc_dma_init_tx_isoc_list(void) {
3560 int i;
3561
3562 DBFENTER;
3563
3564 /* Read the comment at the zout_buffer declaration for an explanation of this. */
3565 TxIsocSB_zout.sw_len = 1;
3566 TxIsocSB_zout.next = 0;
3567 TxIsocSB_zout.buf = virt_to_phys(&zout_buffer[0]);
3568 TxIsocSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
3569 IO_STATE(USB_SB_command, tt, zout) |
3570 IO_STATE(USB_SB_command, full, yes) |
3571 IO_STATE(USB_SB_command, eot, yes) |
3572 IO_STATE(USB_SB_command, eol, yes));
3573
3574 /* The last isochronous EP descriptor is a dummy. */
3575 for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
3576 CHECK_ALIGN(&TxIsocEPList[i]);
3577 TxIsocEPList[i].hw_len = 0;
3578 TxIsocEPList[i].command = IO_FIELD(USB_EP_command, epid, i);
3579 TxIsocEPList[i].sub = 0;
3580 TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[i + 1]);
3581 }
3582
3583 CHECK_ALIGN(&TxIsocEPList[i]);
3584 TxIsocEPList[i].hw_len = 0;
3585
3586 /* Must enable the last EP descr to get eof interrupt. */
3587 TxIsocEPList[i].command = (IO_STATE(USB_EP_command, enable, yes) |
3588 IO_STATE(USB_EP_command, eof, yes) |
3589 IO_STATE(USB_EP_command, eol, yes) |
3590 IO_FIELD(USB_EP_command, epid, INVALID_EPID));
3591 TxIsocEPList[i].sub = virt_to_phys(&TxIsocSB_zout);
3592 TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[0]);
3593
3594 *R_DMA_CH8_SUB3_EP = virt_to_phys(&TxIsocEPList[0]);
3595 *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);
3596 }
3597
3598 static int tc_dma_init(struct usb_hcd *hcd) {
3599 tc_dma_init_rx_list();
3600 tc_dma_init_tx_bulk_list();
3601 tc_dma_init_tx_ctrl_list();
3602 tc_dma_init_tx_intr_list();
3603 tc_dma_init_tx_isoc_list();
3604
3605 if (cris_request_dma(USB_TX_DMA_NBR,
3606 "ETRAX 100LX built-in USB (Tx)",
3607 DMA_VERBOSE_ON_ERROR,
3608 dma_usb)) {
3609 err("Could not allocate DMA ch 8 for USB");
3610 return -EBUSY;
3611 }
3612
3613 if (cris_request_dma(USB_RX_DMA_NBR,
3614 "ETRAX 100LX built-in USB (Rx)",
3615 DMA_VERBOSE_ON_ERROR,
3616 dma_usb)) {
3617 err("Could not allocate DMA ch 9 for USB");
3618 return -EBUSY;
3619 }
3620
3621 *R_IRQ_MASK2_SET =
3622 /* Note that these interrupts are not used. */
3623 IO_STATE(R_IRQ_MASK2_SET, dma8_sub0_descr, set) |
3624 /* Sub channel 1 (ctrl) descr. interrupts are used. */
3625 IO_STATE(R_IRQ_MASK2_SET, dma8_sub1_descr, set) |
3626 IO_STATE(R_IRQ_MASK2_SET, dma8_sub2_descr, set) |
3627 /* Sub channel 3 (isoc) descr. interrupts are used. */
3628 IO_STATE(R_IRQ_MASK2_SET, dma8_sub3_descr, set);
3629
3630 /* Note that the dma9_descr interrupt is not used. */
3631 *R_IRQ_MASK2_SET =
3632 IO_STATE(R_IRQ_MASK2_SET, dma9_eop, set) |
3633 IO_STATE(R_IRQ_MASK2_SET, dma9_descr, set);
3634
3635 if (request_irq(ETRAX_USB_RX_IRQ, tc_dma_rx_interrupt, 0,
3636 "ETRAX 100LX built-in USB (Rx)", hcd)) {
3637 err("Could not allocate IRQ %d for USB", ETRAX_USB_RX_IRQ);
3638 return -EBUSY;
3639 }
3640
3641 if (request_irq(ETRAX_USB_TX_IRQ, tc_dma_tx_interrupt, 0,
3642 "ETRAX 100LX built-in USB (Tx)", hcd)) {
3643 err("Could not allocate IRQ %d for USB", ETRAX_USB_TX_IRQ);
3644 return -EBUSY;
3645 }
3646
3647 return 0;
3648 }
3649
3650 static void tc_dma_destroy(void) {
3651 free_irq(ETRAX_USB_RX_IRQ, NULL);
3652 free_irq(ETRAX_USB_TX_IRQ, NULL);
3653
3654 cris_free_dma(USB_TX_DMA_NBR, "ETRAX 100LX built-in USB (Tx)");
3655 cris_free_dma(USB_RX_DMA_NBR, "ETRAX 100LX built-in USB (Rx)");
3656
3657 }
3658
3659 static void tc_dma_link_intr_urb(struct urb *urb);
3660
3661 /* Handle processing of Bulk, Ctrl and Intr queues */
3662 static void tc_dma_process_queue(int epid) {
3663 struct urb *urb;
3664 struct crisv10_urb_priv *urb_priv;
3665 unsigned long flags;
3666 char toggle;
3667
3668 if(epid_state[epid].disabled) {
3669 /* Don't process any URBs on a disabled endpoint */
3670 return;
3671 }
3672
3673 /* Do not disturb us while fiddling with EPs and epids */
3674 local_irq_save(flags);
3675
3676 /* For Bulk, Ctrl and Intr we can only have one URB active at a time for
3677 a specific EP. */
3678 if(activeUrbList[epid] != NULL) {
3679 /* An URB is already active on EP, skip checking queue */
3680 local_irq_restore(flags);
3681 return;
3682 }
3683
3684 urb = urb_list_first(epid);
3685 if(urb == NULL) {
3686 /* No URB waiting in EP queue. Nothing to do */
3687 local_irq_restore(flags);
3688 return;
3689 }
3690
3691 urb_priv = urb->hcpriv;
3692 ASSERT(urb_priv != NULL);
3693 ASSERT(urb_priv->urb_state == NOT_STARTED);
3694 ASSERT(!usb_pipeisoc(urb->pipe));
3695
3696 /* Remove this URB from the queue and move it to active */
3697 activeUrbList[epid] = urb;
3698 urb_list_del(urb, epid);
3699
3700 urb_priv->urb_state = STARTED;
3701
3702 /* Reset error counters (regardless of which direction this traffic is). */
3703 etrax_epid_clear_error(epid);
3704
3705 /* Special handling of Intr EP lists */
3706 if(usb_pipeint(urb->pipe)) {
3707 tc_dma_link_intr_urb(urb);
3708 local_irq_restore(flags);
3709 return;
3710 }
3711
3712 /* Software must preset the toggle bits for Bulk and Ctrl */
3713 if(usb_pipecontrol(urb->pipe)) {
3714 /* Toggle bits are initialized only during setup transaction in a
3715 CTRL transfer */
3716 etrax_epid_set_toggle(epid, 0, 0);
3717 etrax_epid_set_toggle(epid, 1, 0);
3718 } else {
3719 toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
3720 usb_pipeout(urb->pipe));
3721 etrax_epid_set_toggle(epid, usb_pipeout(urb->pipe), toggle);
3722 }
3723
3724 tc_dbg("Added SBs from (URB:0x%x %s %s) to epid %d: %s\n",
3725 (unsigned int)urb, str_dir(urb->pipe), str_type(urb->pipe), epid,
3726 sblist_to_str(urb_priv->first_sb));
3727
3728 /* We start the DMA sub channel without checking if it's running or not,
3729 because:
3730 1) If it's already running, issuing the start command is a nop.
3731 2) We avoid a test-and-set race condition. */
3732 switch(usb_pipetype(urb->pipe)) {
3733 case PIPE_BULK:
3734 /* Assert that the EP descriptor is disabled. */
3735 ASSERT(!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)));
3736
3737 /* Set up and enable the EP descriptor. */
3738 TxBulkEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
3739 TxBulkEPList[epid].hw_len = 0;
3740 TxBulkEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
3741
3742 /* Check if the dummy list is already with us (if several urbs were queued). */
3743 if (usb_pipein(urb->pipe) && (TxBulkEPList[epid].next != virt_to_phys(&TxBulkDummyEPList[epid][0]))) {
3744 tc_dbg("Inviting dummy list to the party for urb 0x%lx, epid %d",
3745 (unsigned long)urb, epid);
3746
3747 /* We don't need to check if the DMA is at this EP or not before changing the
3748 next pointer, since we will do it in one 32-bit write (EP descriptors are
3749 32-bit aligned). */
3750 TxBulkEPList[epid].next = virt_to_phys(&TxBulkDummyEPList[epid][0]);
3751 }
3752
3753 restart_dma8_sub0();
3754
3755 /* Update/restart the bulk start timer since we just started the channel.*/
3756 mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
3757 /* Update/restart the bulk eot timer since we just inserted traffic. */
3758 mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
3759 break;
3760 case PIPE_CONTROL:
3761 /* Assert that the EP descriptor is disabled. */
3762 ASSERT(!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)));
3763
3764 /* Set up and enable the EP descriptor. */
3765 TxCtrlEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
3766 TxCtrlEPList[epid].hw_len = 0;
3767 TxCtrlEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
3768
3769 *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB1_CMD, cmd, start);
3770 break;
3771 }
3772 local_irq_restore(flags);
3773 }
3774
3775 static void tc_dma_link_intr_urb(struct urb *urb) {
3776 struct crisv10_urb_priv *urb_priv = urb->hcpriv;
3777 volatile struct USB_EP_Desc *tmp_ep;
3778 struct USB_EP_Desc *ep_desc;
3779 int i = 0, epid;
3780 int pool_idx = 0;
3781
3782 ASSERT(urb_priv != NULL);
3783 epid = urb_priv->epid;
3784 ASSERT(urb_priv->interval > 0);
3785 ASSERT(urb_priv->intr_ep_pool_length > 0);
3786
3787 tmp_ep = &TxIntrEPList[0];
3788
3789 /* Only insert one EP descriptor in the list for Out Intr URBs.
3790 We can only handle Out Intr with an interval of 128ms: it's not
3791 possible to insert several Out Intr EPs because they are not
3792 consumed by the DMA. */
3793 if(usb_pipeout(urb->pipe)) {
3794 ep_desc = urb_priv->intr_ep_pool[0];
3795 ASSERT(ep_desc);
3796 ep_desc->next = tmp_ep->next;
3797 tmp_ep->next = virt_to_phys(ep_desc);
3798 i++;
3799 } else {
3800 /* Loop through Intr EP descriptor list and insert EP for URB at
3801 specified interval */
3802 do {
3803 /* Each EP descriptor with the eof flag set signals a new frame */
3804 if (tmp_ep->command & IO_MASK(USB_EP_command, eof)) {
3805 /* Insert an EP from the URB's EP pool at the correct interval */
3806 if ((i % urb_priv->interval) == 0) {
3807 ep_desc = urb_priv->intr_ep_pool[pool_idx];
3808 ASSERT(ep_desc);
3809 ep_desc->next = tmp_ep->next;
3810 tmp_ep->next = virt_to_phys(ep_desc);
3811 pool_idx++;
3812 ASSERT(pool_idx <= urb_priv->intr_ep_pool_length);
3813 }
3814 i++;
3815 }
3816 tmp_ep = (struct USB_EP_Desc *)phys_to_virt(tmp_ep->next);
3817 } while(tmp_ep != &TxIntrEPList[0]);
3818 }
3819
3820 intr_dbg("Added SBs to intr epid %d: %s interval:%d (%d EP)\n", epid,
3821 sblist_to_str(urb_priv->first_sb), urb_priv->interval, pool_idx);
3822
3823 /* We start the DMA sub channel without checking if it's running or not,
3824 because:
3825 1) If it's already running, issuing the start command is a nop.
3826 2) We avoid a test-and-set race condition. */
3827 *R_DMA_CH8_SUB2_CMD = IO_STATE(R_DMA_CH8_SUB2_CMD, cmd, start);
3828 }
3829
3830 static void tc_dma_process_isoc_urb(struct urb *urb) {
3831 unsigned long flags;
3832 struct crisv10_urb_priv *urb_priv = urb->hcpriv;
3833 int epid;
3834
3835 /* Do not disturb us while fiddling with EPs and epids */
3836 local_irq_save(flags);
3837
3838 ASSERT(urb_priv);
3839 ASSERT(urb_priv->first_sb);
3840 epid = urb_priv->epid;
3841
3842 if(activeUrbList[epid] == NULL) {
3843 /* EP is idle, so make this URB active */
3844 activeUrbList[epid] = urb;
3845 urb_list_del(urb, epid);
3846 ASSERT(TxIsocEPList[epid].sub == 0);
3847 ASSERT(!(TxIsocEPList[epid].command &
3848 IO_STATE(USB_EP_command, enable, yes)));
3849
3850 /* Differentiate between In and Out Isoc, because In SBs are not consumed */
3851 if(usb_pipein(urb->pipe)) {
3852 /* Each EP for In Isoc will have only one SB descriptor, set up when
3853 submitting the first active urb. We do it here by copying from the URB's
3854 pre-allocated SB. */
3855 memcpy((void *)&(TxIsocSBList[epid]), urb_priv->first_sb,
3856 sizeof(TxIsocSBList[epid]));
3857 TxIsocEPList[epid].hw_len = 0;
3858 TxIsocEPList[epid].sub = virt_to_phys(&(TxIsocSBList[epid]));
3859 } else {
3860 /* For Out Isoc we attach the pre-allocated list of SBs for the URB */
3861 TxIsocEPList[epid].hw_len = 0;
3862 TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
3863
3864 isoc_dbg("Attached first URB:0x%x[%d] to epid:%d first_sb:0x%x"
3865 " last_sb::0x%x\n",
3866 (unsigned int)urb, urb_priv->urb_num, epid,
3867 (unsigned int)(urb_priv->first_sb),
3868 (unsigned int)(urb_priv->last_sb));
3869 }
3870
3871 if (urb->transfer_flags & URB_ISO_ASAP) {
3872 /* The isoc transfer should be started as soon as possible. The
3873 start_frame field is a return value if URB_ISO_ASAP was set. Comparing
3874 R_USB_FM_NUMBER with a USB Chief trace shows that the first isoc IN
3875 token is sent 2 frames later. I'm not sure how this affects usage of
3876 the start_frame field by the device driver, or how it affects things
3877 when URB_ISO_ASAP is not set, so there's no compensation for
3878 the 2 frame "lag" here. */
3879 urb->start_frame = (*R_USB_FM_NUMBER & 0x7ff);
3880 TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
3881 urb_priv->urb_state = STARTED;
3882 isoc_dbg("URB_ISO_ASAP set, urb->start_frame set to %d\n",
3883 urb->start_frame);
3884 } else {
3885 /* Not started yet. */
3886 urb_priv->urb_state = NOT_STARTED;
3887 isoc_warn("urb_priv->urb_state set to NOT_STARTED for URB:0x%x\n",
3888 (unsigned int)urb);
3889 }
3890
3891 } else {
3892 /* An URB is already active on the EP. Leave URB in queue and let
3893 finish_isoc_urb process it after current active URB */
3894 ASSERT(TxIsocEPList[epid].sub != 0);
3895
3896 if(usb_pipein(urb->pipe)) {
3897 /* Because there already is an active In URB on this epid we do nothing
3898 and the finish_isoc_urb() function will handle switching to the next URB */
3899
3900 } else { /* For Out Isoc, insert new URBs traffic last in SB-list. */
3901 struct USB_SB_Desc *temp_sb_desc;
3902
3903 /* Set state STARTED on all Out Isoc URBs added to the SB list because we
3904 don't know how many of them are finished before the descr interrupt */
3905 urb_priv->urb_state = STARTED;
3906
3907 /* Find the end of the current SB list by looking for the SB with the eol flag set */
3908 temp_sb_desc = phys_to_virt(TxIsocEPList[epid].sub);
3909 while ((temp_sb_desc->command & IO_MASK(USB_SB_command, eol)) !=
3910 IO_STATE(USB_SB_command, eol, yes)) {
3911 ASSERT(temp_sb_desc->next);
3912 temp_sb_desc = phys_to_virt(temp_sb_desc->next);
3913 }
3914
3915 isoc_dbg("Appended URB:0x%x[%d] (first:0x%x last:0x%x) to epid:%d"
3916 " sub:0x%x eol:0x%x\n",
3917 (unsigned int)urb, urb_priv->urb_num,
3918 (unsigned int)(urb_priv->first_sb),
3919 (unsigned int)(urb_priv->last_sb), epid,
3920 (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
3921 (unsigned int)temp_sb_desc);
3922
3923 /* Next pointer must be set before eol is removed. */
3924 temp_sb_desc->next = virt_to_phys(urb_priv->first_sb);
3925 /* Clear the previous end-of-list flag since there is a new one at the
3926 end of the added SB descriptor list. */
3927 temp_sb_desc->command &= ~IO_MASK(USB_SB_command, eol);
3928
3929 if (!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) {
3930 __u32 epid_data;
3931 /* 8.8.5 in Designer's Reference says we should check for and correct
3932 any errors in the EP here. That should not be necessary if
3933 epid_attn is handled correctly, so we assume all is ok. */
3934 epid_data = etrax_epid_iso_get(epid);
3935 if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) !=
3936 IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
3937 isoc_err("Disabled Isoc EP with error:%d on epid:%d when appending"
3938 " URB:0x%x[%d]\n",
3939 IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data), epid,
3940 (unsigned int)urb, urb_priv->urb_num);
3941 }
3942
3943 /* The SB list was exhausted. */
3944 if (virt_to_phys(urb_priv->last_sb) != TxIsocEPList[epid].sub) {
3945 /* The new sublist did not get processed before the EP was
3946 disabled. Setup the EP again. */
3947
3948 if(virt_to_phys(temp_sb_desc) == TxIsocEPList[epid].sub) {
3949 isoc_dbg("EP for epid:%d stopped at SB:0x%x before newly inserted"
3950 ", restarting from this URB's SB:0x%x\n",
3951 epid, (unsigned int)temp_sb_desc,
3952 (unsigned int)(urb_priv->first_sb));
3953 TxIsocEPList[epid].hw_len = 0;
3954 TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
3955 urb->start_frame = (*R_USB_FM_NUMBER & 0x7ff);
3956 /* Enable the EP again so data gets processed this time */
3957 TxIsocEPList[epid].command |=
3958 IO_STATE(USB_EP_command, enable, yes);
3959
3960 } else {
3961 /* The EP has been disabled, but not at the end of this URB (god knows
3962 where). This should generate an epid_attn, so we should not be
3963 here */
3964 isoc_warn("EP was disabled on sb:0x%x before SB list for"
3965 " URB:0x%x[%d] got processed\n",
3966 (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
3967 (unsigned int)urb, urb_priv->urb_num);
3968 }
3969 } else {
3970 /* This might happen if we are slow in this function; it isn't
3971 an error. */
3972 isoc_dbg("EP was disabled and finished with SBs from appended"
3973 " URB:0x%x[%d]\n", (unsigned int)urb, urb_priv->urb_num);
3974 }
3975 }
3976 }
3977 }
3978
3979 /* Start the DMA sub channel */
3980 *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);
3981
3982 local_irq_restore(flags);
3983 }
3984
3985 static void tc_dma_unlink_intr_urb(struct urb *urb) {
3986 struct crisv10_urb_priv *urb_priv = urb->hcpriv;
3987 volatile struct USB_EP_Desc *first_ep; /* First EP in the list. */
3988 volatile struct USB_EP_Desc *curr_ep; /* Current EP, the iterator. */
3989 volatile struct USB_EP_Desc *next_ep; /* The EP after current. */
3990 volatile struct USB_EP_Desc *unlink_ep; /* The one we should remove from
3991 the list. */
3992 int count = 0;
3993 volatile int timeout = 10000;
3994 int epid;
3995
3996 /* Read 8.8.4 in Designer's Reference, "Removing an EP Descriptor from the
3997 List". */
3998 ASSERT(urb_priv);
3999 ASSERT(urb_priv->intr_ep_pool_length > 0);
4000 epid = urb_priv->epid;
4001
4002 /* First disable all Intr EPs belonging to epid for this URB */
4003 first_ep = &TxIntrEPList[0];
4004 curr_ep = first_ep;
4005 do {
4006 next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
4007 if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
4008 /* Disable EP */
4009 next_ep->command &= ~IO_MASK(USB_EP_command, enable);
4010 }
4011 curr_ep = phys_to_virt(curr_ep->next);
4012 } while (curr_ep != first_ep);
4013
4014
4015 /* Now unlink all EPs belonging to this epid from Descr list */
4016 first_ep = &TxIntrEPList[0];
4017 curr_ep = first_ep;
4018 do {
4019 next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
4020 if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
4021 /* This is the one we should unlink. */
4022 unlink_ep = next_ep;
4023
4024 /* Actually unlink the EP from the DMA list. */
4025 curr_ep->next = unlink_ep->next;
4026
4027 /* Wait until the DMA is no longer at this descriptor. */
4028 while((*R_DMA_CH8_SUB2_EP == virt_to_phys(unlink_ep)) &&
4029 (timeout-- > 0));
4030
4031 count++;
4032 }
4033 curr_ep = phys_to_virt(curr_ep->next);
4034 } while (curr_ep != first_ep);
4035
4036 if(count != urb_priv->intr_ep_pool_length) {
4037 intr_warn("Unlinked %d of %d Intr EPs for URB:0x%x[%d]\n", count,
4038 urb_priv->intr_ep_pool_length, (unsigned int)urb,
4039 urb_priv->urb_num);
4040 } else {
4041 intr_dbg("Unlinked %d of %d interrupt EPs for URB:0x%x\n", count,
4042 urb_priv->intr_ep_pool_length, (unsigned int)urb);
4043 }
4044 }
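/* The unlink above follows the Designer's Reference 8.8.4 pattern: first
   clear the enable bit of every EP belonging to the epid, then splice each
   one out with a single aligned 32-bit write to the predecessor's next
   pointer, and finally spin until R_DMA_CH8_SUB2_EP shows the DMA has left
   the unlinked descriptor before it can safely be reused. */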
4045
4046 static void check_finished_bulk_tx_epids(struct usb_hcd *hcd,
4047 int timer) {
4048 unsigned long flags;
4049 int epid;
4050 struct urb *urb;
4051 struct crisv10_urb_priv * urb_priv;
4052 __u32 epid_data;
4053
4054 /* Protect TxEPList */
4055 local_irq_save(flags);
4056
4057 for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
4058 /* A finished EP descriptor is disabled and has a valid sub pointer */
4059 if (!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) &&
4060 (TxBulkEPList[epid].sub != 0)) {
4061
4062 /* Get the active URB for this epid */
4063 urb = activeUrbList[epid];
4064 /* Sanity checks */
4065 ASSERT(urb);
4066 urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
4067 ASSERT(urb_priv);
4068
4069 /* Only handle finished out Bulk EPs here,
4070 and let RX interrupt take care of the rest */
4071 if(!epid_out_traffic(epid)) {
4072 continue;
4073 }
4074
4075 if(timer) {
4076 tc_warn("Found finished %s Bulk epid:%d URB:0x%x[%d] from timeout\n",
4077 epid_out_traffic(epid) ? "Out" : "In", epid, (unsigned int)urb,
4078 urb_priv->urb_num);
4079 } else {
4080 tc_dbg("Found finished %s Bulk epid:%d URB:0x%x[%d] from interrupt\n",
4081 epid_out_traffic(epid) ? "Out" : "In", epid, (unsigned int)urb,
4082 urb_priv->urb_num);
4083 }
4084
4085 if(urb_priv->urb_state == UNLINK) {
4086 /* This Bulk URB is requested to be unlinked, that means that the EP
4087 has been disabled and we might not have sent all data */
4088 tc_finish_urb(hcd, urb, urb->status);
4089 continue;
4090 }
4091
4092 ASSERT(urb_priv->urb_state == STARTED);
4093 if (phys_to_virt(TxBulkEPList[epid].sub) != urb_priv->last_sb) {
4094 tc_err("Endpoint got disabled before reaching last sb\n");
4095 }
4096
4097 epid_data = etrax_epid_get(epid);
4098 if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) ==
4099 IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
4100 /* This means that the endpoint has no error, is disabled
4101 and had inserted traffic, i.e. transfer successfully completed. */
4102 tc_finish_urb(hcd, urb, 0);
4103 } else {
4104 /* Shouldn't happen. We expect errors to be caught by epid
4105 attention. */
4106 tc_err("Found disabled bulk EP desc (epid:%d error:%d)\n",
4107 epid, IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data));
4108 }
4109 } else {
4110 tc_dbg("Ignoring In Bulk epid:%d, let RX interrupt handle it\n", epid);
4111 }
4112 }
4113
4114 local_irq_restore(flags);
4115 }
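/* The "finished" test used above (and in check_finished_ctrl_tx_epids())
   can be read as a predicate; a compiled-out sketch with a hypothetical
   helper name: */
#if 0
static int ep_desc_finished(volatile struct USB_EP_Desc *ep)
{
	/* Hardware clears the enable bit when it disables the EP; a non-zero
	   sub pointer means software has not yet reclaimed the SB list. */
	return !(ep->command & IO_MASK(USB_EP_command, enable)) &&
	       (ep->sub != 0);
}
#endif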
4116
4117 static void check_finished_ctrl_tx_epids(struct usb_hcd *hcd) {
4118 unsigned long flags;
4119 int epid;
4120 struct urb *urb;
4121 struct crisv10_urb_priv * urb_priv;
4122 __u32 epid_data;
4123
4124 /* Protect TxEPList */
4125 local_irq_save(flags);
4126
4127 for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
4128 if(epid == DUMMY_EPID)
4129 continue;
4130
4131 /* A finished EP descriptor is disabled and has a valid sub pointer */
4132 if (!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) &&
4133 (TxCtrlEPList[epid].sub != 0)) {
4134
4135 /* Get the active URB for this epid */
4136 urb = activeUrbList[epid];
4137
4138 if(urb == NULL) {
4139 tc_warn("Found finished Ctrl epid:%d with no active URB\n", epid);
4140 continue;
4141 }
4142
4143 /* Sanity checks */
4144 ASSERT(usb_pipein(urb->pipe));
4145 urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
4146 ASSERT(urb_priv);
4147 if (phys_to_virt(TxCtrlEPList[epid].sub) != urb_priv->last_sb) {
4148 tc_err("Endpoint got disabled before reaching last sb\n");
4149 }
4150
4151 epid_data = etrax_epid_get(epid);
4152 if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) ==
4153 IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
4154 /* This means that the endpoint has no error, is disabled
4155 and had inserted traffic, i.e. transfer successfully completed. */
4156
4157 /* Check if RX-interrupt for In Ctrl has been processed before
4158 finishing the URB */
4159 if(urb_priv->ctrl_rx_done) {
4160 tc_dbg("Finishing In Ctrl URB:0x%x[%d] in tx_interrupt\n",
4161 (unsigned int)urb, urb_priv->urb_num);
4162 tc_finish_urb(hcd, urb, 0);
4163 } else {
4164 /* If we get the zout descriptor interrupt before RX was done for an
4165 In Ctrl transfer, then we flag that and the URB will be finished
4166 in the RX interrupt */
4167 urb_priv->ctrl_zout_done = 1;
4168 tc_dbg("Got zout descr interrupt before RX interrupt\n");
4169 }
4170 } else {
4171 /* Shouldn't happen. We expect errors to be caught by epid
4172 attention. */
4173 tc_err("Found disabled Ctrl EP desc (epid:%d URB:0x%x[%d]) error_code:%d\n", epid, (unsigned int)urb, urb_priv->urb_num, IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data));
4174 __dump_ep_desc(&(TxCtrlEPList[epid]));
4175 __dump_ept_data(epid);
4176 }
4177 }
4178 }
4179 local_irq_restore(flags);
4180 }
4181
4182 /* This function goes through all epids that are set up for Out Isoc transfers
4183 and marks (isoc_out_done) all queued URBs that the DMA has finished the
4184 transfer for.
4185 No URB completion is done here, to make the interrupt routine return quickly.
4186 URBs are completed later with the help of complete_isoc_bottom_half(), which
4187 is scheduled when this function is finished. */
4188 static void check_finished_isoc_tx_epids(void) {
4189 unsigned long flags;
4190 int epid;
4191 struct urb *urb;
4192 struct crisv10_urb_priv * urb_priv;
4193 struct USB_SB_Desc* sb_desc;
4194 int epid_done;
4195
4196 /* Protect TxIsocEPList */
4197 local_irq_save(flags);
4198
4199 for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
4200 if (TxIsocEPList[epid].sub == 0 || epid == INVALID_EPID ||
4201 !epid_out_traffic(epid)) {
4202 /* Nothing to see here. */
4203 continue;
4204 }
4205 ASSERT(epid_inuse(epid));
4206 ASSERT(epid_isoc(epid));
4207
4208 sb_desc = phys_to_virt(TxIsocEPList[epid].sub);
4209 /* Find the last descriptor of the currently active URB for this ep.
4210 This is the first descriptor in the sub list marked for a descriptor
4211 interrupt. */
4212 while (sb_desc && !IO_EXTRACT(USB_SB_command, intr, sb_desc->command)) {
4213 sb_desc = sb_desc->next ? phys_to_virt(sb_desc->next) : 0;
4214 }
4215 ASSERT(sb_desc);
4216
4217 isoc_dbg("Descr IRQ checking epid:%d sub:0x%x intr:0x%x\n",
4218 epid, (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
4219 (unsigned int)sb_desc);
4220
4221 urb = activeUrbList[epid];
4222 if(urb == NULL) {
4223 isoc_err("Isoc Descr irq on epid:%d with no active URB\n", epid);
4224 continue;
4225 }
4226
4227 epid_done = 0;
4228 while(urb && !epid_done) {
4229 /* Sanity check. */
4230 ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
4231 ASSERT(usb_pipeout(urb->pipe));
4232
4233 urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
4234 ASSERT(urb_priv);
4235 ASSERT(urb_priv->urb_state == STARTED ||
4236 urb_priv->urb_state == UNLINK);
4237
4238 if (sb_desc != urb_priv->last_sb) {
4239 /* This urb has been sent. */
4240 urb_priv->isoc_out_done = 1;
4241
4242 } else { /* Found URB that has last_sb as the interrupt reason */
4243
4244 /* Check if EP has been disabled, meaning that all transfers are done*/
4245 if(!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) {
4246 ASSERT((sb_desc->command & IO_MASK(USB_SB_command, eol)) ==
4247 IO_STATE(USB_SB_command, eol, yes));
4248 ASSERT(sb_desc->next == 0);
4249 urb_priv->isoc_out_done = 1;
4250 } else {
4251 isoc_dbg("Skipping URB:0x%x[%d] because EP not disabled yet\n",
4252 (unsigned int)urb, urb_priv->urb_num);
4253 }
4254 /* Stop looking any further in queue */
4255 epid_done = 1;
4256 }
4257
4258 if (!epid_done) {
4259 if(urb == activeUrbList[epid]) {
4260 urb = urb_list_first(epid);
4261 } else {
4262 urb = urb_list_next(urb, epid);
4263 }
4264 }
4265 } /* END: while(urb && !epid_done) */
4266 }
4267
4268 local_irq_restore(flags);
4269 }
4270
4271
4272 /* This is where the Out Isoc URBs are really completed. This function is
4273 scheduled from tc_dma_tx_interrupt() when one or more Out Isoc transfers
4274 are done. This function completes all URBs earlier marked with
4275 isoc_out_done by the fast interrupt routine check_finished_isoc_tx_epids() */
4276
4277 static void complete_isoc_bottom_half(struct work_struct* work) {
4278 struct crisv10_isoc_complete_data *comp_data;
4279 struct usb_iso_packet_descriptor *packet;
4280 struct crisv10_urb_priv * urb_priv;
4281 unsigned long flags;
4282 struct urb* urb;
4283 int epid_done;
4284 int epid;
4285 int i;
4286
4287 comp_data = container_of(work, struct crisv10_isoc_complete_data, usb_bh);
4288
4289 local_irq_save(flags);
4290
4291 for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
4292 if(!epid_inuse(epid) || !epid_isoc(epid) || !epid_out_traffic(epid) || epid == DUMMY_EPID) {
4293 /* Only check valid Out Isoc epids */
4294 continue;
4295 }
4296
4297 isoc_dbg("Isoc bottom-half checking epid:%d, sub:0x%x\n", epid,
4298 (unsigned int)phys_to_virt(TxIsocEPList[epid].sub));
4299
4300 /* The descriptor interrupt handler has marked all transmitted Out Isoc
4301 URBs with isoc_out_done. Now we traverse all epids and for all that
4302 have out Isoc traffic we traverse its URB list and complete the
4303 transmitted URBs. */
4304 epid_done = 0;
4305 while (!epid_done) {
4306
4307 /* Get the active urb (if any) */
4308 urb = activeUrbList[epid];
4309 if (urb == 0) {
4310 isoc_dbg("No active URB on epid:%d anymore\n", epid);
4311 epid_done = 1;
4312 continue;
4313 }
4314
4315 /* Sanity check. */
4316 ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
4317 ASSERT(usb_pipeout(urb->pipe));
4318
4319 urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
4320 ASSERT(urb_priv);
4321
4322 if (!(urb_priv->isoc_out_done)) {
4323 /* We have reached a URB that isn't flagged done yet, stop traversing. */
4324 isoc_dbg("Stopped traversing Out Isoc URBs on epid:%d"
4325 " before not yet flagged URB:0x%x[%d]\n",
4326 epid, (unsigned int)urb, urb_priv->urb_num);
4327 epid_done = 1;
4328 continue;
4329 }
4330
4331 /* This urb has been sent. */
4332 isoc_dbg("Found URB:0x%x[%d] that is flagged isoc_out_done\n",
4333 (unsigned int)urb, urb_priv->urb_num);
4334
4335 /* Set ok on transferred packets for this URB and finish it */
4336 for (i = 0; i < urb->number_of_packets; i++) {
4337 packet = &urb->iso_frame_desc[i];
4338 packet->status = 0;
4339 packet->actual_length = packet->length;
4340 }
4341 urb_priv->isoc_packet_counter = urb->number_of_packets;
4342 tc_finish_urb(comp_data->hcd, urb, 0);
4343
4344 } /* END: while(!epid_done) */
4345 } /* END: for(epid...) */
4346
4347 local_irq_restore(flags);
4348 kmem_cache_free(isoc_compl_cache, comp_data);
4349 }
4350
4351
4352 static void check_finished_intr_tx_epids(struct usb_hcd *hcd) {
4353 unsigned long flags;
4354 int epid;
4355 struct urb *urb;
4356 struct crisv10_urb_priv * urb_priv;
4357 volatile struct USB_EP_Desc *curr_ep; /* Current EP, the iterator. */
4358 volatile struct USB_EP_Desc *next_ep; /* The EP after current. */
4359
4360 /* Protect TxintrEPList */
4361 local_irq_save(flags);
4362
4363 for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
4364 if(!epid_inuse(epid) || !epid_intr(epid) || !epid_out_traffic(epid)) {
4365 /* Nothing to see on this epid. Only check valid Out Intr epids */
4366 continue;
4367 }
4368
4369 urb = activeUrbList[epid];
4370 if(urb == 0) {
4371 intr_warn("Found Out Intr epid:%d with no active URB\n", epid);
4372 continue;
4373 }
4374
4375 /* Sanity check. */
4376 ASSERT(usb_pipetype(urb->pipe) == PIPE_INTERRUPT);
4377 ASSERT(usb_pipeout(urb->pipe));
4378
4379 urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
4380 ASSERT(urb_priv);
4381
4382 /* Go through the EPs between the first and second sof-EP. It's here that
4383 Out Intr EPs are inserted. */
4384 curr_ep = &TxIntrEPList[0];
4385 do {
4386 next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
4387 if(next_ep == urb_priv->intr_ep_pool[0]) {
4388 /* We found the Out Intr EP for this epid */
4389
4390 /* Disable it so it doesn't get processed again */
4391 next_ep->command &= ~IO_MASK(USB_EP_command, enable);
4392
4393 /* Finish the active Out Intr URB with status OK */
4394 tc_finish_urb(hcd, urb, 0);
4395 }
4396 curr_ep = phys_to_virt(curr_ep->next);
4397 } while (curr_ep != &TxIntrEPList[1]);
4398
4399 }
4400 local_irq_restore(flags);
4401 }
4402
4403 /* Interrupt handler for DMA8/IRQ24 with subchannels (called from hardware intr) */
4404 static irqreturn_t tc_dma_tx_interrupt(int irq, void *vhc) {
4405 struct usb_hcd *hcd = (struct usb_hcd*)vhc;
4406 ASSERT(hcd);
4407
4408 if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub0_descr)) {
4409 /* Clear this interrupt */
4410 *R_DMA_CH8_SUB0_CLR_INTR = IO_STATE(R_DMA_CH8_SUB0_CLR_INTR, clr_descr, do);
4411 restart_dma8_sub0();
4412 }
4413
4414 if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub1_descr)) {
4415 /* Clear this interrupt */
4416 *R_DMA_CH8_SUB1_CLR_INTR = IO_STATE(R_DMA_CH8_SUB1_CLR_INTR, clr_descr, do);
4417 check_finished_ctrl_tx_epids(hcd);
4418 }
4419
4420 if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub2_descr)) {
4421 /* Clear this interrupt */
4422 *R_DMA_CH8_SUB2_CLR_INTR = IO_STATE(R_DMA_CH8_SUB2_CLR_INTR, clr_descr, do);
4423 check_finished_intr_tx_epids(hcd);
4424 }
4425
4426 if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub3_descr)) {
4427 struct crisv10_isoc_complete_data* comp_data;
4428
4429 /* Flag done Out Isoc for later completion */
4430 check_finished_isoc_tx_epids();
4431
4432 /* Clear this interrupt */
4433 *R_DMA_CH8_SUB3_CLR_INTR = IO_STATE(R_DMA_CH8_SUB3_CLR_INTR, clr_descr, do);
4434 /* Schedule bottom half of Out Isoc completion function. This function
4435 finishes the URBs marked with isoc_out_done */
4436 comp_data = (struct crisv10_isoc_complete_data*)
4437 kmem_cache_alloc(isoc_compl_cache, GFP_ATOMIC);
4438 ASSERT(comp_data != NULL);
4439 comp_data->hcd = hcd;
4440
4441 INIT_WORK(&comp_data->usb_bh, complete_isoc_bottom_half);
4442 schedule_work(&comp_data->usb_bh);
4443 }
4444
4445 return IRQ_HANDLED;
4446 }
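/* Summary of the DMA 8 sub-channel to traffic-type mapping handled above
   (as wired up by the tc_dma_init_tx_*_list() functions): sub0 = Bulk,
   sub1 = Ctrl, sub2 = Intr and sub3 = Isoc. */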
4447
4448 /* Interrupt handler for DMA9/IRQ25 (called from hardware intr) */
4449 static irqreturn_t tc_dma_rx_interrupt(int irq, void *vhc) {
4450 unsigned long flags;
4451 struct urb *urb;
4452 struct usb_hcd *hcd = (struct usb_hcd*)vhc;
4453 struct crisv10_urb_priv *urb_priv;
4454 int epid = 0;
4455 int real_error;
4456
4457 ASSERT(hcd);
4458
4459 /* Clear this interrupt. */
4460 *R_DMA_CH9_CLR_INTR = IO_STATE(R_DMA_CH9_CLR_INTR, clr_eop, do);
4461
4462 /* Custom clear interrupt for this interrupt */
4463 /* The reason we cli here is that we call the driver's callback functions. */
4464 local_irq_save(flags);
4465
4466 /* Note that this while loop assumes that all packets span only
4467 one rx descriptor. */
4468 while(myNextRxDesc->status & IO_MASK(USB_IN_status, eop)) {
4469 epid = IO_EXTRACT(USB_IN_status, epid, myNextRxDesc->status);
4470 /* Get the active URB for this epid */
4471 urb = activeUrbList[epid];
4472
4473 ASSERT(epid_inuse(epid));
4474 if (!urb) {
4475 dma_err("No urb for epid %d in rx interrupt\n", epid);
4476 goto skip_out;
4477 }
4478
4479 /* Check if any errors on epid */
4480 real_error = 0;
4481 if (myNextRxDesc->status & IO_MASK(USB_IN_status, error)) {
4482 __u32 r_usb_ept_data;
4483
4484 if (usb_pipeisoc(urb->pipe)) {
4485 r_usb_ept_data = etrax_epid_iso_get(epid);
4486 if((r_usb_ept_data & IO_MASK(R_USB_EPT_DATA_ISO, valid)) &&
4487 (IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code, r_usb_ept_data) == 0) &&
4488 (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata))) {
4489 /* Not an error, just a failure to receive an expected iso
4490 in packet in this frame. This is not documented
4491 in the designer's reference. Continue processing.
4492 */
4493 } else real_error = 1;
4494 } else real_error = 1;
4495 }
4496
4497 if(real_error) {
4498 dma_err("Error in RX descr on epid:%d for URB 0x%x",
4499 epid, (unsigned int)urb);
4500 dump_ept_data(epid);
4501 dump_in_desc(myNextRxDesc);
4502 goto skip_out;
4503 }
4504
4505 urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
4506 ASSERT(urb_priv);
4507 ASSERT(urb_priv->urb_state == STARTED ||
4508 urb_priv->urb_state == UNLINK);
4509
4510 if ((usb_pipetype(urb->pipe) == PIPE_BULK) ||
4511 (usb_pipetype(urb->pipe) == PIPE_CONTROL) ||
4512 (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)) {
4513
4514 /* We get nodata for empty data transactions, and the rx descriptor's
4515 hw_len field is not valid in that case. No data to copy in other
4516 words. */
4517 if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) {
4518 /* No data to copy */
4519 } else {
4520 /*
4521 dma_dbg("Processing RX for URB:0x%x epid:%d (data:%d ofs:%d)\n",
4522 (unsigned int)urb, epid, myNextRxDesc->hw_len,
4523 urb_priv->rx_offset);
4524 */
4525 /* Only copy data if the URB isn't flagged to be unlinked */
4526 if(urb_priv->urb_state != UNLINK) {
4527 /* Make sure the data fits in the buffer. */
4528 if(urb_priv->rx_offset + myNextRxDesc->hw_len
4529 <= urb->transfer_buffer_length) {
4530
4531 /* Copy the data to URBs buffer */
4532 memcpy(urb->transfer_buffer + urb_priv->rx_offset,
4533 phys_to_virt(myNextRxDesc->buf), myNextRxDesc->hw_len);
4534 urb_priv->rx_offset += myNextRxDesc->hw_len;
4535 } else {
4536 /* Signal overflow when returning URB */
4537 urb->status = -EOVERFLOW;
4538 tc_finish_urb_later(hcd, urb, urb->status);
4539 }
4540 }
4541 }
4542
4543 /* Check if it was the last packet in the transfer */
4544 if (myNextRxDesc->status & IO_MASK(USB_IN_status, eot)) {
4545 /* Special handling for In Ctrl URBs. */
4546 if(usb_pipecontrol(urb->pipe) && usb_pipein(urb->pipe) &&
4547 !(urb_priv->ctrl_zout_done)) {
4548 /* Flag that the RX part of the Ctrl transfer is done. Because the zout
4549 descr interrupt hasn't happened yet, the URB will be finished in the
4550 TX interrupt. */
4551 urb_priv->ctrl_rx_done = 1;
4552 tc_dbg("Not finishing In Ctrl URB:0x%x from rx_interrupt, waiting"
4553 " for zout\n", (unsigned int)urb);
4554 } else {
4555 tc_finish_urb(hcd, urb, 0);
4556 }
4557 }
4558 } else { /* ISOC RX */
4559 /*
4560 isoc_dbg("Processing RX for epid:%d (URB:0x%x) ISOC pipe\n",
4561 epid, (unsigned int)urb);
4562 */
4563
4564 struct usb_iso_packet_descriptor *packet;
4565
4566 if (urb_priv->urb_state == UNLINK) {
4567 isoc_warn("Ignoring Isoc Rx data for urb being unlinked.\n");
4568 goto skip_out;
4569 } else if (urb_priv->urb_state == NOT_STARTED) {
4570 isoc_err("What? Got Rx data for Isoc urb that isn't started?\n");
4571 goto skip_out;
4572 }
4573
4574 packet = &urb->iso_frame_desc[urb_priv->isoc_packet_counter];
4575 ASSERT(packet);
4576 packet->status = 0;
4577
4578 if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) {
4579 /* We get nodata for empty data transactions, and the rx descriptor's
4580 hw_len field is not valid in that case. We copy 0 bytes however to
4581 stay in synch. */
4582 packet->actual_length = 0;
4583 } else {
4584 packet->actual_length = myNextRxDesc->hw_len;
4585 /* Make sure the data fits in the buffer. */
4586 ASSERT(packet->actual_length <= packet->length);
4587 memcpy(urb->transfer_buffer + packet->offset,
4588 phys_to_virt(myNextRxDesc->buf), packet->actual_length);
4589 if(packet->actual_length > 0)
4590 isoc_dbg("Copied %d bytes, packet %d for URB:0x%x[%d]\n",
4591 packet->actual_length, urb_priv->isoc_packet_counter,
4592 (unsigned int)urb, urb_priv->urb_num);
4593 }
4594
4595 /* Increment the packet counter. */
4596 urb_priv->isoc_packet_counter++;
4597
4598 /* Note that we don't care about the eot field in the rx descriptor's
4599 status. It will always be set for isoc traffic. */
4600 if (urb->number_of_packets == urb_priv->isoc_packet_counter) {
4601 /* Complete the urb with status OK. */
4602 tc_finish_urb(hcd, urb, 0);
4603 }
4604 }
4605
4606 skip_out:
4607 myNextRxDesc->status = 0;
4608 myNextRxDesc->command |= IO_MASK(USB_IN_command, eol);
4609 myLastRxDesc->command &= ~IO_MASK(USB_IN_command, eol);
4610 myLastRxDesc = myNextRxDesc;
4611 myNextRxDesc = phys_to_virt(myNextRxDesc->next);
4612 flush_etrax_cache();
4613 *R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, restart);
4614 }
4615
4616 local_irq_restore(flags);
4617
4618 return IRQ_HANDLED;
4619 }
4620
4621 static void tc_bulk_start_timer_func(unsigned long dummy) {
4622 /* We might enable an EP descriptor behind the current DMA position when
4623 it's about to decide that there is no more bulk traffic and it should
4624 stop the bulk channel.
4625 Therefore we periodically check if the bulk channel is stopped and there
4626 is an enabled bulk EP descriptor, in which case we start the bulk
4627 channel. */
4628
4629 if (!(*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd))) {
4630 int epid;
4631
4632 timer_dbg("bulk_start_timer: Bulk DMA channel not running.\n");
4633
4634 for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
4635 if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
4636 timer_warn("Found enabled EP for epid %d, starting bulk channel.\n",
4637 epid);
4638 restart_dma8_sub0();
4639
4640 /* Restart the bulk eot timer since we just started the bulk channel.*/
4641 mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
4642
4643 /* No need to search any further. */
4644 break;
4645 }
4646 }
4647 } else {
4648 timer_dbg("bulk_start_timer: Bulk DMA channel running.\n");
4649 }
4650 }
4651
4652 static void tc_bulk_eot_timer_func(unsigned long dummy) {
4653 struct usb_hcd *hcd = (struct usb_hcd*)dummy;
4654 ASSERT(hcd);
4655 /* Because of a race condition in the top half, we might miss a bulk eot.
4656 This timer "simulates" a bulk eot if we don't get one for a while,
4657 hopefully correcting the situation. */
4658 timer_dbg("bulk_eot_timer timed out.\n");
4659 check_finished_bulk_tx_epids(hcd, 1);
4660 }
4661
4662
4663 /*************************************************************/
4664 /*************************************************************/
4665 /* Device driver block */
4666 /*************************************************************/
4667 /*************************************************************/
4668
4669 /* Forward declarations for device driver functions */
4670 static int devdrv_hcd_probe(struct device *);
4671 static int devdrv_hcd_remove(struct device *);
4672 #ifdef CONFIG_PM
4673 static int devdrv_hcd_suspend(struct device *, u32, u32);
4674 static int devdrv_hcd_resume(struct device *, u32);
4675 #endif /* CONFIG_PM */
4676
4677 /* the device */
4678 static struct platform_device *devdrv_hc_platform_device;
4679
4680 /* device driver interface */
4681 static struct device_driver devdrv_hc_device_driver = {
4682 .name = (char *) hc_name,
4683 .bus = &platform_bus_type,
4684
4685 .probe = devdrv_hcd_probe,
4686 .remove = devdrv_hcd_remove,
4687
4688 #ifdef CONFIG_PM
4689 .suspend = devdrv_hcd_suspend,
4690 .resume = devdrv_hcd_resume,
4691 #endif /* CONFIG_PM */
4692 };
4693
4694 /* initialize the host controller and driver */
4695 static int __init_or_module devdrv_hcd_probe(struct device *dev)
4696 {
4697 struct usb_hcd *hcd;
4698 struct crisv10_hcd *crisv10_hcd;
4699 int retval;
4700
4701 /* Check DMA burst length */
4702 if(IO_EXTRACT(R_BUS_CONFIG, dma_burst, *R_BUS_CONFIG) !=
4703 IO_STATE(R_BUS_CONFIG, dma_burst, burst32)) {
4704 devdrv_err("Invalid DMA burst length in Etrax 100LX,"
4705 " needs to be 32\n");
4706 return -EPERM;
4707 }
4708
4709 //XXX: dev->usb_id doesn't exist, using "" instead? - claudio
4710 hcd = usb_create_hcd(&crisv10_hc_driver, dev, "");
4711 if (!hcd)
4712 return -ENOMEM;
4713
4714 crisv10_hcd = hcd_to_crisv10_hcd(hcd);
4715 spin_lock_init(&crisv10_hcd->lock);
4716 crisv10_hcd->num_ports = num_ports();
4717 crisv10_hcd->running = 0;
4718
4719 dev_set_drvdata(dev, crisv10_hcd);
4720
4721 devdrv_dbg("ETRAX USB IRQs HC:%d RX:%d TX:%d\n", ETRAX_USB_HC_IRQ,
4722 ETRAX_USB_RX_IRQ, ETRAX_USB_TX_IRQ);
4723
4724 /* Print out chip version read from registers */
4725 int rev_maj = IO_EXTRACT(R_USB_REVISION, major, *R_USB_REVISION);
4726 int rev_min = IO_EXTRACT(R_USB_REVISION, minor, *R_USB_REVISION);
4727 if(rev_min == 0) {
4728 devdrv_info("Etrax 100LX USB Revision %d v1,2\n", rev_maj);
4729 } else {
4730 devdrv_info("Etrax 100LX USB Revision %d v%d\n", rev_maj, rev_min);
4731 }
4732
4733 devdrv_info("Bulk timer interval, start:%d eot:%d\n",
4734 BULK_START_TIMER_INTERVAL,
4735 BULK_EOT_TIMER_INTERVAL);
4736
4737
4738 /* Init root hub data structures */
4739 if(rh_init()) {
4740 devdrv_err("Failed init data for Root Hub\n");
4741 retval = -ENOMEM;
goto out;
4742 }
4743
4744 if(port_in_use(0)) {
4745 if (cris_request_io_interface(if_usb_1, "ETRAX100LX USB-HCD")) {
4746 printk(KERN_CRIT "usb-host: request IO interface usb1 failed");
4747 retval = -EBUSY;
4748 goto out;
4749 }
4750 devdrv_info("Claimed interface for USB physical port 1\n");
4751 }
4752 if(port_in_use(1)) {
4753 if (cris_request_io_interface(if_usb_2, "ETRAX100LX USB-HCD")) {
4754 /* Free first interface if second failed to be claimed */
4755 if(port_in_use(0)) {
4756 cris_free_io_interface(if_usb_1);
4757 }
4758 printk(KERN_CRIT "usb-host: request IO interface usb2 failed");
4759 retval = -EBUSY;
4760 goto out;
4761 }
4762 devdrv_info("Claimed interface for USB physical port 2\n");
4763 }
4764
4765 /* Init transfer controller structs and locks */
4766 if((retval = tc_init(hcd)) != 0) {
4767 goto out;
4768 }
4769
4770 /* Attach interrupt functions for DMA and init DMA controller */
4771 if((retval = tc_dma_init(hcd)) != 0) {
4772 goto out;
4773 }
4774
4775 /* Attach the top IRQ handler for USB controller interrupts */
4776 if (request_irq(ETRAX_USB_HC_IRQ, crisv10_hcd_top_irq, 0,
4777 "ETRAX 100LX built-in USB (HC)", hcd)) {
4778 err("Could not allocate IRQ %d for USB", ETRAX_USB_HC_IRQ);
4779 retval = -EBUSY;
4780 goto out;
4781 }
4782
4783 /* iso_eof is only enabled when isoc traffic is running. */
4784 *R_USB_IRQ_MASK_SET =
4785 /* IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set) | */
4786 IO_STATE(R_USB_IRQ_MASK_SET, bulk_eot, set) |
4787 IO_STATE(R_USB_IRQ_MASK_SET, epid_attn, set) |
4788 IO_STATE(R_USB_IRQ_MASK_SET, port_status, set) |
4789 IO_STATE(R_USB_IRQ_MASK_SET, ctl_status, set);
4790
4791
4792 crisv10_ready_wait();
4793 /* Reset the USB interface. */
4794 *R_USB_COMMAND =
4795 IO_STATE(R_USB_COMMAND, port_sel, nop) |
4796 IO_STATE(R_USB_COMMAND, port_cmd, reset) |
4797 IO_STATE(R_USB_COMMAND, ctrl_cmd, reset);
4798
4799 /* Designer's Reference, p. 8 - 10 says we should initialize R_USB_FM_PSTART to
4800 0x2A30 (10800), to guarantee that control traffic gets 10% of the
4801 bandwidth, and periodic transfer may allocate the rest (90%).
4802 This doesn't work though.
4803 The value 11960 is chosen to be just after the SOF token, with a couple
4804 of bit times extra for possible bit stuffing. */
4805 *R_USB_FM_PSTART = IO_FIELD(R_USB_FM_PSTART, value, 11960);
4806
4807 crisv10_ready_wait();
4808 /* Configure the USB interface as a host controller. */
4809 *R_USB_COMMAND =
4810 IO_STATE(R_USB_COMMAND, port_sel, nop) |
4811 IO_STATE(R_USB_COMMAND, port_cmd, reset) |
4812 IO_STATE(R_USB_COMMAND, ctrl_cmd, host_config);
4813
4814
4815 /* Check that the controller is not busy before enabling ports */
4816 crisv10_ready_wait();
4817
4818 /* Enable selected USB ports */
4819 if(port_in_use(0)) {
4820 *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
4821 } else {
4822 *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
4823 }
4824 if(port_in_use(1)) {
4825 *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, no);
4826 } else {
4827 *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, yes);
4828 }
4829
4830 crisv10_ready_wait();
4831 /* Start processing of USB traffic. */
4832 *R_USB_COMMAND =
4833 IO_STATE(R_USB_COMMAND, port_sel, nop) |
4834 IO_STATE(R_USB_COMMAND, port_cmd, reset) |
4835 IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
4836
4837 /* Do not continue with probe initialization before the USB interface is done */
4838 crisv10_ready_wait();
4839
4840 /* Register our Host Controller to USB Core
4841 * Finish the remaining parts of generic HCD initialization: allocate the
4842 * buffers of consistent memory, register the bus
4843 * and call the driver's reset() and start() routines. */
4844 retval = usb_add_hcd(hcd, ETRAX_USB_HC_IRQ, IRQF_DISABLED);
4845 if (retval != 0) {
4846 devdrv_err("Failed registering HCD driver\n");
4847 goto out;
4848 }
4849
4850 return 0;
4851
4852 out:
4853 devdrv_hcd_remove(dev);
4854 return retval;
4855 }
4856
4857
4858 /* cleanup after the host controller and driver */
4859 static int __init_or_module devdrv_hcd_remove(struct device *dev)
4860 {
4861 struct crisv10_hcd *crisv10_hcd = dev_get_drvdata(dev);
4862 struct usb_hcd *hcd;
4863
4864 if (!crisv10_hcd)
4865 return 0;
4866 hcd = crisv10_hcd_to_hcd(crisv10_hcd);
4867
4868
4869 /* Stop USB Controller in Etrax 100LX */
4870 crisv10_hcd_reset(hcd);
4871
4872 usb_remove_hcd(hcd);
4873 devdrv_dbg("Removed HCD from USB Core\n");
4874
4875 /* Free USB Controller IRQ */
4876 free_irq(ETRAX_USB_HC_IRQ, NULL);
4877
4878 /* Free resources */
4879 tc_dma_destroy();
4880 tc_destroy();
4881
4882
4883 if(port_in_use(0)) {
4884 cris_free_io_interface(if_usb_1);
4885 }
4886 if(port_in_use(1)) {
4887 cris_free_io_interface(if_usb_2);
4888 }
4889
4890 devdrv_dbg("Freed all claimed resources\n");
4891
4892 return 0;
4893 }
4894
4895
4896 #ifdef CONFIG_PM
4897
4898 static int devdrv_hcd_suspend(struct device *dev, u32 state, u32 level)
4899 {
4900 return 0; /* no-op for now */
4901 }
4902
4903 static int devdrv_hcd_resume(struct device *dev, u32 level)
4904 {
4905 return 0; /* no-op for now */
4906 }
4907
4908 #endif /* CONFIG_PM */
4909
4910
4911 /*************************************************************/
4912 /*************************************************************/
4913 /* Module block */
4914 /*************************************************************/
4915 /*************************************************************/
4916
4917 /* register driver */
4918 static int __init module_hcd_init(void)
4919 {
4920
4921 if (usb_disabled())
4922 return -ENODEV;
4923
4924 /* Here we select the enabled ports via the following defines created from
4925 menuconfig */
4926 #ifndef CONFIG_ETRAX_USB_HOST_PORT1
4927 ports &= ~(1<<0);
4928 #endif
4929 #ifndef CONFIG_ETRAX_USB_HOST_PORT2
4930 ports &= ~(1<<1);
4931 #endif
4932
4933 printk(KERN_INFO "%s version "VERSION" "COPYRIGHT"\n", product_desc);
4934
4935 devdrv_hc_platform_device =
4936 platform_device_register_simple((char *) hc_name, 0, NULL, 0);
4937
4938 if (IS_ERR(devdrv_hc_platform_device))
4939 return PTR_ERR(devdrv_hc_platform_device);
/*
* Note that we do not set the DMA mask for the device,
* i.e. we pretend that we will use PIO, since no specific
* allocation routines are needed for DMA buffers. This will
* cause the HCD buffer allocation routines to fall back to
* kmalloc().
*/
return driver_register(&devdrv_hc_device_driver);
4948 }
4949
4950 /* unregister driver */
4951 static void __exit module_hcd_exit(void)
4952 {
4953 driver_unregister(&devdrv_hc_device_driver);
4954 }
4955
4956
4957 /* Module hooks */
4958 module_init(module_hcd_init);
4959 module_exit(module_hcd_exit);

/* EP descriptor lists for periodic transfers. Must be 32-bit aligned. */
5057 static volatile struct USB_EP_Desc TxIntrEPList[MAX_INTR_INTERVAL] __attribute__ ((aligned (4)));
5058 static volatile struct USB_SB_Desc TxIntrSB_zout __attribute__ ((aligned (4)));
5059
5060 static volatile struct USB_EP_Desc TxIsocEPList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
5061 static volatile struct USB_SB_Desc TxIsocSB_zout __attribute__ ((aligned (4)));
5062
5063 static volatile struct USB_SB_Desc TxIsocSBList[NBR_OF_EPIDS] __attribute__ ((aligned (4)));
5064
/* After each enabled bulk EP IN we put two disabled EP descriptors with the eol flag set,
causing the DMA to stop the channel. The first of the two has the intr flag set, which
gives us a dma8_sub0_descr interrupt. When we receive this, we advance the DMA one step in the
EP list and then restart the bulk channel, thus forcing a switch between bulk EP descriptors
in each frame. */
5070 static volatile struct USB_EP_Desc TxBulkDummyEPList[NBR_OF_EPIDS][2] __attribute__ ((aligned (4)));
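
/* Hedged sketch of how the two dummy descriptors described above could be
flagged (illustrative only -- the driver's real setup is done in its DMA
init code, and the eol/intr/epid field names are assumptions based on the
io macros used by __dump_ep_desc() below). Both dummies stay disabled with
eol set; only the first carries intr, which raises dma8_sub0_descr. */
static inline void sketch_flag_bulk_dummies(int epid)
{
  TxBulkDummyEPList[epid][0].command =
    IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
    IO_STATE(USB_EP_command, eol, yes) |
    IO_STATE(USB_EP_command, intr, yes);
  TxBulkDummyEPList[epid][1].command =
    IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
    IO_STATE(USB_EP_command, eol, yes);
}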
5071
/* List of URB pointers, where each entry points to the active URB for an
epid. For Bulk, Ctrl and Intr this is the URB whose transfers are currently
added to the DMA lists (Isoc URBs are all added to the DMA lists directly).
As soon as a URB completes, the queue is examined and the first URB in the
queue is removed and moved to activeUrbList, its state changes to STARTED
and its transfer(s) are added to the DMA lists (except for Isoc, where URBs
enter state STARTED directly and their transfers are added immediately). */
5079 static struct urb *activeUrbList[NBR_OF_EPIDS];
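
/* The hand-over described above as a minimal sketch (pseudo-C in a comment;
the real logic lives in tc_finish_urb() and tc_dma_process_queue() below):

   on_urb_complete(epid):
     next = urb_list_first(epid);     // peek the per-epid queue
     if (next) {
       urb_list_del(next, epid);      // dequeue
       activeUrbList[epid] = next;    // this URB becomes active
       // mark it STARTED and add its transfers to the DMA lists
     } else {
       activeUrbList[epid] = NULL;
     }
*/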
5080
5081 /* Additional software state info for each epid */
5082 static struct etrax_epid epid_state[NBR_OF_EPIDS];
5083
/* Timers for bulk traffic, used to work around a DMA bug where the DMA
stops even though new data is waiting to be processed */
5086 static struct timer_list bulk_start_timer = TIMER_INITIALIZER(NULL, 0, 0);
5087 static struct timer_list bulk_eot_timer = TIMER_INITIALIZER(NULL, 0, 0);
5088
5089 /* We want the start timer to expire before the eot timer, because the former
5090 might start traffic, thus making it unnecessary for the latter to time
5091 out. */
5092 #define BULK_START_TIMER_INTERVAL (HZ/50) /* 20 ms */
5093 #define BULK_EOT_TIMER_INTERVAL (HZ/16) /* 60 ms */
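
/* Timer arithmetic (assuming HZ == 100, the ETRAX default): HZ/50 == 2
jiffies == 20 ms and HZ/16 == 6 jiffies == 60 ms, so the start timer
always fires before the eot timer, as required above. */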
5094
/* Delay before a URB completion happens when it is scheduled to be delayed */
5096 #define LATER_TIMER_DELAY (HZ/50) /* 20 ms */
5097
/* Simplifying macros for checking software state info of an epid */
5099 /* ----------------------------------------------------------------------- */
5100 #define epid_inuse(epid) epid_state[epid].inuse
5101 #define epid_out_traffic(epid) epid_state[epid].out_traffic
5102 #define epid_isoc(epid) (epid_state[epid].type == PIPE_ISOCHRONOUS ? 1 : 0)
5103 #define epid_intr(epid) (epid_state[epid].type == PIPE_INTERRUPT ? 1 : 0)
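
/* Usage sketch (hypothetical snippet, mirroring the guards used by the IRQ
handlers below): a typical check before touching an epid's DMA state:

   if (epid_inuse(epid) && !epid_isoc(epid) && epid_out_traffic(epid)) {
     ... safe to inspect the OUT DMA lists for this epid ...
   }
*/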
5104
5105
5106 /***************************************************************************/
5107 /***************************************************************************/
5108 /* DEBUG FUNCTIONS */
5109 /***************************************************************************/
5110 /***************************************************************************/
/* Note that these functions are always available in their "__" variants,
for use in error situations. The variants without "__" are controlled by
the USB_DEBUG_DESC/USB_DEBUG_URB macros. */
5114 static void __dump_urb(struct urb* purb)
5115 {
5116 struct crisv10_urb_priv *urb_priv = purb->hcpriv;
5117 int urb_num = -1;
5118 if(urb_priv) {
5119 urb_num = urb_priv->urb_num;
5120 }
5121 printk("\nURB:0x%x[%d]\n", (unsigned int)purb, urb_num);
5122 printk("dev :0x%08lx\n", (unsigned long)purb->dev);
5123 printk("pipe :0x%08x\n", purb->pipe);
5124 printk("status :%d\n", purb->status);
5125 printk("transfer_flags :0x%08x\n", purb->transfer_flags);
5126 printk("transfer_buffer :0x%08lx\n", (unsigned long)purb->transfer_buffer);
5127 printk("transfer_buffer_length:%d\n", purb->transfer_buffer_length);
5128 printk("actual_length :%d\n", purb->actual_length);
5129 printk("setup_packet :0x%08lx\n", (unsigned long)purb->setup_packet);
5130 printk("start_frame :%d\n", purb->start_frame);
5131 printk("number_of_packets :%d\n", purb->number_of_packets);
5132 printk("interval :%d\n", purb->interval);
5133 printk("error_count :%d\n", purb->error_count);
5134 printk("context :0x%08lx\n", (unsigned long)purb->context);
5135 printk("complete :0x%08lx\n\n", (unsigned long)purb->complete);
5136 }
5137
5138 static void __dump_in_desc(volatile struct USB_IN_Desc *in)
5139 {
5140 printk("\nUSB_IN_Desc at 0x%08lx\n", (unsigned long)in);
5141 printk(" sw_len : 0x%04x (%d)\n", in->sw_len, in->sw_len);
5142 printk(" command : 0x%04x\n", in->command);
5143 printk(" next : 0x%08lx\n", in->next);
5144 printk(" buf : 0x%08lx\n", in->buf);
5145 printk(" hw_len : 0x%04x (%d)\n", in->hw_len, in->hw_len);
5146 printk(" status : 0x%04x\n\n", in->status);
5147 }
5148
5149 static void __dump_sb_desc(volatile struct USB_SB_Desc *sb)
5150 {
5151 char tt = (sb->command & 0x30) >> 4;
5152 char *tt_string;
5153
5154 switch (tt) {
5155 case 0:
5156 tt_string = "zout";
5157 break;
5158 case 1:
5159 tt_string = "in";
5160 break;
5161 case 2:
5162 tt_string = "out";
5163 break;
5164 case 3:
5165 tt_string = "setup";
5166 break;
5167 default:
5168 tt_string = "unknown (weird)";
5169 }
5170
5171 printk(" USB_SB_Desc at 0x%08lx ", (unsigned long)sb);
5172 printk(" command:0x%04x (", sb->command);
5173 printk("rem:%d ", (sb->command & 0x3f00) >> 8);
5174 printk("full:%d ", (sb->command & 0x40) >> 6);
5175 printk("tt:%d(%s) ", tt, tt_string);
5176 printk("intr:%d ", (sb->command & 0x8) >> 3);
5177 printk("eot:%d ", (sb->command & 0x2) >> 1);
5178 printk("eol:%d)", sb->command & 0x1);
5179 printk(" sw_len:0x%04x(%d)", sb->sw_len, sb->sw_len);
5180 printk(" next:0x%08lx", sb->next);
5181 printk(" buf:0x%08lx\n", sb->buf);
5182 }
5183
5184
5185 static void __dump_ep_desc(volatile struct USB_EP_Desc *ep)
5186 {
5187 printk("USB_EP_Desc at 0x%08lx ", (unsigned long)ep);
5188 printk(" command:0x%04x (", ep->command);
5189 printk("ep_id:%d ", (ep->command & 0x1f00) >> 8);
5190 printk("enable:%d ", (ep->command & 0x10) >> 4);
5191 printk("intr:%d ", (ep->command & 0x8) >> 3);
5192 printk("eof:%d ", (ep->command & 0x2) >> 1);
5193 printk("eol:%d)", ep->command & 0x1);
5194 printk(" hw_len:0x%04x(%d)", ep->hw_len, ep->hw_len);
5195 printk(" next:0x%08lx", ep->next);
5196 printk(" sub:0x%08lx\n", ep->sub);
5197 }
5198
5199 static inline void __dump_ep_list(int pipe_type)
5200 {
5201 volatile struct USB_EP_Desc *ep;
5202 volatile struct USB_EP_Desc *first_ep;
5203 volatile struct USB_SB_Desc *sb;
5204
5205 switch (pipe_type)
5206 {
5207 case PIPE_BULK:
5208 first_ep = &TxBulkEPList[0];
5209 break;
5210 case PIPE_CONTROL:
5211 first_ep = &TxCtrlEPList[0];
5212 break;
5213 case PIPE_INTERRUPT:
5214 first_ep = &TxIntrEPList[0];
5215 break;
5216 case PIPE_ISOCHRONOUS:
5217 first_ep = &TxIsocEPList[0];
5218 break;
5219 default:
5220 return;
5221 }
5222 ep = first_ep;
5223
5224 printk("\n\nDumping EP list...\n\n");
5225
5226 do {
5227 __dump_ep_desc(ep);
/* Cannot phys_to_virt on 0, as it turns into 0x80000000, which is != 0. */
5229 sb = ep->sub ? phys_to_virt(ep->sub) : 0;
5230 while (sb) {
5231 __dump_sb_desc(sb);
5232 sb = sb->next ? phys_to_virt(sb->next) : 0;
5233 }
5234 ep = (volatile struct USB_EP_Desc *)(phys_to_virt(ep->next));
5235
5236 } while (ep != first_ep);
5237 }
5238
5239 static inline void __dump_ept_data(int epid)
5240 {
5241 unsigned long flags;
5242 __u32 r_usb_ept_data;
5243
5244 if (epid < 0 || epid > 31) {
5245 printk("Cannot dump ept data for invalid epid %d\n", epid);
5246 return;
5247 }
5248
5249 local_irq_save(flags);
5250 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
5251 nop();
5252 r_usb_ept_data = *R_USB_EPT_DATA;
5253 local_irq_restore(flags);
5254
5255 printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", r_usb_ept_data, epid);
5256 if (r_usb_ept_data == 0) {
5257 /* No need for more detailed printing. */
5258 return;
5259 }
5260 printk(" valid : %d\n", (r_usb_ept_data & 0x80000000) >> 31);
5261 printk(" hold : %d\n", (r_usb_ept_data & 0x40000000) >> 30);
5262 printk(" error_count_in : %d\n", (r_usb_ept_data & 0x30000000) >> 28);
5263 printk(" t_in : %d\n", (r_usb_ept_data & 0x08000000) >> 27);
5264 printk(" low_speed : %d\n", (r_usb_ept_data & 0x04000000) >> 26);
5265 printk(" port : %d\n", (r_usb_ept_data & 0x03000000) >> 24);
5266 printk(" error_code : %d\n", (r_usb_ept_data & 0x00c00000) >> 22);
5267 printk(" t_out : %d\n", (r_usb_ept_data & 0x00200000) >> 21);
5268 printk(" error_count_out : %d\n", (r_usb_ept_data & 0x00180000) >> 19);
5269 printk(" max_len : %d\n", (r_usb_ept_data & 0x0003f800) >> 11);
5270 printk(" ep : %d\n", (r_usb_ept_data & 0x00000780) >> 7);
5271 printk(" dev : %d\n", (r_usb_ept_data & 0x0000003f));
5272 }
5273
5274 static inline void __dump_ept_data_iso(int epid)
5275 {
5276 unsigned long flags;
5277 __u32 ept_data;
5278
5279 if (epid < 0 || epid > 31) {
5280 printk("Cannot dump ept data for invalid epid %d\n", epid);
5281 return;
5282 }
5283
5284 local_irq_save(flags);
5285 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
5286 nop();
5287 ept_data = *R_USB_EPT_DATA_ISO;
5288 local_irq_restore(flags);
5289
5290 printk(" R_USB_EPT_DATA = 0x%x for epid %d :\n", ept_data, epid);
5291 if (ept_data == 0) {
5292 /* No need for more detailed printing. */
5293 return;
5294 }
5295 printk(" valid : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, valid,
5296 ept_data));
5297 printk(" port : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, port,
5298 ept_data));
5299 printk(" error_code : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code,
5300 ept_data));
5301 printk(" max_len : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, max_len,
5302 ept_data));
5303 printk(" ep : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, ep,
5304 ept_data));
5305 printk(" dev : %d\n", IO_EXTRACT(R_USB_EPT_DATA_ISO, dev,
5306 ept_data));
5307 }
5308
5309 static inline void __dump_ept_data_list(void)
5310 {
5311 int i;
5312
5313 printk("Dumping the whole R_USB_EPT_DATA list\n");
5314
5315 for (i = 0; i < 32; i++) {
5316 __dump_ept_data(i);
5317 }
5318 }
5319
5320 static void debug_epid(int epid) {
5321 int i;
5322
5323 if(epid_isoc(epid)) {
5324 __dump_ept_data_iso(epid);
5325 } else {
5326 __dump_ept_data(epid);
5327 }
5328
5329 printk("Bulk:\n");
5330 for(i = 0; i < 32; i++) {
5331 if(IO_EXTRACT(USB_EP_command, epid, TxBulkEPList[i].command) ==
5332 epid) {
5333 printk("%d: ", i); __dump_ep_desc(&(TxBulkEPList[i]));
5334 }
5335 }
5336
5337 printk("Ctrl:\n");
5338 for(i = 0; i < 32; i++) {
5339 if(IO_EXTRACT(USB_EP_command, epid, TxCtrlEPList[i].command) ==
5340 epid) {
5341 printk("%d: ", i); __dump_ep_desc(&(TxCtrlEPList[i]));
5342 }
5343 }
5344
5345 printk("Intr:\n");
5346 for(i = 0; i < MAX_INTR_INTERVAL; i++) {
5347 if(IO_EXTRACT(USB_EP_command, epid, TxIntrEPList[i].command) ==
5348 epid) {
5349 printk("%d: ", i); __dump_ep_desc(&(TxIntrEPList[i]));
5350 }
5351 }
5352
5353 printk("Isoc:\n");
5354 for(i = 0; i < 32; i++) {
5355 if(IO_EXTRACT(USB_EP_command, epid, TxIsocEPList[i].command) ==
5356 epid) {
5357 printk("%d: ", i); __dump_ep_desc(&(TxIsocEPList[i]));
5358 }
5359 }
5360
5361 __dump_ept_data_list();
5362 __dump_ep_list(PIPE_INTERRUPT);
5363 printk("\n\n");
5364 }
5365
5366
5367
5368 char* hcd_status_to_str(__u8 bUsbStatus) {
5369 static char hcd_status_str[128];
5370 hcd_status_str[0] = '\0';
5371 if(bUsbStatus & IO_STATE(R_USB_STATUS, ourun, yes)) {
5372 strcat(hcd_status_str, "ourun ");
5373 }
5374 if(bUsbStatus & IO_STATE(R_USB_STATUS, perror, yes)) {
5375 strcat(hcd_status_str, "perror ");
5376 }
5377 if(bUsbStatus & IO_STATE(R_USB_STATUS, device_mode, yes)) {
5378 strcat(hcd_status_str, "device_mode ");
5379 }
5380 if(bUsbStatus & IO_STATE(R_USB_STATUS, host_mode, yes)) {
5381 strcat(hcd_status_str, "host_mode ");
5382 }
5383 if(bUsbStatus & IO_STATE(R_USB_STATUS, started, yes)) {
5384 strcat(hcd_status_str, "started ");
5385 }
5386 if(bUsbStatus & IO_STATE(R_USB_STATUS, running, yes)) {
5387 strcat(hcd_status_str, "running ");
5388 }
5389 return hcd_status_str;
5390 }
5391
5392
5393 char* sblist_to_str(struct USB_SB_Desc* sb_desc) {
5394 static char sblist_to_str_buff[128];
5395 char tmp[32], tmp2[32];
5396 sblist_to_str_buff[0] = '\0';
5397 while(sb_desc != NULL) {
5398 switch(IO_EXTRACT(USB_SB_command, tt, sb_desc->command)) {
5399 case 0: sprintf(tmp, "zout"); break;
5400 case 1: sprintf(tmp, "in"); break;
5401 case 2: sprintf(tmp, "out"); break;
5402 case 3: sprintf(tmp, "setup"); break;
5403 }
5404 sprintf(tmp2, "(%s %d)", tmp, sb_desc->sw_len);
5405 strcat(sblist_to_str_buff, tmp2);
5406 if(sb_desc->next != 0) {
5407 sb_desc = phys_to_virt(sb_desc->next);
5408 } else {
5409 sb_desc = NULL;
5410 }
5411 }
5412 return sblist_to_str_buff;
5413 }
5414
5415 char* port_status_to_str(__u16 wPortStatus) {
5416 static char port_status_str[128];
5417 port_status_str[0] = '\0';
5418 if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, connected, yes)) {
5419 strcat(port_status_str, "connected ");
5420 }
5421 if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) {
5422 strcat(port_status_str, "enabled ");
5423 }
5424 if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, suspended, yes)) {
5425 strcat(port_status_str, "suspended ");
5426 }
5427 if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, reset, yes)) {
5428 strcat(port_status_str, "reset ");
5429 }
5430 if(wPortStatus & IO_STATE(R_USB_RH_PORT_STATUS_1, speed, full)) {
5431 strcat(port_status_str, "full-speed ");
5432 } else {
5433 strcat(port_status_str, "low-speed ");
5434 }
5435 return port_status_str;
5436 }
5437
5438
5439 char* endpoint_to_str(struct usb_endpoint_descriptor *ed) {
5440 static char endpoint_to_str_buff[128];
5441 char tmp[32];
5442 int epnum = ed->bEndpointAddress & 0x0F;
5443 int dir = ed->bEndpointAddress & 0x80;
5444 int type = ed->bmAttributes & 0x03;
5445 endpoint_to_str_buff[0] = '\0';
5446 sprintf(endpoint_to_str_buff, "ep:%d ", epnum);
5447 switch(type) {
5448 case 0:
5449 sprintf(tmp, " ctrl");
5450 break;
5451 case 1:
5452 sprintf(tmp, " isoc");
5453 break;
5454 case 2:
5455 sprintf(tmp, " bulk");
5456 break;
5457 case 3:
5458 sprintf(tmp, " intr");
5459 break;
5460 }
5461 strcat(endpoint_to_str_buff, tmp);
5462 if(dir) {
5463 sprintf(tmp, " in");
5464 } else {
5465 sprintf(tmp, " out");
5466 }
5467 strcat(endpoint_to_str_buff, tmp);
5468
5469 return endpoint_to_str_buff;
5470 }
5471
5472 /* Debug helper functions for Transfer Controller */
5473 char* pipe_to_str(unsigned int pipe) {
5474 static char pipe_to_str_buff[128];
5475 char tmp[64];
5476 sprintf(pipe_to_str_buff, "dir:%s", str_dir(pipe));
5477 sprintf(tmp, " type:%s", str_type(pipe));
5478 strcat(pipe_to_str_buff, tmp);
5479
5480 sprintf(tmp, " dev:%d", usb_pipedevice(pipe));
5481 strcat(pipe_to_str_buff, tmp);
5482 sprintf(tmp, " ep:%d", usb_pipeendpoint(pipe));
5483 strcat(pipe_to_str_buff, tmp);
5484 return pipe_to_str_buff;
5485 }
5486
5487
#define USB_DEBUG_DESC 1

#ifdef USB_DEBUG_DESC
#define dump_in_desc(x) __dump_in_desc(x)
#define dump_sb_desc(...) __dump_sb_desc(__VA_ARGS__)
#define dump_ep_desc(x) __dump_ep_desc(x)
#define dump_ept_data(x) __dump_ept_data(x)
#else
#define dump_in_desc(...) do {} while (0)
#define dump_sb_desc(...) do {} while (0)
#define dump_ep_desc(...) do {} while (0)
#define dump_ept_data(...) do {} while (0)
#endif
5500
5501
5502 /* Uncomment this to enable massive function call trace
5503 #define USB_DEBUG_TRACE */
5504
5505 #ifdef USB_DEBUG_TRACE
5506 #define DBFENTER (printk(": Entering: %s\n", __FUNCTION__))
5507 #define DBFEXIT (printk(": Exiting: %s\n", __FUNCTION__))
5508 #else
5509 #define DBFENTER do {} while (0)
5510 #define DBFEXIT do {} while (0)
5511 #endif
5512
5513 #define CHECK_ALIGN(x) if (((__u32)(x)) & 0x00000003) \
5514 {panic("Alignment check (DWORD) failed at %s:%s:%d\n", __FILE__, __FUNCTION__, __LINE__);}
5515
5516 /* Most helpful debugging aid */
5517 #define ASSERT(expr) ((void) ((expr) ? 0 : (err("assert failed at: %s %d",__FUNCTION__, __LINE__))))
5518
5519
5520 /***************************************************************************/
5521 /***************************************************************************/
5522 /* Forward declarations */
5523 /***************************************************************************/
5524 /***************************************************************************/
5525 void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg *reg);
5526 void crisv10_hcd_port_status_irq(struct crisv10_irq_reg *reg);
5527 void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg *reg);
5528 void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg *reg);
5529
5530 void rh_port_status_change(__u16[]);
5531 int rh_clear_port_feature(__u8, __u16);
5532 int rh_set_port_feature(__u8, __u16);
5533 static void rh_disable_port(unsigned int port);
5534
5535 static void check_finished_bulk_tx_epids(struct usb_hcd *hcd,
5536 int timer);
5537
5538 static int tc_setup_epid(struct usb_host_endpoint *ep, struct urb *urb,
5539 int mem_flags);
5540 static void tc_free_epid(struct usb_host_endpoint *ep);
5541 static int tc_allocate_epid(void);
5542 static void tc_finish_urb(struct usb_hcd *hcd, struct urb *urb, int status);
5543 static void tc_finish_urb_later(struct usb_hcd *hcd, struct urb *urb,
5544 int status);
5545
5546 static int urb_priv_create(struct usb_hcd *hcd, struct urb *urb, int epid,
5547 int mem_flags);
5548 static void urb_priv_free(struct usb_hcd *hcd, struct urb *urb);
5549
5550 static int crisv10_usb_check_bandwidth(struct usb_device *dev,struct urb *urb);
5551 static void crisv10_usb_claim_bandwidth(
5552 struct usb_device *dev, struct urb *urb, int bustime, int isoc);
5553 static void crisv10_usb_release_bandwidth(
5554 struct usb_hcd *hcd, int isoc, int bandwidth);
5555
5556 static inline struct urb *urb_list_first(int epid);
5557 static inline void urb_list_add(struct urb *urb, int epid,
5558 int mem_flags);
5559 static inline urb_entry_t *urb_list_entry(struct urb *urb, int epid);
5560 static inline void urb_list_del(struct urb *urb, int epid);
5561 static inline void urb_list_move_last(struct urb *urb, int epid);
5562 static inline struct urb *urb_list_next(struct urb *urb, int epid);
5563
5564 int create_sb_for_urb(struct urb *urb, int mem_flags);
5565 int init_intr_urb(struct urb *urb, int mem_flags);
5566
5567 static inline void etrax_epid_set(__u8 index, __u32 data);
5568 static inline void etrax_epid_clear_error(__u8 index);
5569 static inline void etrax_epid_set_toggle(__u8 index, __u8 dirout,
5570 __u8 toggle);
5571 static inline __u8 etrax_epid_get_toggle(__u8 index, __u8 dirout);
5572 static inline __u32 etrax_epid_get(__u8 index);
5573
/* R_USB_EPT_DATA and R_USB_EPT_DATA_ISO share the same register position
in Etrax, so for full-word accesses the internal layout difference
doesn't matter */
5576 #define etrax_epid_iso_set(index, data) etrax_epid_set(index, data)
5577 #define etrax_epid_iso_get(index) etrax_epid_get(index)
5578
5579
5580 static void tc_dma_process_isoc_urb(struct urb *urb);
5581 static void tc_dma_process_queue(int epid);
5582 static void tc_dma_unlink_intr_urb(struct urb *urb);
5583 static irqreturn_t tc_dma_tx_interrupt(int irq, void *vhc);
5584 static irqreturn_t tc_dma_rx_interrupt(int irq, void *vhc);
5585
5586 static void tc_bulk_start_timer_func(unsigned long dummy);
5587 static void tc_bulk_eot_timer_func(unsigned long dummy);
5588
5589
5590 /*************************************************************/
5591 /*************************************************************/
/* Host Controller Driver block */
5593 /*************************************************************/
5594 /*************************************************************/
5595
5596 /* HCD operations */
5597 static irqreturn_t crisv10_hcd_top_irq(int irq, void*);
5598 static int crisv10_hcd_reset(struct usb_hcd *);
5599 static int crisv10_hcd_start(struct usb_hcd *);
5600 static void crisv10_hcd_stop(struct usb_hcd *);
5601 #ifdef CONFIG_PM
5602 static int crisv10_hcd_suspend(struct device *, u32, u32);
5603 static int crisv10_hcd_resume(struct device *, u32);
5604 #endif /* CONFIG_PM */
5605 static int crisv10_hcd_get_frame(struct usb_hcd *);
5606
5607 static int tc_urb_enqueue(struct usb_hcd *, struct urb *, gfp_t mem_flags);
5608 static int tc_urb_dequeue(struct usb_hcd *, struct urb *, int);
5609 static void tc_endpoint_disable(struct usb_hcd *, struct usb_host_endpoint *ep);
5610
5611 static int rh_status_data_request(struct usb_hcd *, char *);
5612 static int rh_control_request(struct usb_hcd *, u16, u16, u16, char*, u16);
5613
5614 #ifdef CONFIG_PM
5615 static int crisv10_hcd_hub_suspend(struct usb_hcd *);
5616 static int crisv10_hcd_hub_resume(struct usb_hcd *);
5617 #endif /* CONFIG_PM */
5618 #ifdef CONFIG_USB_OTG
5619 static int crisv10_hcd_start_port_reset(struct usb_hcd *, unsigned);
5620 #endif /* CONFIG_USB_OTG */
5621
5622 /* host controller driver interface */
5623 static const struct hc_driver crisv10_hc_driver =
5624 {
5625 .description = hc_name,
5626 .product_desc = product_desc,
5627 .hcd_priv_size = sizeof(struct crisv10_hcd),
5628
/* Attaching IRQ handler manually in probe() */
5630 /* .irq = crisv10_hcd_irq, */
5631
5632 .flags = HCD_USB11,
5633
5634 /* called to init HCD and root hub */
5635 .reset = crisv10_hcd_reset,
5636 .start = crisv10_hcd_start,
5637
5638 /* cleanly make HCD stop writing memory and doing I/O */
5639 .stop = crisv10_hcd_stop,
5640
5641 /* return current frame number */
5642 .get_frame_number = crisv10_hcd_get_frame,
5643
5644
5645 /* Manage i/o requests via the Transfer Controller */
5646 .urb_enqueue = tc_urb_enqueue,
5647 .urb_dequeue = tc_urb_dequeue,
5648
5649 /* hw synch, freeing endpoint resources that urb_dequeue can't */
5650 .endpoint_disable = tc_endpoint_disable,
5651
5652
5653 /* Root Hub support */
5654 .hub_status_data = rh_status_data_request,
5655 .hub_control = rh_control_request,
5656 #ifdef CONFIG_PM
5657 .hub_suspend = rh_suspend_request,
5658 .hub_resume = rh_resume_request,
5659 #endif /* CONFIG_PM */
5660 #ifdef CONFIG_USB_OTG
5661 .start_port_reset = crisv10_hcd_start_port_reset,
5662 #endif /* CONFIG_USB_OTG */
5663 };
5664
5665
5666 /*
5667 * conversion between pointers to a hcd and the corresponding
5668 * crisv10_hcd
5669 */
5670
5671 static inline struct crisv10_hcd *hcd_to_crisv10_hcd(struct usb_hcd *hcd)
5672 {
5673 return (struct crisv10_hcd *) hcd->hcd_priv;
5674 }
5675
5676 static inline struct usb_hcd *crisv10_hcd_to_hcd(struct crisv10_hcd *hcd)
5677 {
5678 return container_of((void *) hcd, struct usb_hcd, hcd_priv);
5679 }
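
/* Note: these two helpers are inverses of each other -- hcd_priv is
embedded at the end of struct usb_hcd, so
crisv10_hcd_to_hcd(hcd_to_crisv10_hcd(hcd)) == hcd for any valid hcd. */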
5680
5681 /* check if specified port is in use */
5682 static inline int port_in_use(unsigned int port)
5683 {
5684 return ports & (1 << port);
5685 }
5686
5687 /* number of ports in use */
5688 static inline unsigned int num_ports(void)
5689 {
5690 unsigned int i, num = 0;
5691 for (i = 0; i < USB_ROOT_HUB_PORTS; i++)
5692 if (port_in_use(i))
5693 num++;
5694 return num;
5695 }
5696
5697 /* map hub port number to the port number used internally by the HC */
5698 static inline unsigned int map_port(unsigned int port)
5699 {
5700 unsigned int i, num = 0;
5701 for (i = 0; i < USB_ROOT_HUB_PORTS; i++)
5702 if (port_in_use(i))
5703 if (++num == port)
5704 return i;
5705 return -1;
5706 }
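
/* Worked example: with ports == 0x2 (only physical port 2 enabled) the root
hub exposes a single port, so num_ports() == 1 and map_port(1) returns 1,
the internal index of physical port 2. For a hub port number beyond the
number of enabled ports, map_port() falls through to -1, which becomes
0xffffffff since the return type is unsigned. */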
5707
5708 /* size of descriptors in slab cache */
5709 #ifndef MAX
5710 #define MAX(x, y) ((x) > (y) ? (x) : (y))
5711 #endif
5712
5713
5714 /******************************************************************/
5715 /* Hardware Interrupt functions */
5716 /******************************************************************/
5717
5718 /* Fast interrupt handler for HC */
5719 static irqreturn_t crisv10_hcd_top_irq(int irq, void *vcd)
5720 {
5721 struct usb_hcd *hcd = vcd;
5722 struct crisv10_irq_reg reg;
5723 __u32 irq_mask;
5724 unsigned long flags;
5725
5726 DBFENTER;
5727
5728 ASSERT(hcd != NULL);
5729 reg.hcd = hcd;
5730
/* Turn off other interrupts while handling these sensitive cases */
5732 local_irq_save(flags);
5733
/* Read out which interrupts are flagged */
5735 irq_mask = *R_USB_IRQ_MASK_READ;
5736 reg.r_usb_irq_mask_read = irq_mask;
5737
5738 /* Reading R_USB_STATUS clears the ctl_status interrupt. Note that
5739 R_USB_STATUS must be read before R_USB_EPID_ATTN since reading the latter
5740 clears the ourun and perror fields of R_USB_STATUS. */
5741 reg.r_usb_status = *R_USB_STATUS;
5742
5743 /* Reading R_USB_EPID_ATTN clears the iso_eof, bulk_eot and epid_attn
5744 interrupts. */
5745 reg.r_usb_epid_attn = *R_USB_EPID_ATTN;
5746
5747 /* Reading R_USB_RH_PORT_STATUS_1 and R_USB_RH_PORT_STATUS_2 clears the
5748 port_status interrupt. */
5749 reg.r_usb_rh_port_status_1 = *R_USB_RH_PORT_STATUS_1;
5750 reg.r_usb_rh_port_status_2 = *R_USB_RH_PORT_STATUS_2;
5751
5752 /* Reading R_USB_FM_NUMBER clears the sof interrupt. */
5753 /* Note: the lower 11 bits contain the actual frame number, sent with each
5754 sof. */
5755 reg.r_usb_fm_number = *R_USB_FM_NUMBER;
5756
5757 /* Interrupts are handled in order of priority. */
5758 if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, port_status)) {
5759 crisv10_hcd_port_status_irq(&reg);
5760 }
5761 if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, epid_attn)) {
5762 crisv10_hcd_epid_attn_irq(&reg);
5763 }
5764 if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, ctl_status)) {
5765 crisv10_hcd_ctl_status_irq(&reg);
5766 }
5767 if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, iso_eof)) {
5768 crisv10_hcd_isoc_eof_irq(&reg);
5769 }
5770 if (irq_mask & IO_MASK(R_USB_IRQ_MASK_READ, bulk_eot)) {
5771 /* Update/restart the bulk start timer since obviously the channel is
5772 running. */
5773 mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
/* Update/restart the bulk eot timer since we just received a bulk eot
interrupt. */
5776 mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
5777
5778 /* Check for finished bulk transfers on epids */
5779 check_finished_bulk_tx_epids(hcd, 0);
5780 }
5781 local_irq_restore(flags);
5782
5783 DBFEXIT;
5784 return IRQ_HANDLED;
5785 }
5786
5787
5788 void crisv10_hcd_epid_attn_irq(struct crisv10_irq_reg *reg) {
5789 struct usb_hcd *hcd = reg->hcd;
5790 struct crisv10_urb_priv *urb_priv;
5791 int epid;
5792 DBFENTER;
5793
5794 for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
5795 if (test_bit(epid, (void *)&reg->r_usb_epid_attn)) {
5796 struct urb *urb;
5797 __u32 ept_data;
5798 int error_code;
5799
5800 if (epid == DUMMY_EPID || epid == INVALID_EPID) {
5801 /* We definitely don't care about these ones. Besides, they are
5802 always disabled, so any possible disabling caused by the
5803 epid attention interrupt is irrelevant. */
5804 continue;
5805 }
5806
5807 if(!epid_inuse(epid)) {
5808 irq_err("Epid attention on epid:%d that isn't in use\n", epid);
5809 printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
5810 debug_epid(epid);
5811 continue;
5812 }
5813
5814 /* Note that although there are separate R_USB_EPT_DATA and
5815 R_USB_EPT_DATA_ISO registers, they are located at the same address and
5816 are of the same size. In other words, this read should be ok for isoc
5817 also. */
5818 ept_data = etrax_epid_get(epid);
5819 error_code = IO_EXTRACT(R_USB_EPT_DATA, error_code, ept_data);
5820
5821 /* Get the active URB for this epid. We blatantly assume
5822 that only this URB could have caused the epid attention. */
5823 urb = activeUrbList[epid];
5824 if (urb == NULL) {
5825 irq_err("Attention on epid:%d error:%d with no active URB.\n",
5826 epid, error_code);
5827 printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
5828 debug_epid(epid);
5829 continue;
5830 }
5831
5832 urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
5833 ASSERT(urb_priv);
5834
5835 /* Using IO_STATE_VALUE on R_USB_EPT_DATA should be ok for isoc also. */
5836 if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
5837
5838 /* Isoc traffic doesn't have error_count_in/error_count_out. */
5839 if ((usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS) &&
5840 (IO_EXTRACT(R_USB_EPT_DATA, error_count_in, ept_data) == 3 ||
5841 IO_EXTRACT(R_USB_EPT_DATA, error_count_out, ept_data) == 3)) {
/* Check if the URB is already marked for late-finish; we can get several
3rd errors for Intr traffic when a device is unplugged */
5844 if(urb_priv->later_data == NULL) {
5845 /* 3rd error. */
5846 irq_warn("3rd error for epid:%d (%s %s) URB:0x%x[%d]\n", epid,
5847 str_dir(urb->pipe), str_type(urb->pipe),
5848 (unsigned int)urb, urb_priv->urb_num);
5849
5850 tc_finish_urb_later(hcd, urb, -EPROTO);
5851 }
5852
5853 } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {
5854 irq_warn("Perror for epid:%d\n", epid);
5855 printk("FM_NUMBER: %d\n", reg->r_usb_fm_number & 0x7ff);
5856 printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
5857 __dump_urb(urb);
5858 debug_epid(epid);
5859
5860 if (!(ept_data & IO_MASK(R_USB_EPT_DATA, valid))) {
5861 /* invalid ep_id */
5862 panic("Perror because of invalid epid."
5863 " Deconfigured too early?");
5864 } else {
5865 /* past eof1, near eof, zout transfer, setup transfer */
5866 /* Dump the urb and the relevant EP descriptor. */
5867 panic("Something wrong with DMA descriptor contents."
5868 " Too much traffic inserted?");
5869 }
5870 } else if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
5871 /* buffer ourun */
5872 printk("FM_NUMBER: %d\n", reg->r_usb_fm_number & 0x7ff);
5873 printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
5874 __dump_urb(urb);
5875 debug_epid(epid);
5876
5877 panic("Buffer overrun/underrun for epid:%d. DMA too busy?", epid);
5878 } else {
5879 irq_warn("Attention on epid:%d (%s %s) with no error code\n", epid,
5880 str_dir(urb->pipe), str_type(urb->pipe));
5881 printk("R_USB_STATUS: 0x%x\n", reg->r_usb_status);
5882 __dump_urb(urb);
5883 debug_epid(epid);
5884 }
5885
5886 } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
5887 stall)) {
5888 /* Not really a protocol error, just says that the endpoint gave
5889 a stall response. Note that error_code cannot be stall for isoc. */
5890 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
5891 panic("Isoc traffic cannot stall");
5892 }
5893
5894 tc_dbg("Stall for epid:%d (%s %s) URB:0x%x\n", epid,
5895 str_dir(urb->pipe), str_type(urb->pipe), (unsigned int)urb);
5896 tc_finish_urb(hcd, urb, -EPIPE);
5897
5898 } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
5899 bus_error)) {
5900 /* Two devices responded to a transaction request. Must be resolved
5901 by software. FIXME: Reset ports? */
5902 panic("Bus error for epid %d."
5903 " Two devices responded to transaction request\n",
5904 epid);
5905
5906 } else if (error_code == IO_STATE_VALUE(R_USB_EPT_DATA, error_code,
5907 buffer_error)) {
5908 /* DMA overrun or underrun. */
5909 irq_warn("Buffer overrun/underrun for epid:%d (%s %s)\n", epid,
5910 str_dir(urb->pipe), str_type(urb->pipe));
5911
5912 /* It seems that error_code = buffer_error in
5913 R_USB_EPT_DATA/R_USB_EPT_DATA_ISO and ourun = yes in R_USB_STATUS
5914 are the same error. */
5915 tc_finish_urb(hcd, urb, -EPROTO);
5916 } else {
5917 irq_warn("Unknown attention on epid:%d (%s %s)\n", epid,
5918 str_dir(urb->pipe), str_type(urb->pipe));
5919 dump_ept_data(epid);
5920 }
5921 }
5922 }
5923 DBFEXIT;
5924 }
5925
5926 void crisv10_hcd_port_status_irq(struct crisv10_irq_reg *reg)
5927 {
5928 __u16 port_reg[USB_ROOT_HUB_PORTS];
5929 DBFENTER;
5930 port_reg[0] = reg->r_usb_rh_port_status_1;
5931 port_reg[1] = reg->r_usb_rh_port_status_2;
5932 rh_port_status_change(port_reg);
5933 DBFEXIT;
5934 }
5935
5936 void crisv10_hcd_isoc_eof_irq(struct crisv10_irq_reg *reg)
5937 {
5938 int epid;
5939 struct urb *urb;
5940 struct crisv10_urb_priv *urb_priv;
5941
5942 DBFENTER;
5943
5944 for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
5945
/* Only check epids that are in use, are valid and have an SB list */
5947 if (!epid_inuse(epid) || epid == INVALID_EPID ||
5948 TxIsocEPList[epid].sub == 0 || epid == DUMMY_EPID) {
5949 /* Nothing here to see. */
5950 continue;
5951 }
5952 ASSERT(epid_isoc(epid));
5953
5954 /* Get the active URB for this epid (if any). */
5955 urb = activeUrbList[epid];
5956 if (urb == 0) {
5957 isoc_warn("Ignoring NULL urb for epid:%d\n", epid);
5958 continue;
5959 }
5960 if(!epid_out_traffic(epid)) {
5961 /* Sanity check. */
5962 ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
5963
5964 urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
5965 ASSERT(urb_priv);
5966
5967 if (urb_priv->urb_state == NOT_STARTED) {
5968 /* If ASAP is not set and urb->start_frame is the current frame,
5969 start the transfer. */
5970 if (!(urb->transfer_flags & URB_ISO_ASAP) &&
5971 (urb->start_frame == (*R_USB_FM_NUMBER & 0x7ff))) {
5972 /* EP should not be enabled if we're waiting for start_frame */
5973 ASSERT((TxIsocEPList[epid].command &
5974 IO_STATE(USB_EP_command, enable, yes)) == 0);
5975
5976 isoc_warn("Enabling isoc IN EP descr for epid %d\n", epid);
5977 TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
5978
5979 /* This urb is now active. */
5980 urb_priv->urb_state = STARTED;
5981 continue;
5982 }
5983 }
5984 }
5985 }
5986
5987 DBFEXIT;
5988 }
5989
5990 void crisv10_hcd_ctl_status_irq(struct crisv10_irq_reg *reg)
5991 {
5992 struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(reg->hcd);
5993
5994 DBFENTER;
5995 ASSERT(crisv10_hcd);
5996
5997 /* irq_dbg("ctr_status_irq, controller status: %s\n",
5998 hcd_status_to_str(reg->r_usb_status));*/
5999
6000 /* FIXME: What should we do if we get ourun or perror? Dump the EP and SB
6001 list for the corresponding epid? */
6002 if (reg->r_usb_status & IO_MASK(R_USB_STATUS, ourun)) {
6003 panic("USB controller got ourun.");
6004 }
6005 if (reg->r_usb_status & IO_MASK(R_USB_STATUS, perror)) {
6006
6007 /* Before, etrax_usb_do_intr_recover was called on this epid if it was
6008 an interrupt pipe. I don't see how re-enabling all EP descriptors
6009 will help if there was a programming error. */
6010 panic("USB controller got perror.");
6011 }
6012
/* Keep track of whether the USB controller is running or not */
6014 if(reg->r_usb_status & IO_STATE(R_USB_STATUS, running, yes)) {
6015 crisv10_hcd->running = 1;
6016 } else {
6017 crisv10_hcd->running = 0;
6018 }
6019
6020 if (reg->r_usb_status & IO_MASK(R_USB_STATUS, device_mode)) {
6021 /* We should never operate in device mode. */
6022 panic("USB controller in device mode.");
6023 }
6024
6025 /* Set the flag to avoid getting "Unlink after no-IRQ? Controller is probably
6026 using the wrong IRQ" from hcd_unlink_urb() in drivers/usb/core/hcd.c */
6027 set_bit(HCD_FLAG_SAW_IRQ, &reg->hcd->flags);
6028
6029 DBFEXIT;
6030 }
6031
6032
6033 /******************************************************************/
6034 /* Host Controller interface functions */
6035 /******************************************************************/
6036
6037 static inline void crisv10_ready_wait(void) {
6038 volatile int timeout = 10000;
/* Spin while the busy bit of the USB controller in Etrax is set */
6040 while((*R_USB_COMMAND & IO_MASK(R_USB_COMMAND, busy)) &&
6041 (timeout-- > 0));
6042 }
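
/* Design note: every write to R_USB_COMMAND in this driver is preceded by
crisv10_ready_wait(), since the controller ignores new commands while the
busy bit is set. The timeout only bounds the spin; no error is reported if
it expires. */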
6043
6044 /* reset host controller */
6045 static int crisv10_hcd_reset(struct usb_hcd *hcd)
6046 {
6047 DBFENTER;
6048 hcd_dbg(hcd, "reset\n");
6049
6050
6051 /* Reset the USB interface. */
6052 /*
6053 *R_USB_COMMAND =
6054 IO_STATE(R_USB_COMMAND, port_sel, nop) |
6055 IO_STATE(R_USB_COMMAND, port_cmd, reset) |
6056 IO_STATE(R_USB_COMMAND, ctrl_cmd, reset);
6057 nop();
6058 */
6059 DBFEXIT;
6060 return 0;
6061 }
6062
6063 /* start host controller */
6064 static int crisv10_hcd_start(struct usb_hcd *hcd)
6065 {
6066 DBFENTER;
6067 hcd_dbg(hcd, "start\n");
6068
6069 crisv10_ready_wait();
6070
6071 /* Start processing of USB traffic. */
6072 *R_USB_COMMAND =
6073 IO_STATE(R_USB_COMMAND, port_sel, nop) |
6074 IO_STATE(R_USB_COMMAND, port_cmd, reset) |
6075 IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
6076
6077 nop();
6078
6079 hcd->state = HC_STATE_RUNNING;
6080
6081 DBFEXIT;
6082 return 0;
6083 }
6084
6085 /* stop host controller */
6086 static void crisv10_hcd_stop(struct usb_hcd *hcd)
6087 {
6088 DBFENTER;
6089 hcd_dbg(hcd, "stop\n");
6090 crisv10_hcd_reset(hcd);
6091 DBFEXIT;
6092 }
6093
6094 /* return the current frame number */
6095 static int crisv10_hcd_get_frame(struct usb_hcd *hcd)
6096 {
6097 DBFENTER;
6098 DBFEXIT;
6099 return (*R_USB_FM_NUMBER & 0x7ff);
6100 }
6101
6102 #ifdef CONFIG_USB_OTG
6103
6104 static int crisv10_hcd_start_port_reset(struct usb_hcd *hcd, unsigned port)
6105 {
6106 return 0; /* no-op for now */
6107 }
6108
6109 #endif /* CONFIG_USB_OTG */
6110
6111
6112 /******************************************************************/
6113 /* Root Hub functions */
6114 /******************************************************************/
6115
6116 /* root hub status */
6117 static const struct usb_hub_status rh_hub_status =
6118 {
6119 .wHubStatus = 0,
6120 .wHubChange = 0,
6121 };
6122
6123 /* root hub descriptor */
6124 static const u8 rh_hub_descr[] =
6125 {
6126 0x09, /* bDescLength */
6127 0x29, /* bDescriptorType */
6128 USB_ROOT_HUB_PORTS, /* bNbrPorts */
6129 0x00, /* wHubCharacteristics */
6130 0x00,
6131 0x01, /* bPwrOn2pwrGood */
6132 0x00, /* bHubContrCurrent */
6133 0x00, /* DeviceRemovable */
6134 0xff /* PortPwrCtrlMask */
6135 };
6136
/* Actual holder of root hub status */
6138 struct crisv10_rh rh;
6139
/* Initialize root hub data structures (called from devdrv_hcd_probe()) */
6141 int rh_init(void) {
6142 int i;
6143 /* Reset port status flags */
6144 for (i = 0; i < USB_ROOT_HUB_PORTS; i++) {
6145 rh.wPortChange[i] = 0;
6146 rh.wPortStatusPrev[i] = 0;
6147 }
6148 return 0;
6149 }
6150
6151 #define RH_FEAT_MASK ((1<<USB_PORT_FEAT_CONNECTION)|\
6152 (1<<USB_PORT_FEAT_ENABLE)|\
6153 (1<<USB_PORT_FEAT_SUSPEND)|\
6154 (1<<USB_PORT_FEAT_RESET))
6155
6156 /* Handle port status change interrupt (called from bottom part interrupt) */
6157 void rh_port_status_change(__u16 port_reg[]) {
6158 int i;
6159 __u16 wChange;
6160
6161 for(i = 0; i < USB_ROOT_HUB_PORTS; i++) {
6162 /* Xor out changes since last read, masked for important flags */
6163 wChange = (port_reg[i] & RH_FEAT_MASK) ^ rh.wPortStatusPrev[i];
6164 /* Or changes together with (if any) saved changes */
6165 rh.wPortChange[i] |= wChange;
6166 /* Save new status */
6167 rh.wPortStatusPrev[i] = port_reg[i];
6168
6169 if(wChange) {
6170 rh_dbg("Interrupt port_status change port%d: %s Current-status:%s\n", i+1,
6171 port_status_to_str(wChange),
6172 port_status_to_str(port_reg[i]));
6173 }
6174 }
6175 }
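
/* The change-detection idiom above as a minimal, self-contained sketch
(hypothetical helper, not called by the driver): the new change bits are
the XOR of the masked current status with the previously saved status. */
static inline __u16 sketch_port_change_bits(__u16 cur, __u16 prev)
{
  return (cur & RH_FEAT_MASK) ^ prev;
}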
6176
6177 /* Construct port status change bitmap for the root hub */
6178 static int rh_status_data_request(struct usb_hcd *hcd, char *buf)
6179 {
6180 struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
6181 unsigned int i;
6182
6183 DBFENTER;
6184 /*
6185 * corresponds to hub status change EP (USB 2.0 spec section 11.13.4)
6186 * return bitmap indicating ports with status change
6187 */
6188 *buf = 0;
6189 spin_lock(&crisv10_hcd->lock);
6190 for (i = 1; i <= crisv10_hcd->num_ports; i++) {
6191 if (rh.wPortChange[map_port(i)]) {
6192 *buf |= (1 << i);
6193 rh_dbg("rh_status_data_request, change on port %d: %s Current Status: %s\n", i,
6194 port_status_to_str(rh.wPortChange[map_port(i)]),
6195 port_status_to_str(rh.wPortStatusPrev[map_port(i)]));
6196 }
6197 }
6198 spin_unlock(&crisv10_hcd->lock);
6199 DBFEXIT;
6200 return *buf == 0 ? 0 : 1;
6201 }
6202
6203 /* Handle a control request for the root hub (called from hcd_driver) */
6204 static int rh_control_request(struct usb_hcd *hcd,
6205 u16 typeReq,
6206 u16 wValue,
6207 u16 wIndex,
6208 char *buf,
6209 u16 wLength) {
6210
6211 struct crisv10_hcd *crisv10_hcd = hcd_to_crisv10_hcd(hcd);
6212 int retval = 0;
6213 int len;
6214 DBFENTER;
6215
6216 switch (typeReq) {
6217 case GetHubDescriptor:
6218 rh_dbg("GetHubDescriptor\n");
6219 len = min_t(unsigned int, sizeof rh_hub_descr, wLength);
6220 memcpy(buf, rh_hub_descr, len);
6221 buf[2] = crisv10_hcd->num_ports;
6222 break;
6223 case GetHubStatus:
6224 rh_dbg("GetHubStatus\n");
6225 len = min_t(unsigned int, sizeof rh_hub_status, wLength);
6226 memcpy(buf, &rh_hub_status, len);
6227 break;
6228 case GetPortStatus:
6229 if (!wIndex || wIndex > crisv10_hcd->num_ports)
6230 goto error;
6231 rh_dbg("GetportStatus, port:%d change:%s status:%s\n", wIndex,
6232 port_status_to_str(rh.wPortChange[map_port(wIndex)]),
6233 port_status_to_str(rh.wPortStatusPrev[map_port(wIndex)]));
6234 *(u16 *) buf = cpu_to_le16(rh.wPortStatusPrev[map_port(wIndex)]);
6235 *(u16 *) (buf + 2) = cpu_to_le16(rh.wPortChange[map_port(wIndex)]);
6236 break;
6237 case SetHubFeature:
6238 rh_dbg("SetHubFeature\n");
6239 case ClearHubFeature:
6240 rh_dbg("ClearHubFeature\n");
6241 switch (wValue) {
6242 case C_HUB_OVER_CURRENT:
6243 case C_HUB_LOCAL_POWER:
6244 rh_warn("Not implemented hub request:%d \n", typeReq);
6245 /* not implemented */
6246 break;
6247 default:
6248 goto error;
6249 }
6250 break;
6251 case SetPortFeature:
6252 if (!wIndex || wIndex > crisv10_hcd->num_ports)
6253 goto error;
6254 if(rh_set_port_feature(map_port(wIndex), wValue))
6255 goto error;
6256 break;
6257 case ClearPortFeature:
6258 if (!wIndex || wIndex > crisv10_hcd->num_ports)
6259 goto error;
6260 if(rh_clear_port_feature(map_port(wIndex), wValue))
6261 goto error;
6262 break;
6263 default:
6264 rh_warn("Unknown hub request: %d\n", typeReq);
6265 error:
6266 retval = -EPIPE;
6267 }
6268 DBFEXIT;
6269 return retval;
6270 }
6271
6272 int rh_set_port_feature(__u8 bPort, __u16 wFeature) {
6273 __u8 bUsbCommand = 0;
6274 __u8 reset_cnt;
6275 switch(wFeature) {
6276 case USB_PORT_FEAT_RESET:
6277 rh_dbg("SetPortFeature: reset\n");
6278
6279 if (rh.wPortStatusPrev[bPort] &
6280 IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes))
6281 {
6282 __u8 restart_controller = 0;
6283
6284 if ( (rh.wPortStatusPrev[0] &
6285 IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) &&
6286 (rh.wPortStatusPrev[1] &
6287 IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, yes)) )
6288 {
/* Both ports are enabled. The USB controller will not change state. */
6290 restart_controller = 0;
6291 }
6292 else
6293 {
/* Only one port is enabled. The USB controller will change state and
must be restarted. */
6296 restart_controller = 1;
6297 }
6298 /*
6299 In ETRAX 100LX it's not possible to reset an enabled root hub port.
6300 The workaround is to disable and enable the port before resetting it.
6301 Disabling the port can, if both ports are disabled at once, cause the
USB controller to change into the HOST_MODE state.
This state transition causes a lot of unwanted
interrupts that must be avoided.
6305 Disabling the USB controller status and port status interrupts before
6306 disabling/resetting the port stops these interrupts.
6307
6308 These actions are performed:
6309 1. Disable USB controller status and port status interrupts.
6310 2. Disable the port
6311 3. Wait for the port to be disabled.
6312 4. Enable the port.
6313 5. Wait for the port to be enabled.
6314 6. Reset the port.
7. Wait for the reset to end.
6316 8. Wait for the USB controller entering started state.
6317 9. Order the USB controller to running state.
6318 10. Wait for the USB controller reaching running state.
6319 11. Clear all interrupts generated during the disable/enable/reset
6320 procedure.
6321 12. Enable the USB controller status and port status interrupts.
6322 */
6323
6324 /* 1. Disable USB controller status and USB port status interrupts. */
6325 *R_USB_IRQ_MASK_CLR = IO_STATE(R_USB_IRQ_MASK_CLR, ctl_status, clr);
6326 __asm__ __volatile__ (" nop");
6327 *R_USB_IRQ_MASK_CLR = IO_STATE(R_USB_IRQ_MASK_CLR, port_status, clr);
6328 __asm__ __volatile__ (" nop");
6329
6330 {
6331
/* Since a root hub port reset shall last 50 ms and the ETRAX 100LX
root hub port reset is 10 ms, we must perform 5 port resets to
achieve a proper root hub port reset. */
6335 for (reset_cnt = 0; reset_cnt < 5; reset_cnt ++)
6336 {
6337 rh_dbg("Disable Port %d\n", bPort + 1);
6338
6339 /* 2. Disable the port*/
6340 if (bPort == 0)
6341 {
6342 *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
6343 }
6344 else
6345 {
6346 *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, yes);
6347 }
6348
6349 /* 3. Wait for the port to be disabled. */
6350 while ( (bPort == 0) ?
6351 *R_USB_RH_PORT_STATUS_1 &
6352 IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes) :
6353 *R_USB_RH_PORT_STATUS_2 &
6354 IO_STATE(R_USB_RH_PORT_STATUS_2, enabled, yes) ) {}
6355
6356 rh_dbg("Port %d is disabled. Enable it!\n", bPort + 1);
6357
6358 /* 4. Enable the port. */
6359 if (bPort == 0)
6360 {
6361 *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
6362 }
6363 else
6364 {
6365 *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, no);
6366 }
6367
6368 /* 5. Wait for the port to be enabled again. */
6369 while (!( (bPort == 0) ?
6370 *R_USB_RH_PORT_STATUS_1 &
6371 IO_STATE(R_USB_RH_PORT_STATUS_1, connected, yes) :
6372 *R_USB_RH_PORT_STATUS_2 &
6373 IO_STATE(R_USB_RH_PORT_STATUS_2, connected, yes) ) ) {}
6374
6375 rh_dbg("Port %d is enabled.\n", bPort + 1);
6376
6377 /* 6. Reset the port */
6378 crisv10_ready_wait();
6379 *R_USB_COMMAND =
6380 ( (bPort == 0) ?
6381 IO_STATE(R_USB_COMMAND, port_sel, port1):
6382 IO_STATE(R_USB_COMMAND, port_sel, port2) ) |
6383 IO_STATE(R_USB_COMMAND, port_cmd, reset) |
6384 IO_STATE(R_USB_COMMAND, busy, no) |
6385 IO_STATE(R_USB_COMMAND, ctrl_cmd, nop);
6386 rh_dbg("Port %d is resetting.\n", bPort + 1);
6387
/* 7. The USB specification says that we should wait at least
10 ms for the device to recover */
udelay(10500); /* 10.5 ms blocking wait */
6391
6392 crisv10_ready_wait();
6393 }
6394 }
6395
6396
6397 /* Check if the USB controller needs to be restarted. */
6398 if (restart_controller)
6399 {
6400 /* 8. Wait for the USB controller entering started state. */
6401 while (!(*R_USB_STATUS & IO_STATE(R_USB_STATUS, started, yes))) {}
6402
6403 /* 9. Order the USB controller to running state. */
6404 crisv10_ready_wait();
6405 *R_USB_COMMAND =
6406 IO_STATE(R_USB_COMMAND, port_sel, nop) |
6407 IO_STATE(R_USB_COMMAND, port_cmd, reset) |
6408 IO_STATE(R_USB_COMMAND, busy, no) |
6409 IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
6410
6411 /* 10. Wait for the USB controller reaching running state. */
6412 while (!(*R_USB_STATUS & IO_STATE(R_USB_STATUS, running, yes))) {}
6413 }
6414
/* 11. Clear any controller or port status interrupts before enabling
the interrupts. */
6417 {
6418 u16 dummy;
6419
6420 /* Clear the port status interrupt of the reset port. */
6421 if (bPort == 0)
6422 {
6423 rh_dbg("Clearing port 1 interrupts\n");
6424 dummy = *R_USB_RH_PORT_STATUS_1;
6425 }
6426 else
6427 {
6428 rh_dbg("Clearing port 2 interrupts\n");
6429 dummy = *R_USB_RH_PORT_STATUS_2;
6430 }
6431
6432 if (restart_controller)
6433 {
/* The USB controller is restarted. Clear all interrupts. */
6435 rh_dbg("Clearing all interrupts\n");
6436 dummy = *R_USB_STATUS;
6437 dummy = *R_USB_RH_PORT_STATUS_1;
6438 dummy = *R_USB_RH_PORT_STATUS_2;
6439 }
6440 }
6441
6442 /* 12. Enable USB controller status and USB port status interrupts. */
6443 *R_USB_IRQ_MASK_SET = IO_STATE(R_USB_IRQ_MASK_SET, ctl_status, set);
6444 __asm__ __volatile__ (" nop");
6445 *R_USB_IRQ_MASK_SET = IO_STATE(R_USB_IRQ_MASK_SET, port_status, set);
6446 __asm__ __volatile__ (" nop");
6447
6448 }
6449 else
6450 {
6451
6452 bUsbCommand |= IO_STATE(R_USB_COMMAND, port_cmd, reset);
6453 /* Select which port via the port_sel field */
6454 bUsbCommand |= IO_FIELD(R_USB_COMMAND, port_sel, bPort+1);
6455
6456 /* Make sure the controller isn't busy. */
6457 crisv10_ready_wait();
6458 /* Send out the actual command to the USB controller */
6459 *R_USB_COMMAND = bUsbCommand;
6460
/* Wait a while for the controller to become started after the port reset */
6462 udelay(12000); /* 12ms blocking wait */
6463
6464 /* Make sure the controller isn't busy. */
6465 crisv10_ready_wait();
6466
6467 /* If all enabled ports were disabled the host controller goes down into
6468 started mode, so we need to bring it back into the running state.
6469 (This is safe even if it's already in the running state.) */
6470 *R_USB_COMMAND =
6471 IO_STATE(R_USB_COMMAND, port_sel, nop) |
6472 IO_STATE(R_USB_COMMAND, port_cmd, reset) |
6473 IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
6474 }
6475
6476 break;
6477 case USB_PORT_FEAT_SUSPEND:
6478 rh_dbg("SetPortFeature: suspend\n");
6479 bUsbCommand |= IO_STATE(R_USB_COMMAND, port_cmd, suspend);
6480 goto set;
6481 break;
6482 case USB_PORT_FEAT_POWER:
6483 rh_dbg("SetPortFeature: power\n");
6484 break;
6485 case USB_PORT_FEAT_C_CONNECTION:
6486 rh_dbg("SetPortFeature: c_connection\n");
6487 break;
6488 case USB_PORT_FEAT_C_RESET:
6489 rh_dbg("SetPortFeature: c_reset\n");
6490 break;
6491 case USB_PORT_FEAT_C_OVER_CURRENT:
6492 rh_dbg("SetPortFeature: c_over_current\n");
6493 break;
6494
6495 set:
6496 /* Select which port via the port_sel field */
6497 bUsbCommand |= IO_FIELD(R_USB_COMMAND, port_sel, bPort+1);
6498
6499 /* Make sure the controller isn't busy. */
6500 crisv10_ready_wait();
6501 /* Send out the actual command to the USB controller */
6502 *R_USB_COMMAND = bUsbCommand;
6503 break;
6504 default:
6505 rh_dbg("SetPortFeature: unknown feature\n");
6506 return -1;
6507 }
6508 return 0;
6509 }
6510
6511 int rh_clear_port_feature(__u8 bPort, __u16 wFeature) {
6512 switch(wFeature) {
6513 case USB_PORT_FEAT_ENABLE:
6514 rh_dbg("ClearPortFeature: enable\n");
6515 rh_disable_port(bPort);
6516 break;
6517 case USB_PORT_FEAT_SUSPEND:
6518 rh_dbg("ClearPortFeature: suspend\n");
6519 break;
6520 case USB_PORT_FEAT_POWER:
6521 rh_dbg("ClearPortFeature: power\n");
6522 break;
6523
6524 case USB_PORT_FEAT_C_ENABLE:
6525 rh_dbg("ClearPortFeature: c_enable\n");
6526 goto clear;
6527 case USB_PORT_FEAT_C_SUSPEND:
6528 rh_dbg("ClearPortFeature: c_suspend\n");
6529 goto clear;
6530 case USB_PORT_FEAT_C_CONNECTION:
6531 rh_dbg("ClearPortFeature: c_connection\n");
6532 goto clear;
6533 case USB_PORT_FEAT_C_OVER_CURRENT:
6534 rh_dbg("ClearPortFeature: c_over_current\n");
6535 goto clear;
6536 case USB_PORT_FEAT_C_RESET:
6537 rh_dbg("ClearPortFeature: c_reset\n");
6538 goto clear;
6539 clear:
6540 rh.wPortChange[bPort] &= ~(1 << (wFeature - 16));
6541 break;
6542 default:
6543 rh_dbg("ClearPortFeature: unknown feature\n");
6544 return -1;
6545 }
6546 return 0;
6547 }
6548
6549
6550 #ifdef CONFIG_PM
6551 /* Handle a suspend request for the root hub (called from hcd_driver) */
6552 static int rh_suspend_request(struct usb_hcd *hcd)
6553 {
6554 return 0; /* no-op for now */
6555 }
6556
6557 /* Handle a resume request for the root hub (called from hcd_driver) */
6558 static int rh_resume_request(struct usb_hcd *hcd)
6559 {
6560 return 0; /* no-op for now */
6561 }
6562 #endif /* CONFIG_PM */
6563
6564
6565
/* Wrapper for the port-disable workaround registers in the USB controller */
6567 static void rh_disable_port(unsigned int port) {
6568 volatile int timeout = 10000;
6569 volatile char* usb_portx_disable;
6570 switch(port) {
6571 case 0:
6572 usb_portx_disable = R_USB_PORT1_DISABLE;
6573 break;
6574 case 1:
6575 usb_portx_disable = R_USB_PORT2_DISABLE;
6576 break;
6577 default:
6578 /* Invalid port index */
6579 return;
6580 }
6581 /* Set disable flag in special register */
6582 *usb_portx_disable = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
6583 /* Wait until not enabled anymore */
6584 while((rh.wPortStatusPrev[port] &
6585 IO_STATE(R_USB_RH_PORT_STATUS_1, enabled, yes)) &&
6586 (timeout-- > 0));
6587
6588 /* clear disable flag in special register */
6589 *usb_portx_disable = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
6590 rh_info("Physical port %d disabled\n", port+1);
6591 }
6592
6593
6594 /******************************************************************/
6595 /* Transfer Controller (TC) functions */
6596 /******************************************************************/
6597
6598 /* FIXME: Should RX_BUF_SIZE be a config option, or maybe we should adjust it
6599 dynamically?
6600 To adjust it dynamically we would have to get an interrupt when we reach
6601 the end of the rx descriptor list, or when we get close to the end, and
6602 then allocate more descriptors. */
6603 #define NBR_OF_RX_DESC 512
6604 #define RX_DESC_BUF_SIZE 1024
6605 #define RX_BUF_SIZE (NBR_OF_RX_DESC * RX_DESC_BUF_SIZE)
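
/* Sizing sketch (editorial note): with the values above, the receive ring is
   NBR_OF_RX_DESC * RX_DESC_BUF_SIZE = 512 * 1024 bytes = 512 KiB, statically
   allocated as RxBuf below, each descriptor owning a 1 KiB slice of it.
   Should these defines ever be made configurable, a guard of this form would
   preserve the cache-line assumption documented at the RxBuf declaration:

     #if (RX_DESC_BUF_SIZE % 32) != 0
     #error RX_DESC_BUF_SIZE must be a multiple of the 32-byte cache line
     #endif
*/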
6606
6607
6608 /* Local variables for Transfer Controller */
6609 /* --------------------------------------- */
6610
6611 /* This is a circular (doubly-linked) list of the active urbs for each epid.
6612    The head is never removed, and new urbs are linked onto the list as
6613    urb_entry_t elements. Don't reference urb_list directly; use the wrapper
6614    functions instead (which include spin locks). */
6615 static struct list_head urb_list[NBR_OF_EPIDS];
6616
6617 /* Read about the need and usage of this lock in submit_ctrl_urb. */
6618 /* Lock for URB lists for each EPID */
6619 static spinlock_t urb_list_lock;
6620
6621 /* Lock for EPID array register (R_USB_EPT_x) in Etrax */
6622 static spinlock_t etrax_epid_lock;
6623
6624 /* Lock for dma8 sub0 handling */
6625 static spinlock_t etrax_dma8_sub0_lock;
6626
6627 /* DMA IN cache bug. Align the DMA IN buffers to 32 bytes, i.e. a cache line.
6628    Since RX_DESC_BUF_SIZE (1024) is a multiple of 32, all rx buffers will be
6629 cache aligned. */
6630 static volatile unsigned char RxBuf[RX_BUF_SIZE] __attribute__ ((aligned (32)));
6631 static volatile struct USB_IN_Desc RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned (4)));
6632
6633 /* Pointers into RxDescList. */
6634 static volatile struct USB_IN_Desc *myNextRxDesc;
6635 static volatile struct USB_IN_Desc *myLastRxDesc;
6636
6637 /* A zout transfer makes a memory access at the address of its buf pointer,
6638 which means that setting this buf pointer to 0 will cause an access to the
6639    flash. In addition to this, setting sw_len to 0 results in a 16/32-byte
6640    transfer (depending on DMA burst size).
6641 Instead, we set it to 1, and point it to this buffer. */
6642 static int zout_buffer[4] __attribute__ ((aligned (4)));
6643
6644 /* Cache for allocating new EP and SB descriptors. */
6645 static struct kmem_cache *usb_desc_cache;
6646
6647 /* Cache for the data allocated in the isoc descr top half. */
6648 static struct kmem_cache *isoc_compl_cache;
6649
6650 /* Cache for the data allocated for delayed finishing of URBs */
6651 static struct kmem_cache *later_data_cache;
6652
6653
6654 /* Counter to keep track of how many Isoc EPs we have set up. Used to enable
6655    and disable the iso_eof interrupt. We only need these interrupts when we
6656    have Isoc data endpoints (they consume CPU cycles).
6657    FIXME: This could be more fine-grained, so that the interrupt is only
6658    enabled when an In Isoc URB without the URB_ISO_ASAP flag is queued. */
6659 static int isoc_epid_counter;
6660
6661 /* Protecting wrapper functions for R_USB_EPT_x */
6662 /* -------------------------------------------- */
6663 static inline void etrax_epid_set(__u8 index, __u32 data) {
6664 unsigned long flags;
6665 spin_lock_irqsave(&etrax_epid_lock, flags);
6666 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
6667 nop();
6668 *R_USB_EPT_DATA = data;
6669 spin_unlock_irqrestore(&etrax_epid_lock, flags);
6670 }
6671
6672 static inline void etrax_epid_clear_error(__u8 index) {
6673 unsigned long flags;
6674 spin_lock_irqsave(&etrax_epid_lock, flags);
6675 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
6676 nop();
6677 *R_USB_EPT_DATA &=
6678 ~(IO_MASK(R_USB_EPT_DATA, error_count_in) |
6679 IO_MASK(R_USB_EPT_DATA, error_count_out) |
6680 IO_MASK(R_USB_EPT_DATA, error_code));
6681 spin_unlock_irqrestore(&etrax_epid_lock, flags);
6682 }
6683
6684 static inline void etrax_epid_set_toggle(__u8 index, __u8 dirout,
6685 __u8 toggle) {
6686 unsigned long flags;
6687 spin_lock_irqsave(&etrax_epid_lock, flags);
6688 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
6689 nop();
6690 if(dirout) {
6691 *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_out);
6692 *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_out, toggle);
6693 } else {
6694 *R_USB_EPT_DATA &= ~IO_MASK(R_USB_EPT_DATA, t_in);
6695 *R_USB_EPT_DATA |= IO_FIELD(R_USB_EPT_DATA, t_in, toggle);
6696 }
6697 spin_unlock_irqrestore(&etrax_epid_lock, flags);
6698 }
6699
6700 static inline __u8 etrax_epid_get_toggle(__u8 index, __u8 dirout) {
6701 unsigned long flags;
6702 __u8 toggle;
6703 spin_lock_irqsave(&etrax_epid_lock, flags);
6704 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
6705 nop();
6706 if (dirout) {
6707 toggle = IO_EXTRACT(R_USB_EPT_DATA, t_out, *R_USB_EPT_DATA);
6708 } else {
6709 toggle = IO_EXTRACT(R_USB_EPT_DATA, t_in, *R_USB_EPT_DATA);
6710 }
6711 spin_unlock_irqrestore(&etrax_epid_lock, flags);
6712 return toggle;
6713 }
6714
6715
6716 static inline __u32 etrax_epid_get(__u8 index) {
6717 unsigned long flags;
6718 __u32 data;
6719 spin_lock_irqsave(&etrax_epid_lock, flags);
6720 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, index);
6721 nop();
6722 data = *R_USB_EPT_DATA;
6723 spin_unlock_irqrestore(&etrax_epid_lock, flags);
6724 return data;
6725 }
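
/* Usage sketch (editorial, not called by the driver): the wrappers above hide
   the two-step index/data protocol of the EP table. Each wrapper holds
   etrax_epid_lock across both register accesses, since an intervening
   R_USB_EPT_INDEX write would redirect the data access. Reading one field of
   an entry is simply:

     __u32 data = etrax_epid_get(epid);
     int err = IO_EXTRACT(R_USB_EPT_DATA, error_code, data);

   A read-modify-write spanning two wrapper calls is not atomic; code that
   needs that (like tc_free_epid() further down) takes the lock manually. */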
6726
6727
6728
6729
6730 /* Main functions for Transfer Controller */
6731 /* -------------------------------------- */
6732
6733 /* Init structs, memories and lists used by Transfer Controller */
6734 int tc_init(struct usb_hcd *hcd) {
6735 int i;
6736 /* Clear software state info for all epids */
6737 memset(epid_state, 0, sizeof(struct etrax_epid) * NBR_OF_EPIDS);
6738
6739 /* Set Invalid and Dummy as being in use and disabled */
6740 epid_state[INVALID_EPID].inuse = 1;
6741 epid_state[DUMMY_EPID].inuse = 1;
6742 epid_state[INVALID_EPID].disabled = 1;
6743 epid_state[DUMMY_EPID].disabled = 1;
6744
6745   /* Clear counter for how many Isoc epids we have set up */
6746 isoc_epid_counter = 0;
6747
6748 /* Initialize the urb list by initiating a head for each list.
6749      Also reset the list holding the active URB for each epid */
6750 for (i = 0; i < NBR_OF_EPIDS; i++) {
6751 INIT_LIST_HEAD(&urb_list[i]);
6752 activeUrbList[i] = NULL;
6753 }
6754
6755 /* Init lock for URB lists */
6756 spin_lock_init(&urb_list_lock);
6757 /* Init lock for Etrax R_USB_EPT register */
6758 spin_lock_init(&etrax_epid_lock);
6759 /* Init lock for Etrax dma8 sub0 handling */
6760 spin_lock_init(&etrax_dma8_sub0_lock);
6761
6762 /* We use kmem_cache_* to make sure that all DMA desc. are dword aligned */
6763
6764 /* Note that we specify sizeof(struct USB_EP_Desc) as the size, but also
6765 allocate SB descriptors from this cache. This is ok since
6766 sizeof(struct USB_EP_Desc) == sizeof(struct USB_SB_Desc). */
6767 usb_desc_cache = kmem_cache_create("usb_desc_cache",
6768 sizeof(struct USB_EP_Desc), 0,
6769 SLAB_HWCACHE_ALIGN, 0);
6770 if(usb_desc_cache == NULL) {
6771 return -ENOMEM;
6772 }
6773
6774 /* Create slab cache for speedy allocation of memory for isoc bottom-half
6775 interrupt handling */
6776 isoc_compl_cache =
6777 kmem_cache_create("isoc_compl_cache",
6778 sizeof(struct crisv10_isoc_complete_data),
6779 0, SLAB_HWCACHE_ALIGN, 0);
6780 if(isoc_compl_cache == NULL) {
6781 return -ENOMEM;
6782 }
6783
6784 /* Create slab cache for speedy allocation of memory for later URB finish
6785 struct */
6786 later_data_cache =
6787 kmem_cache_create("later_data_cache",
6788 sizeof(struct urb_later_data),
6789 0, SLAB_HWCACHE_ALIGN, 0);
6790 if(later_data_cache == NULL) {
6791 return -ENOMEM;
6792 }
6793
6794
6795 /* Initiate the bulk start timer. */
6796 init_timer(&bulk_start_timer);
6797 bulk_start_timer.expires = jiffies + BULK_START_TIMER_INTERVAL;
6798 bulk_start_timer.function = tc_bulk_start_timer_func;
6799 add_timer(&bulk_start_timer);
6800
6801
6802 /* Initiate the bulk eot timer. */
6803 init_timer(&bulk_eot_timer);
6804 bulk_eot_timer.expires = jiffies + BULK_EOT_TIMER_INTERVAL;
6805 bulk_eot_timer.function = tc_bulk_eot_timer_func;
6806 bulk_eot_timer.data = (unsigned long)hcd;
6807 add_timer(&bulk_eot_timer);
6808
6809 return 0;
6810 }
6811
6812 /* Uninitialize all resources used by Transfer Controller */
6813 void tc_destroy(void) {
6814
6815 /* Destroy all slab cache */
6816 kmem_cache_destroy(usb_desc_cache);
6817 kmem_cache_destroy(isoc_compl_cache);
6818 kmem_cache_destroy(later_data_cache);
6819
6820 /* Remove timers */
6821 del_timer(&bulk_start_timer);
6822 del_timer(&bulk_eot_timer);
6823 }
6824
6825 static void restart_dma8_sub0(void) {
6826 unsigned long flags;
6827 spin_lock_irqsave(&etrax_dma8_sub0_lock, flags);
6828 /* Verify that the dma is not running */
6829 if ((*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd)) == 0) {
6830 struct USB_EP_Desc *ep = (struct USB_EP_Desc *)phys_to_virt(*R_DMA_CH8_SUB0_EP);
6831 while (DUMMY_EPID == IO_EXTRACT(USB_EP_command, epid, ep->command)) {
6832 ep = (struct USB_EP_Desc *)phys_to_virt(ep->next);
6833 }
6834 /* Advance the DMA to the next EP descriptor that is not a DUMMY_EPID. */
6835 *R_DMA_CH8_SUB0_EP = virt_to_phys(ep);
6836 /* Restart the DMA */
6837 *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start);
6838 }
6839 spin_unlock_irqrestore(&etrax_dma8_sub0_lock, flags);
6840 }
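
/* Illustration (editorial): if the bulk EP list momentarily looks like

     EP(DUMMY_EPID) -> EP(DUMMY_EPID) -> EP(real epid) -> ...

   and the DMA has stopped on one of the dummies, the loop above walks past
   every DUMMY_EPID descriptor and restarts channel 8 sub-channel 0 at the
   first descriptor carrying real traffic. */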
6841
6842 /* queue an URB with the transfer controller (called from hcd_driver) */
6843 static int tc_urb_enqueue(struct usb_hcd *hcd,
6844 struct urb *urb,
6845 gfp_t mem_flags) {
6846 int epid;
6847 int retval;
6848 int bustime = 0;
6849 int maxpacket;
6850 unsigned long flags;
6851 struct crisv10_urb_priv *urb_priv;
6852 struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
6853 DBFENTER;
6854
6855 if(!(crisv10_hcd->running)) {
6856 /* The USB Controller is not running, probably because no device is
6857        attached. No point in enqueueing URBs then */
6858 tc_warn("Rejected enqueueing of URB:0x%x because no dev attached\n",
6859 (unsigned int)urb);
6860 return -ENOENT;
6861 }
6862
6863 maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
6864 /* Special case check for In Isoc transfers. Specification states that each
6865 In Isoc transfer consists of one packet and therefore it should fit into
6866      the transfer buffer of a URB.
6867 We do the check here to be sure (an invalid scenario can be produced with
6868 parameters to the usbtest suite) */
6869 if(usb_pipeisoc(urb->pipe) && usb_pipein(urb->pipe) &&
6870 (urb->transfer_buffer_length < maxpacket)) {
6871 tc_err("Submit In Isoc URB with buffer length:%d to pipe with maxpacketlen: %d\n", urb->transfer_buffer_length, maxpacket);
6872 return -EMSGSIZE;
6873 }
6874
6875   /* Check if there is an epid for the URB's destination; if not, this
6876      function sets one up. */
6877 epid = tc_setup_epid(urb->ep, urb, mem_flags);
6878 if (epid < 0) {
6879 tc_err("Failed setup epid:%d for URB:0x%x\n", epid, (unsigned int)urb);
6880 DBFEXIT;
6881 return -ENOMEM;
6882 }
6883
6884 if(urb == activeUrbList[epid]) {
6885 tc_err("Resubmition of allready active URB:0x%x\n", (unsigned int)urb);
6886 return -ENXIO;
6887 }
6888
6889 if(urb_list_entry(urb, epid)) {
6890 tc_err("Resubmition of allready queued URB:0x%x\n", (unsigned int)urb);
6891 return -ENXIO;
6892 }
6893
6894   /* If we have actively flagged the endpoint as disabled then refuse submission */
6895 if(epid_state[epid].disabled) {
6896 return -ENOENT;
6897 }
6898
6899 /* Allocate and init HC-private data for URB */
6900 if(urb_priv_create(hcd, urb, epid, mem_flags) != 0) {
6901 DBFEXIT;
6902 return -ENOMEM;
6903 }
6904 urb_priv = urb->hcpriv;
6905
6906 /* Check if there is enough bandwidth for periodic transfer */
6907 if(usb_pipeint(urb->pipe) || usb_pipeisoc(urb->pipe)) {
6908 /* only check (and later claim) if not already claimed */
6909 if (urb_priv->bandwidth == 0) {
6910 bustime = crisv10_usb_check_bandwidth(urb->dev, urb);
6911 if (bustime < 0) {
6912 tc_err("Not enough periodic bandwidth\n");
6913 urb_priv_free(hcd, urb);
6914 DBFEXIT;
6915 return -ENOSPC;
6916 }
6917 }
6918 }
6919
6920 tc_dbg("Enqueue URB:0x%x[%d] epid:%d (%s) bufflen:%d\n",
6921 (unsigned int)urb, urb_priv->urb_num, epid,
6922 pipe_to_str(urb->pipe), urb->transfer_buffer_length);
6923
6924 /* Create and link SBs required for this URB */
6925 retval = create_sb_for_urb(urb, mem_flags);
6926 if(retval != 0) {
6927 tc_err("Failed to create SBs for URB:0x%x[%d]\n", (unsigned int)urb,
6928 urb_priv->urb_num);
6929 urb_priv_free(hcd, urb);
6930 DBFEXIT;
6931 return retval;
6932 }
6933
6934   /* Init intr EP pool if this URB is an INTR transfer. This pool is later
6935      used when inserting EPs in the TxIntrEPList. We do the alloc here
6936      so we won't run out of memory later */
6937 if(usb_pipeint(urb->pipe)) {
6938 retval = init_intr_urb(urb, mem_flags);
6939 if(retval != 0) {
6940 tc_warn("Failed to init Intr URB\n");
6941 urb_priv_free(hcd, urb);
6942 DBFEXIT;
6943 return retval;
6944 }
6945 }
6946
6947   /* Disable other access while inserting the URB */
6948 local_irq_save(flags);
6949
6950 /* Claim bandwidth, if needed */
6951 if(bustime) {
6952 crisv10_usb_claim_bandwidth(urb->dev,
6953 urb,
6954 bustime,
6955 (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS));
6956 }
6957
6958 /* Add URB to EP queue */
6959 urb_list_add(urb, epid, mem_flags);
6960
6961 if(usb_pipeisoc(urb->pipe)) {
6962 /* Special processing of Isoc URBs. */
6963 tc_dma_process_isoc_urb(urb);
6964 } else {
6965 /* Process EP queue for rest of the URB types (Bulk, Ctrl, Intr) */
6966 tc_dma_process_queue(epid);
6967 }
6968
6969 local_irq_restore(flags);
6970
6971 DBFEXIT;
6972 return 0;
6973 }
6974
6975 /* remove an URB from the transfer controller queues (called from hcd_driver)*/
6976 static int tc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) {
6977 struct crisv10_urb_priv *urb_priv;
6978 unsigned long flags;
6979 int epid;
6980
6981 DBFENTER;
6982 /* Disable interrupts here since a descriptor interrupt for the isoc epid
6983 will modify the sb list. This could possibly be done more granular, but
6984 urb_dequeue should not be used frequently anyway.
6985 */
6986 local_irq_save(flags);
6987
6988 urb->status = status;
6989 urb_priv = urb->hcpriv;
6990
6991 if (!urb_priv) {
6992 /* This happens if a device driver calls unlink on an urb that
6993 was never submitted (lazy driver) or if the urb was completed
6994 while dequeue was being called. */
6995 tc_warn("Dequeing of not enqueued URB:0x%x\n", (unsigned int)urb);
6996 local_irq_restore(flags);
6997 return 0;
6998 }
6999 epid = urb_priv->epid;
7000
7001 tc_warn("Dequeing %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
7002 (urb == activeUrbList[epid]) ? "active" : "queued",
7003 (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
7004 str_type(urb->pipe), epid, urb->status,
7005 (urb_priv->later_data) ? "later-sched" : "");
7006
7007   /* For Bulk, Ctrl and Intr there is only one URB active at a time. So any URB
7008 that isn't active can be dequeued by just removing it from the queue */
7009 if(usb_pipebulk(urb->pipe) || usb_pipecontrol(urb->pipe) ||
7010 usb_pipeint(urb->pipe)) {
7011
7012     /* Check if the URB hasn't gone further than the queue */
7013 if(urb != activeUrbList[epid]) {
7014 ASSERT(urb_priv->later_data == NULL);
7015 tc_warn("Dequeing URB:0x%x[%d] (%s %s epid:%d) from queue"
7016 " (not active)\n", (unsigned int)urb, urb_priv->urb_num,
7017 str_dir(urb->pipe), str_type(urb->pipe), epid);
7018
7019 /* Finish the URB with error status from USB core */
7020 tc_finish_urb(hcd, urb, urb->status);
7021 local_irq_restore(flags);
7022 return 0;
7023 }
7024 }
7025
7026 /* Set URB status to Unlink for handling when interrupt comes. */
7027 urb_priv->urb_state = UNLINK;
7028
7029   /* Differentiate dequeuing of Bulk and Ctrl from Isoc and Intr */
7030 switch(usb_pipetype(urb->pipe)) {
7031 case PIPE_BULK:
7032 /* Check if EP still is enabled */
7033 if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
7034 /* The EP was enabled, disable it. */
7035 TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
7036 }
7037 /* Kicking dummy list out of the party. */
7038 TxBulkEPList[epid].next = virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
7039 break;
7040 case PIPE_CONTROL:
7041 /* Check if EP still is enabled */
7042 if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
7043 /* The EP was enabled, disable it. */
7044 TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
7045 }
7046 break;
7047 case PIPE_ISOCHRONOUS:
7048     /* Disabling, busy-wait and unlinking of Isoc SBs will be done in
7049        finish_isoc_urb(), because there might be cases where the URB is
7050        dequeued but other valid URBs are waiting */
7051
7052 /* Check if In Isoc EP still is enabled */
7053 if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
7054 /* The EP was enabled, disable it. */
7055 TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
7056 }
7057 break;
7058 case PIPE_INTERRUPT:
7059 /* Special care is taken for interrupt URBs. EPs are unlinked in
7060 tc_finish_urb */
7061 break;
7062 default:
7063 break;
7064 }
7065
7066 /* Asynchronous unlink, finish the URB later from scheduled or other
7067 event (data finished, error) */
7068 tc_finish_urb_later(hcd, urb, urb->status);
7069
7070 local_irq_restore(flags);
7071 DBFEXIT;
7072 return 0;
7073 }
7074
7075
7076 static void tc_sync_finish_epid(struct usb_hcd *hcd, int epid) {
7077 volatile int timeout = 10000;
7078 struct urb* urb;
7079 struct crisv10_urb_priv* urb_priv;
7080 unsigned long flags;
7081
7082 volatile struct USB_EP_Desc *first_ep; /* First EP in the list. */
7083 volatile struct USB_EP_Desc *curr_ep; /* Current EP, the iterator. */
7084 volatile struct USB_EP_Desc *next_ep; /* The EP after current. */
7085
7086 int type = epid_state[epid].type;
7087
7088 /* Setting this flag will cause enqueue() to return -ENOENT for new
7089      submissions on this endpoint and finish_urb() won't process the queue further */
7090 epid_state[epid].disabled = 1;
7091
7092 switch(type) {
7093 case PIPE_BULK:
7094 /* Check if EP still is enabled */
7095 if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
7096 /* The EP was enabled, disable it. */
7097 TxBulkEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
7098 tc_warn("sync_finish: Disabling EP for epid:%d\n", epid);
7099
7100       /* Busy-wait until the DMA is no longer using this EP descriptor */
7101 while((*R_DMA_CH8_SUB0_EP ==
7102 virt_to_phys(&TxBulkEPList[epid])) &&
7103 (timeout-- > 0));
7104
7105 }
7106 break;
7107
7108 case PIPE_CONTROL:
7109 /* Check if EP still is enabled */
7110 if (TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
7111 /* The EP was enabled, disable it. */
7112 TxCtrlEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
7113 tc_warn("sync_finish: Disabling EP for epid:%d\n", epid);
7114
7115       /* Busy-wait until the DMA is no longer using this EP descriptor */
7116 while((*R_DMA_CH8_SUB1_EP ==
7117 virt_to_phys(&TxCtrlEPList[epid])) &&
7118 (timeout-- > 0));
7119 }
7120 break;
7121
7122 case PIPE_INTERRUPT:
7123 local_irq_save(flags);
7124 /* Disable all Intr EPs belonging to epid */
7125 first_ep = &TxIntrEPList[0];
7126 curr_ep = first_ep;
7127 do {
7128 next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
7129 if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
7130 /* Disable EP */
7131 next_ep->command &= ~IO_MASK(USB_EP_command, enable);
7132 }
7133 curr_ep = phys_to_virt(curr_ep->next);
7134 } while (curr_ep != first_ep);
7135
7136 local_irq_restore(flags);
7137 break;
7138
7139 case PIPE_ISOCHRONOUS:
7140 /* Check if EP still is enabled */
7141 if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
7142 tc_warn("sync_finish: Disabling Isoc EP for epid:%d\n", epid);
7143 /* The EP was enabled, disable it. */
7144 TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
7145
7146 while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) &&
7147 (timeout-- > 0));
7148 }
7149 break;
7150 }
7151
7152 local_irq_save(flags);
7153
7154 /* Finish if there is active URB for this endpoint */
7155 if(activeUrbList[epid] != NULL) {
7156 urb = activeUrbList[epid];
7157 urb_priv = urb->hcpriv;
7158 ASSERT(urb_priv);
7159 tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
7160 (urb == activeUrbList[epid]) ? "active" : "queued",
7161 (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
7162 str_type(urb->pipe), epid, urb->status,
7163 (urb_priv->later_data) ? "later-sched" : "");
7164
7165 tc_finish_urb(hcd, activeUrbList[epid], -ENOENT);
7166 ASSERT(activeUrbList[epid] == NULL);
7167 }
7168
7169   /* Finish any queued URBs for this endpoint. There won't be any resubmissions
7170 because epid_disabled causes enqueue() to fail for this endpoint */
7171 while((urb = urb_list_first(epid)) != NULL) {
7172 urb_priv = urb->hcpriv;
7173 ASSERT(urb_priv);
7174
7175 tc_warn("Sync finish %s URB:0x%x[%d] (%s %s epid:%d) status:%d %s\n",
7176 (urb == activeUrbList[epid]) ? "active" : "queued",
7177 (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
7178 str_type(urb->pipe), epid, urb->status,
7179 (urb_priv->later_data) ? "later-sched" : "");
7180
7181 tc_finish_urb(hcd, urb, -ENOENT);
7182 }
7183 epid_state[epid].disabled = 0;
7184 local_irq_restore(flags);
7185 }
7186
7187 /* free resources associated with an endpoint (called from hcd_driver) */
7188 static void tc_endpoint_disable(struct usb_hcd *hcd,
7189 struct usb_host_endpoint *ep) {
7190 DBFENTER;
7191 /* Only free epid if it has been allocated. We get two endpoint_disable
7192 requests for ctrl endpoints so ignore the second one */
7193 if(ep->hcpriv != NULL) {
7194 struct crisv10_ep_priv *ep_priv = ep->hcpriv;
7195 int epid = ep_priv->epid;
7196 tc_warn("endpoint_disable ep:0x%x ep-priv:0x%x (%s) (epid:%d freed)\n",
7197 (unsigned int)ep, (unsigned int)ep->hcpriv,
7198 endpoint_to_str(&(ep->desc)), epid);
7199
7200 tc_sync_finish_epid(hcd, epid);
7201
7202 ASSERT(activeUrbList[epid] == NULL);
7203 ASSERT(list_empty(&urb_list[epid]));
7204
7205 tc_free_epid(ep);
7206 } else {
7207 tc_dbg("endpoint_disable ep:0x%x ep-priv:0x%x (%s)\n", (unsigned int)ep,
7208 (unsigned int)ep->hcpriv, endpoint_to_str(&(ep->desc)));
7209 }
7210 DBFEXIT;
7211 }
7212
7213 static void tc_finish_urb_later_proc(struct work_struct* work) {
7214 unsigned long flags;
7215 struct urb_later_data* uld;
7216
7217 local_irq_save(flags);
7218 uld = container_of(work, struct urb_later_data, dws.work);
7219 if(uld->urb == NULL) {
7220 late_dbg("Later finish of URB = NULL (allready finished)\n");
7221 } else {
7222 struct crisv10_urb_priv* urb_priv = uld->urb->hcpriv;
7223 ASSERT(urb_priv);
7224 if(urb_priv->urb_num == uld->urb_num) {
7225 late_dbg("Later finish of URB:0x%x[%d]\n", (unsigned int)(uld->urb),
7226 urb_priv->urb_num);
7227 if(uld->status != uld->urb->status) {
7228 errno_dbg("Later-finish URB with status:%d, later-status:%d\n",
7229 uld->urb->status, uld->status);
7230 }
7231 if(uld != urb_priv->later_data) {
7232 panic("Scheduled uld not same as URBs uld\n");
7233 }
7234 tc_finish_urb(uld->hcd, uld->urb, uld->status);
7235 } else {
7236 late_warn("Ignoring later finish of URB:0x%x[%d]"
7237 ", urb_num doesn't match current URB:0x%x[%d]",
7238 (unsigned int)(uld->urb), uld->urb_num,
7239 (unsigned int)(uld->urb), urb_priv->urb_num);
7240 }
7241 }
7242 local_irq_restore(flags);
7243 kmem_cache_free(later_data_cache, uld);
7244 }
7245
7246 static void tc_finish_urb_later(struct usb_hcd *hcd, struct urb *urb,
7247 int status) {
7248 struct crisv10_urb_priv *urb_priv = urb->hcpriv;
7249 struct urb_later_data* uld;
7250
7251 ASSERT(urb_priv);
7252
7253 if(urb_priv->later_data != NULL) {
7254     /* Later-finish already scheduled for this URB, just update status to
7255 return when finishing later */
7256 errno_dbg("Later-finish schedule change URB status:%d with new"
7257 " status:%d\n", urb_priv->later_data->status, status);
7258
7259 urb_priv->later_data->status = status;
7260 return;
7261 }
7262
7263 uld = kmem_cache_alloc(later_data_cache, GFP_ATOMIC);
7264 ASSERT(uld);
7265
7266 uld->hcd = hcd;
7267 uld->urb = urb;
7268 uld->urb_num = urb_priv->urb_num;
7269 uld->status = status;
7270
7271 INIT_DELAYED_WORK(&uld->dws, tc_finish_urb_later_proc);
7272 urb_priv->later_data = uld;
7273
7274 /* Schedule the finishing of the URB to happen later */
7275 schedule_delayed_work(&uld->dws, LATER_TIMER_DELAY);
7276 }
7277
7278 static void tc_finish_isoc_urb(struct usb_hcd *hcd, struct urb *urb,
7279 int status);
7280
7281 static void tc_finish_urb(struct usb_hcd *hcd, struct urb *urb, int status) {
7282 struct crisv10_hcd* crisv10_hcd = hcd_to_crisv10_hcd(hcd);
7283 struct crisv10_urb_priv *urb_priv = urb->hcpriv;
7284 int epid;
7285 char toggle;
7286 int urb_num;
7287
7288 DBFENTER;
7289 ASSERT(urb_priv != NULL);
7290 epid = urb_priv->epid;
7291 urb_num = urb_priv->urb_num;
7292
7293 if(urb != activeUrbList[epid]) {
7294 if(urb_list_entry(urb, epid)) {
7295       /* Remove this URB from the list. Only happens when URBs are finished
7296          before having been processed (dequeuing) */
7297 urb_list_del(urb, epid);
7298 } else {
7299 tc_warn("Finishing of URB:0x%x[%d] neither active or in queue for"
7300 " epid:%d\n", (unsigned int)urb, urb_num, epid);
7301 }
7302 }
7303
7304 /* Cancel any pending later-finish of this URB */
7305 if(urb_priv->later_data) {
7306 urb_priv->later_data->urb = NULL;
7307 }
7308
7309 /* For an IN pipe, we always set the actual length, regardless of whether
7310 there was an error or not (which means the device driver can use the data
7311 if it wants to). */
7312 if(usb_pipein(urb->pipe)) {
7313 urb->actual_length = urb_priv->rx_offset;
7314 } else {
7315 /* Set actual_length for OUT urbs also; the USB mass storage driver seems
7316 to want that. */
7317 if (status == 0 && urb->status == -EINPROGRESS) {
7318 urb->actual_length = urb->transfer_buffer_length;
7319 } else {
7320 /* We wouldn't know of any partial writes if there was an error. */
7321 urb->actual_length = 0;
7322 }
7323 }
7324
7325
7326 /* URB status mangling */
7327 if(urb->status == -EINPROGRESS) {
7328 /* The USB core hasn't changed the status, let's set our finish status */
7329 urb->status = status;
7330
7331 if ((status == 0) && (urb->transfer_flags & URB_SHORT_NOT_OK) &&
7332 usb_pipein(urb->pipe) &&
7333 (urb->actual_length != urb->transfer_buffer_length)) {
7334 /* URB_SHORT_NOT_OK means that short reads (shorter than the endpoint's
7335          max length) are to be treated as errors. */
7336 errno_dbg("Finishing URB:0x%x[%d] with SHORT_NOT_OK flag and short"
7337 " data:%d\n", (unsigned int)urb, urb_num,
7338 urb->actual_length);
7339 urb->status = -EREMOTEIO;
7340 }
7341
7342 if(urb_priv->urb_state == UNLINK) {
7343 /* URB has been requested to be unlinked asynchronously */
7344 urb->status = -ECONNRESET;
7345 errno_dbg("Fixing unlink status of URB:0x%x[%d] to:%d\n",
7346 (unsigned int)urb, urb_num, urb->status);
7347 }
7348 } else {
7349 /* The USB Core wants to signal some error via the URB, pass it through */
7350 }
7351
7352 /* use completely different finish function for Isoc URBs */
7353 if(usb_pipeisoc(urb->pipe)) {
7354 tc_finish_isoc_urb(hcd, urb, status);
7355 return;
7356 }
7357
7358 /* Do special unlinking of EPs for Intr traffic */
7359 if(usb_pipeint(urb->pipe)) {
7360 tc_dma_unlink_intr_urb(urb);
7361 }
7362
7363 /* Release allocated bandwidth for periodic transfers */
7364 if(usb_pipeint(urb->pipe) || usb_pipeisoc(urb->pipe))
7365 crisv10_usb_release_bandwidth(hcd,
7366 usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS,
7367 urb_priv->bandwidth);
7368
7369 /* This URB is active on EP */
7370 if(urb == activeUrbList[epid]) {
7371 /* We need to fiddle with the toggle bits because the hardware doesn't do
7372 it for us. */
7373 toggle = etrax_epid_get_toggle(epid, usb_pipeout(urb->pipe));
7374 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
7375 usb_pipeout(urb->pipe), toggle);
7376
7377 /* Checks for Ctrl and Bulk EPs */
7378 switch(usb_pipetype(urb->pipe)) {
7379 case PIPE_BULK:
7380       /* Check that the Bulk EP really is disabled before finishing the active URB */
7381 ASSERT((TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) ==
7382 IO_STATE(USB_EP_command, enable, no));
7383       /* Disable the sub-pointer for the EP to prevent the next
7384          tx_interrupt() from processing the Bulk EP. */
7385 TxBulkEPList[epid].sub = 0;
7386 /* No need to wait for the DMA before changing the next pointer.
7387 The modulo NBR_OF_EPIDS isn't actually necessary, since we will never use
7388 the last one (INVALID_EPID) for actual traffic. */
7389 TxBulkEPList[epid].next =
7390 virt_to_phys(&TxBulkEPList[(epid + 1) % NBR_OF_EPIDS]);
7391 break;
7392 case PIPE_CONTROL:
7393       /* Check that the Ctrl EP really is disabled before finishing the active URB */
7394 ASSERT((TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) ==
7395 IO_STATE(USB_EP_command, enable, no));
7396       /* Disable the sub-pointer for the EP to prevent the next
7397          tx_interrupt() from processing the Ctrl EP. */
7398 TxCtrlEPList[epid].sub = 0;
7399 break;
7400 }
7401 }
7402
7403 /* Free HC-private URB data*/
7404 urb_priv_free(hcd, urb);
7405
7406 if(urb->status) {
7407 errno_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
7408 (unsigned int)urb, urb_num, str_dir(urb->pipe),
7409 str_type(urb->pipe), urb->actual_length, urb->status);
7410 } else {
7411 tc_dbg("finish_urb (URB:0x%x[%d] %s %s) (data:%d) status:%d\n",
7412 (unsigned int)urb, urb_num, str_dir(urb->pipe),
7413 str_type(urb->pipe), urb->actual_length, urb->status);
7414 }
7415
7416 /* If we just finished an active URB, clear active pointer. */
7417 if (urb == activeUrbList[epid]) {
7418 /* Make URB not active on EP anymore */
7419 activeUrbList[epid] = NULL;
7420
7421 if(urb->status == 0) {
7422       /* URB finished successfully, process the queue to see if there are any
7423          more URBs waiting before we call the completion function. */
7424 if(crisv10_hcd->running) {
7425 /* Only process queue if USB controller is running */
7426 tc_dma_process_queue(epid);
7427 } else {
7428 tc_warn("No processing of queue for epid:%d, USB Controller not"
7429 " running\n", epid);
7430 }
7431 }
7432 }
7433
7434 /* Hand the URB from HCD to its USB device driver, using its completion
7435 functions */
7436 usb_hcd_giveback_urb (hcd, urb, status);
7437
7438   /* Check the queue once more if the URB returned with an error; we didn't
7439      do it before the completion function because the specification states
7440      that the queue should not restart until all its unlinked URBs have been
7441      fully retired, with their completion functions run */
7442 if(crisv10_hcd->running) {
7443 /* Only process queue if USB controller is running */
7444 tc_dma_process_queue(epid);
7445 } else {
7446 tc_warn("No processing of queue for epid:%d, USB Controller not running\n",
7447 epid);
7448 }
7449
7450 DBFEXIT;
7451 }
7452
7453 static void tc_finish_isoc_urb(struct usb_hcd *hcd, struct urb *urb,
7454 int status) {
7455 struct crisv10_urb_priv *urb_priv = urb->hcpriv;
7456 int epid, i;
7457 volatile int timeout = 10000;
7458 int bandwidth = 0;
7459
7460 ASSERT(urb_priv);
7461 epid = urb_priv->epid;
7462
7463 ASSERT(usb_pipeisoc(urb->pipe));
7464
7465   /* Make sure all isoc packets have status and length set before
7466      completing the urb. */
7467 for (i = urb_priv->isoc_packet_counter; i < urb->number_of_packets; i++){
7468 urb->iso_frame_desc[i].actual_length = 0;
7469 urb->iso_frame_desc[i].status = -EPROTO;
7470 }
7471
7472 /* Check if the URB is currently active (done or error) */
7473 if(urb == activeUrbList[epid]) {
7474     /* Check if there is another In Isoc URB queued for this epid */
7475     if (!list_empty(&urb_list[epid]) && !epid_state[epid].disabled) {
7476 /* Move it from queue to active and mark it started so Isoc transfers
7477 won't be interrupted.
7478 All Isoc URBs data transfers are already added to DMA lists so we
7479 don't have to insert anything in DMA lists here. */
7480 activeUrbList[epid] = urb_list_first(epid);
7481 ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_state =
7482 STARTED;
7483 urb_list_del(activeUrbList[epid], epid);
7484
7485 if(urb->status) {
7486 errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
7487 " status:%d, new waiting URB:0x%x[%d]\n",
7488 (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
7489 str_type(urb->pipe), urb_priv->isoc_packet_counter,
7490 urb->number_of_packets, urb->status,
7491 (unsigned int)activeUrbList[epid],
7492 ((struct crisv10_urb_priv *)(activeUrbList[epid]->hcpriv))->urb_num);
7493 }
7494
7495 } else { /* No other URB queued for this epid */
7496 if(urb->status) {
7497 errno_dbg("finish_isoc_urb (URB:0x%x[%d] %s %s) (%d of %d packets)"
7498 " status:%d, no new URB waiting\n",
7499 (unsigned int)urb, urb_priv->urb_num, str_dir(urb->pipe),
7500 str_type(urb->pipe), urb_priv->isoc_packet_counter,
7501 urb->number_of_packets, urb->status);
7502 }
7503
7504 /* Check if EP is still enabled, then shut it down. */
7505 if (TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
7506 isoc_dbg("Isoc EP enabled for epid:%d, disabling it\n", epid);
7507
7508 /* Should only occur for In Isoc EPs where SB isn't consumed. */
7509 ASSERT(usb_pipein(urb->pipe));
7510
7511 /* Disable it and wait for it to stop */
7512 TxIsocEPList[epid].command &= ~IO_MASK(USB_EP_command, enable);
7513
7514 /* Ah, the luxury of busy-wait. */
7515 while((*R_DMA_CH8_SUB3_EP == virt_to_phys(&TxIsocEPList[epid])) &&
7516 (timeout-- > 0));
7517 }
7518
7519 /* Unlink SB to say that epid is finished. */
7520 TxIsocEPList[epid].sub = 0;
7521 TxIsocEPList[epid].hw_len = 0;
7522
7523 /* No URB active for EP anymore */
7524 activeUrbList[epid] = NULL;
7525 }
7526   } else { /* Finishing of not active URB (queued up with SBs though) */
7527 isoc_warn("finish_isoc_urb (URB:0x%x %s) (%d of %d packets) status:%d,"
7528 " SB queued but not active\n",
7529 (unsigned int)urb, str_dir(urb->pipe),
7530 urb_priv->isoc_packet_counter, urb->number_of_packets,
7531 urb->status);
7532 if(usb_pipeout(urb->pipe)) {
7533 /* Finishing of not yet active Out Isoc URB needs unlinking of SBs. */
7534 struct USB_SB_Desc *iter_sb, *prev_sb, *next_sb;
7535
7536 iter_sb = TxIsocEPList[epid].sub ?
7537 phys_to_virt(TxIsocEPList[epid].sub) : 0;
7538 prev_sb = 0;
7539
7540       /* Find the SB that is linked before this URB's first SB */
7541 while (iter_sb && (iter_sb != urb_priv->first_sb)) {
7542 prev_sb = iter_sb;
7543 iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
7544 }
7545
7546 if (iter_sb == 0) {
7547 /* Unlink of the URB currently being transmitted. */
7548 prev_sb = 0;
7549 iter_sb = TxIsocEPList[epid].sub ? phys_to_virt(TxIsocEPList[epid].sub) : 0;
7550 }
7551
7552 while (iter_sb && (iter_sb != urb_priv->last_sb)) {
7553 iter_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
7554 }
7555
7556 if (iter_sb) {
7557 next_sb = iter_sb->next ? phys_to_virt(iter_sb->next) : 0;
7558 } else {
7559 /* This should only happen if the DMA has completed
7560 processing the SB list for this EP while interrupts
7561 are disabled. */
7562 isoc_dbg("Isoc urb not found, already sent?\n");
7563 next_sb = 0;
7564 }
7565 if (prev_sb) {
7566 prev_sb->next = next_sb ? virt_to_phys(next_sb) : 0;
7567 } else {
7568 TxIsocEPList[epid].sub = next_sb ? virt_to_phys(next_sb) : 0;
7569 }
7570 }
7571 }
7572
7573 /* Free HC-private URB data*/
7574 bandwidth = urb_priv->bandwidth;
7575 urb_priv_free(hcd, urb);
7576
7577 crisv10_usb_release_bandwidth(hcd, usb_pipeisoc(urb->pipe), bandwidth);
7578
7579 /* Hand the URB from HCD to its USB device driver, using its completion
7580 functions */
7581 usb_hcd_giveback_urb (hcd, urb, status);
7582 }
7583
7584 static __u32 urb_num = 0;
7585
7586 /* allocate and initialize URB private data */
7587 static int urb_priv_create(struct usb_hcd *hcd, struct urb *urb, int epid,
7588 int mem_flags) {
7589 struct crisv10_urb_priv *urb_priv;
7590
7591 urb_priv = kmalloc(sizeof *urb_priv, mem_flags);
7592 if (!urb_priv)
7593 return -ENOMEM;
7594 memset(urb_priv, 0, sizeof *urb_priv);
7595
7596 urb_priv->epid = epid;
7597 urb_priv->urb_state = NOT_STARTED;
7598
7599 urb->hcpriv = urb_priv;
7600 /* Assign URB a sequence number, and increment counter */
7601 urb_priv->urb_num = urb_num;
7602 urb_num++;
7603 urb_priv->bandwidth = 0;
7604 return 0;
7605 }
7606
7607 /* free URB private data */
7608 static void urb_priv_free(struct usb_hcd *hcd, struct urb *urb) {
7609 int i;
7610 struct crisv10_urb_priv *urb_priv = urb->hcpriv;
7611 ASSERT(urb_priv != 0);
7612
7613   /* Check if it has any linked SBs that need to be freed */
7614 if(urb_priv->first_sb != NULL) {
7615 struct USB_SB_Desc *next_sb, *first_sb, *last_sb;
7616 int i = 0;
7617 first_sb = urb_priv->first_sb;
7618 last_sb = urb_priv->last_sb;
7619 ASSERT(last_sb);
7620 while(first_sb != last_sb) {
7621 next_sb = (struct USB_SB_Desc *)phys_to_virt(first_sb->next);
7622 kmem_cache_free(usb_desc_cache, first_sb);
7623 first_sb = next_sb;
7624 i++;
7625 }
7626 kmem_cache_free(usb_desc_cache, last_sb);
7627 i++;
7628 }
7629
7630   /* Check if it has any EPs in its Intr pool that also need to be freed */
7631 if(urb_priv->intr_ep_pool_length > 0) {
7632 for(i = 0; i < urb_priv->intr_ep_pool_length; i++) {
7633 kfree(urb_priv->intr_ep_pool[i]);
7634 }
7635 /*
7636 tc_dbg("Freed %d EPs from URB:0x%x EP pool\n",
7637 urb_priv->intr_ep_pool_length, (unsigned int)urb);
7638 */
7639 }
7640
7641 kfree(urb_priv);
7642 urb->hcpriv = NULL;
7643 }
7644
7645 static int ep_priv_create(struct usb_host_endpoint *ep, int mem_flags) {
7646 struct crisv10_ep_priv *ep_priv;
7647
7648 ep_priv = kmalloc(sizeof *ep_priv, mem_flags);
7649 if (!ep_priv)
7650 return -ENOMEM;
7651 memset(ep_priv, 0, sizeof *ep_priv);
7652
7653 ep->hcpriv = ep_priv;
7654 return 0;
7655 }
7656
7657 static void ep_priv_free(struct usb_host_endpoint *ep) {
7658 struct crisv10_ep_priv *ep_priv = ep->hcpriv;
7659 ASSERT(ep_priv);
7660 kfree(ep_priv);
7661 ep->hcpriv = NULL;
7662 }
7663
7664 /*
7665 * usb_check_bandwidth():
7666 *
7667 * old_alloc is from host_controller->bandwidth_allocated in microseconds;
7668 * bustime is from calc_bus_time(), but converted to microseconds.
7669 *
7670 * returns <bustime in us> if successful,
7671 * or -ENOSPC if bandwidth request fails.
7672 *
7673 * FIXME:
7674 * This initial implementation does not use Endpoint.bInterval
7675 * in managing bandwidth allocation.
7676 * It probably needs to be expanded to use Endpoint.bInterval.
7677 * This can be done as a later enhancement (correction).
7678 *
7679 * This will also probably require some kind of
7680 * frame allocation tracking...meaning, for example,
7681 * that if multiple drivers request interrupts every 10 USB frames,
7682 * they don't all have to be allocated at
7683 * frame numbers N, N+10, N+20, etc. Some of them could be at
7684 * N+11, N+21, N+31, etc., and others at
7685 * N+12, N+22, N+32, etc.
7686 *
7687 * Similarly for isochronous transfers...
7688 *
7689 * Individual HCDs can schedule more directly ... this logic
7690 * is not correct for high speed transfers.
7691 */
7692 static int crisv10_usb_check_bandwidth(
7693 struct usb_device *dev,
7694 struct urb *urb)
7695 {
7696 unsigned int pipe = urb->pipe;
7697 long bustime;
7698 int is_in = usb_pipein (pipe);
7699 int is_iso = usb_pipeisoc (pipe);
7700 int old_alloc = dev->bus->bandwidth_allocated;
7701 int new_alloc;
7702
7703 bustime = NS_TO_US (usb_calc_bus_time (dev->speed, is_in, is_iso,
7704 usb_maxpacket (dev, pipe, !is_in)));
7705 if (is_iso)
7706 bustime /= urb->number_of_packets;
7707
7708 new_alloc = old_alloc + (int) bustime;
7709 if (new_alloc > FRAME_TIME_MAX_USECS_ALLOC) {
7710 dev_dbg (&dev->dev, "usb_check_bandwidth FAILED: %d + %ld = %d usec\n",
7711 old_alloc, bustime, new_alloc);
7712 bustime = -ENOSPC; /* report error */
7713 }
7714
7715 return bustime;
7716 }
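
/* Worked example (editorial, numbers illustrative): suppose
   usb_calc_bus_time() and NS_TO_US() yield bustime = 150 us for a periodic
   endpoint while old_alloc is already 800 us. Then new_alloc = 950 us
   exceeds FRAME_TIME_MAX_USECS_ALLOC (90% of the 1000 us full-speed frame
   in the USB core headers, i.e. 900 us), the debug line above is printed
   and -ENOSPC is returned, which tc_urb_enqueue() reports as "Not enough
   periodic bandwidth". */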
7717
7718 /**
7719 * usb_claim_bandwidth - records bandwidth for a periodic transfer
7720 * @dev: source/target of request
7721 * @urb: request (urb->dev == dev)
7722 * @bustime: bandwidth consumed, in (average) microseconds per frame
7723 * @isoc: true iff the request is isochronous
7724 *
7725 * HCDs are expected not to overcommit periodic bandwidth, and to record such
7726 * reservations whenever endpoints are added to the periodic schedule.
7727 *
7728 * FIXME averaging per-frame is suboptimal. Better to sum over the HCD's
7729 * entire periodic schedule ... 32 frames for OHCI, 1024 for UHCI, settable
7730 * for EHCI (256/512/1024 frames, default 1024) and have the bus expose how
7731 * large its periodic schedule is.
7732 */
7733 static void crisv10_usb_claim_bandwidth(
7734 struct usb_device *dev,
7735 struct urb *urb, int bustime, int isoc)
7736 {
7737 	struct crisv10_urb_priv *urb_priv = urb->hcpriv;
7738
7739 	dev->bus->bandwidth_allocated += bustime;
7740 	if (isoc)
7741 		dev->bus->bandwidth_isoc_reqs++;
7742 	else
7743 		dev->bus->bandwidth_int_reqs++;
7744 	urb_priv->bandwidth = bustime;
7745 }
7746
7747 /**
7748 * usb_release_bandwidth - reverses effect of usb_claim_bandwidth()
7749 * @hcd: host controller
7750 * @isoc: true iff the request is isochronous
7751 * @bandwidth: bandwidth returned
7752 *
7753 * This records that previously allocated bandwidth has been released.
7754 * Bandwidth is released when endpoints are removed from the host controller's
7755 * periodic schedule.
7756 */
7757 static void crisv10_usb_release_bandwidth(
7758 struct usb_hcd *hcd,
7759 int isoc,
7760 int bandwidth)
7761 {
7762 hcd_to_bus(hcd)->bandwidth_allocated -= bandwidth;
7763 if (isoc)
7764 hcd_to_bus(hcd)->bandwidth_isoc_reqs--;
7765 else
7766 hcd_to_bus(hcd)->bandwidth_int_reqs--;
7767 }
7768
7769
7770 /* EPID handling functions, managing EP-list in Etrax through wrappers */
7771 /* ------------------------------------------------------------------- */
7772
7773 /* Sets up a new EPID for an endpoint or returns existing if found */
7774 static int tc_setup_epid(struct usb_host_endpoint *ep, struct urb *urb,
7775 int mem_flags) {
7776 int epid;
7777 char devnum, endpoint, out_traffic, slow;
7778 int maxlen;
7779 __u32 epid_data;
7780 struct crisv10_ep_priv *ep_priv = ep->hcpriv;
7781
7782 DBFENTER;
7783
7784 /* Check if a valid epid already is setup for this endpoint */
7785 if(ep_priv != NULL) {
7786 return ep_priv->epid;
7787 }
7788
7789 /* We must find and initiate a new epid for this urb. */
7790 epid = tc_allocate_epid();
7791
7792 if (epid == -1) {
7793 /* Failed to allocate a new epid. */
7794 DBFEXIT;
7795 return epid;
7796 }
7797
7798 /* We now have a new epid to use. Claim it. */
7799 epid_state[epid].inuse = 1;
7800
7801 /* Init private data for new endpoint */
7802 if(ep_priv_create(ep, mem_flags) != 0) {
7803 return -ENOMEM;
7804 }
7805 ep_priv = ep->hcpriv;
7806 ep_priv->epid = epid;
7807
7808 devnum = usb_pipedevice(urb->pipe);
7809 endpoint = usb_pipeendpoint(urb->pipe);
7810 slow = (urb->dev->speed == USB_SPEED_LOW);
7811 maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
7812
7813 if (usb_pipetype(urb->pipe) == PIPE_CONTROL) {
7814 /* We want both IN and OUT control traffic to be put on the same
7815 EP/SB list. */
7816 out_traffic = 1;
7817 } else {
7818 out_traffic = usb_pipeout(urb->pipe);
7819 }
7820
7821 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
7822 epid_data = IO_STATE(R_USB_EPT_DATA_ISO, valid, yes) |
7823 /* FIXME: Change any to the actual port? */
7824 IO_STATE(R_USB_EPT_DATA_ISO, port, any) |
7825 IO_FIELD(R_USB_EPT_DATA_ISO, max_len, maxlen) |
7826 IO_FIELD(R_USB_EPT_DATA_ISO, ep, endpoint) |
7827 IO_FIELD(R_USB_EPT_DATA_ISO, dev, devnum);
7828 etrax_epid_iso_set(epid, epid_data);
7829 } else {
7830 epid_data = IO_STATE(R_USB_EPT_DATA, valid, yes) |
7831 IO_FIELD(R_USB_EPT_DATA, low_speed, slow) |
7832 /* FIXME: Change any to the actual port? */
7833 IO_STATE(R_USB_EPT_DATA, port, any) |
7834 IO_FIELD(R_USB_EPT_DATA, max_len, maxlen) |
7835 IO_FIELD(R_USB_EPT_DATA, ep, endpoint) |
7836 IO_FIELD(R_USB_EPT_DATA, dev, devnum);
7837 etrax_epid_set(epid, epid_data);
7838 }
7839
7840 epid_state[epid].out_traffic = out_traffic;
7841 epid_state[epid].type = usb_pipetype(urb->pipe);
7842
7843 tc_warn("Setting up ep:0x%x epid:%d (addr:%d endp:%d max_len:%d %s %s %s)\n",
7844 (unsigned int)ep, epid, devnum, endpoint, maxlen,
7845 str_type(urb->pipe), out_traffic ? "out" : "in",
7846 slow ? "low" : "full");
7847
7848 /* Enable Isoc eof interrupt if we set up the first Isoc epid */
7849 if(usb_pipeisoc(urb->pipe)) {
7850 isoc_epid_counter++;
7851 if(isoc_epid_counter == 1) {
7852 isoc_warn("Enabled Isoc eof interrupt\n");
7853 *R_USB_IRQ_MASK_SET = IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set);
7854 }
7855 }
7856
7857 DBFEXIT;
7858 return epid;
7859 }
7860
7861 static void tc_free_epid(struct usb_host_endpoint *ep) {
7862 unsigned long flags;
7863 struct crisv10_ep_priv *ep_priv = ep->hcpriv;
7864 int epid;
7865 volatile int timeout = 10000;
7866
7867 DBFENTER;
7868
7869 if (ep_priv == NULL) {
7870 tc_warn("Trying to free unused epid on ep:0x%x\n", (unsigned int)ep);
7871 DBFEXIT;
7872 return;
7873 }
7874
7875 epid = ep_priv->epid;
7876
7877 /* Disable Isoc eof interrupt if we free the last Isoc epid */
7878 if(epid_isoc(epid)) {
7879 ASSERT(isoc_epid_counter > 0);
7880 isoc_epid_counter--;
7881 if(isoc_epid_counter == 0) {
7882 *R_USB_IRQ_MASK_CLR = IO_STATE(R_USB_IRQ_MASK_CLR, iso_eof, clr);
7883 isoc_warn("Disabled Isoc eof interrupt\n");
7884 }
7885 }
7886
7887   /* Take the lock manually instead of using the epid_x_x wrappers,
7888      because we need to poll here */
7889 spin_lock_irqsave(&etrax_epid_lock, flags);
7890
7891 *R_USB_EPT_INDEX = IO_FIELD(R_USB_EPT_INDEX, value, epid);
7892 nop();
7893 while((*R_USB_EPT_DATA & IO_MASK(R_USB_EPT_DATA, hold)) &&
7894 (timeout-- > 0));
7895 /* This will, among other things, set the valid field to 0. */
7896 *R_USB_EPT_DATA = 0;
7897 spin_unlock_irqrestore(&etrax_epid_lock, flags);
7898
7899 /* Free resource in software state info list */
7900 epid_state[epid].inuse = 0;
7901
7902 /* Free private endpoint data */
7903 ep_priv_free(ep);
7904
7905 DBFEXIT;
7906 }
7907
7908 static int tc_allocate_epid(void) {
7909 int i;
7910 DBFENTER;
7911 for (i = 0; i < NBR_OF_EPIDS; i++) {
7912 if (!epid_inuse(i)) {
7913 DBFEXIT;
7914 return i;
7915 }
7916 }
7917
7918 tc_warn("Found no free epids\n");
7919 DBFEXIT;
7920 return -1;
7921 }
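
/* Lifecycle note (editorial): an epid travels tc_allocate_epid() ->
   tc_setup_epid(), which marks epid_state[epid].inuse and writes the EP
   table entry -> traffic -> tc_free_epid(), which clears the table entry
   and the inuse flag. INVALID_EPID and DUMMY_EPID are permanently inuse,
   so at most NBR_OF_EPIDS - 2 = 30 endpoints can be set up at once. */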
7922
7923
7924 /* Wrappers around the list functions (include/linux/list.h). */
7925 /* ---------------------------------------------------------- */
7926 static inline int __urb_list_empty(int epid) {
7927 int retval;
7928 retval = list_empty(&urb_list[epid]);
7929 return retval;
7930 }
7931
7932 /* Returns first urb for this epid, or NULL if list is empty. */
7933 static inline struct urb *urb_list_first(int epid) {
7934 unsigned long flags;
7935 struct urb *first_urb = 0;
7936 spin_lock_irqsave(&urb_list_lock, flags);
7937 if (!__urb_list_empty(epid)) {
7938 /* Get the first urb (i.e. head->next). */
7939 urb_entry_t *urb_entry = list_entry((&urb_list[epid])->next, urb_entry_t, list);
7940 first_urb = urb_entry->urb;
7941 }
7942 spin_unlock_irqrestore(&urb_list_lock, flags);
7943 return first_urb;
7944 }
7945
7946 /* Adds an urb_entry last in the list for this epid. */
7947 static inline void urb_list_add(struct urb *urb, int epid, int mem_flags) {
7948 unsigned long flags;
7949 urb_entry_t *urb_entry = (urb_entry_t *)kmalloc(sizeof(urb_entry_t), mem_flags);
7950 ASSERT(urb_entry);
7951
7952 urb_entry->urb = urb;
7953 spin_lock_irqsave(&urb_list_lock, flags);
7954 list_add_tail(&urb_entry->list, &urb_list[epid]);
7955 spin_unlock_irqrestore(&urb_list_lock, flags);
7956 }
7957
7958 /* Search through the list for an element that contains this urb. (The list
7959 is expected to be short and the one we are about to delete will often be
7960 the first in the list.)
7961    Should be protected by spin locks in the calling function */
7962 static inline urb_entry_t *__urb_list_entry(struct urb *urb, int epid) {
7963 struct list_head *entry;
7964 struct list_head *tmp;
7965 urb_entry_t *urb_entry;
7966
7967 list_for_each_safe(entry, tmp, &urb_list[epid]) {
7968 urb_entry = list_entry(entry, urb_entry_t, list);
7969 ASSERT(urb_entry);
7970 ASSERT(urb_entry->urb);
7971
7972 if (urb_entry->urb == urb) {
7973 return urb_entry;
7974 }
7975 }
7976 return 0;
7977 }
7978
7979 /* Same function as above but for global use. Protects list by spinlock */
7980 static inline urb_entry_t *urb_list_entry(struct urb *urb, int epid) {
7981 unsigned long flags;
7982 urb_entry_t *urb_entry;
7983 spin_lock_irqsave(&urb_list_lock, flags);
7984 urb_entry = __urb_list_entry(urb, epid);
7985 spin_unlock_irqrestore(&urb_list_lock, flags);
7986 return (urb_entry);
7987 }
7988
7989 /* Delete an urb from the list. */
7990 static inline void urb_list_del(struct urb *urb, int epid) {
7991 unsigned long flags;
7992 urb_entry_t *urb_entry;
7993
7994 /* Delete entry and free. */
7995 spin_lock_irqsave(&urb_list_lock, flags);
7996 urb_entry = __urb_list_entry(urb, epid);
7997 ASSERT(urb_entry);
7998
7999 list_del(&urb_entry->list);
8000 spin_unlock_irqrestore(&urb_list_lock, flags);
8001 kfree(urb_entry);
8002 }
8003
8004 /* Move an urb to the end of the list. */
8005 static inline void urb_list_move_last(struct urb *urb, int epid) {
8006 unsigned long flags;
8007 urb_entry_t *urb_entry;
8008
8009 spin_lock_irqsave(&urb_list_lock, flags);
8010 urb_entry = __urb_list_entry(urb, epid);
8011 ASSERT(urb_entry);
8012
8013 list_del(&urb_entry->list);
8014 list_add_tail(&urb_entry->list, &urb_list[epid]);
8015 spin_unlock_irqrestore(&urb_list_lock, flags);
8016 }
8017
8018 /* Get the next urb in the list. */
8019 static inline struct urb *urb_list_next(struct urb *urb, int epid) {
8020 unsigned long flags;
8021 urb_entry_t *urb_entry;
8022
8023 spin_lock_irqsave(&urb_list_lock, flags);
8024 urb_entry = __urb_list_entry(urb, epid);
8025 ASSERT(urb_entry);
8026
8027 if (urb_entry->list.next != &urb_list[epid]) {
8028 struct list_head *elem = urb_entry->list.next;
8029 urb_entry = list_entry(elem, urb_entry_t, list);
8030 spin_unlock_irqrestore(&urb_list_lock, flags);
8031 return urb_entry->urb;
8032 } else {
8033 spin_unlock_irqrestore(&urb_list_lock, flags);
8034 return NULL;
8035 }
8036 }
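
/* Usage sketch (illustrative only, not driver code): the wrappers above give
   the rest of the driver a small per-epid queue API:

     urb_list_add(urb, epid, GFP_ATOMIC);      enqueue at the tail
     first = urb_list_first(epid);             peek the head, may be NULL
     urb_list_del(first, epid);                unlink and free its entry

   Each wrapper takes urb_list_lock internally, so callers must not hold
   that lock when calling them. */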
8037
8038 struct USB_EP_Desc* create_ep(int epid, struct USB_SB_Desc* sb_desc,
8039 int mem_flags) {
8040 struct USB_EP_Desc *ep_desc;
8041 ep_desc = (struct USB_EP_Desc *) kmem_cache_alloc(usb_desc_cache, mem_flags);
8042 if(ep_desc == NULL)
8043 return NULL;
8044 memset(ep_desc, 0, sizeof(struct USB_EP_Desc));
8045
8046 ep_desc->hw_len = 0;
8047 ep_desc->command = (IO_FIELD(USB_EP_command, epid, epid) |
8048 IO_STATE(USB_EP_command, enable, yes));
8049 if(sb_desc == NULL) {
8050 ep_desc->sub = 0;
8051 } else {
8052 ep_desc->sub = virt_to_phys(sb_desc);
8053 }
8054 return ep_desc;
8055 }
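
/* Sketch of typical use (illustrative, identifiers as used elsewhere in this
   file): create a ready-enabled EP descriptor pointing at a prepared SB chain:

     struct USB_EP_Desc *ep;
     ep = create_ep(epid, urb_priv->first_sb, mem_flags);
     if (ep == NULL)
             return -ENOMEM;

   The descriptor is allocated from usb_desc_cache, and its sub field holds
   the physical address of the first SB (or 0 when sb_desc is NULL). */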
8056
8057 #define TT_ZOUT 0
8058 #define TT_IN 1
8059 #define TT_OUT 2
8060 #define TT_SETUP 3
8061
8062 #define CMD_EOL IO_STATE(USB_SB_command, eol, yes)
8063 #define CMD_INTR IO_STATE(USB_SB_command, intr, yes)
8064 #define CMD_FULL IO_STATE(USB_SB_command, full, yes)
8065
8066 /* Allocation and setup of a generic SB. Used to create SETUP, OUT and ZOUT
8067    SBs. Also used by create_sb_in() to avoid duplicating the allocation
8068    procedure in two places */
8069 struct USB_SB_Desc* create_sb(struct USB_SB_Desc* sb_prev, int tt, void* data,
8070 int datalen, int mem_flags) {
8071 struct USB_SB_Desc *sb_desc;
8072 sb_desc = (struct USB_SB_Desc*)kmem_cache_alloc(usb_desc_cache, mem_flags);
8073 if(sb_desc == NULL)
8074 return NULL;
8075 memset(sb_desc, 0, sizeof(struct USB_SB_Desc));
8076
8077 sb_desc->command = IO_FIELD(USB_SB_command, tt, tt) |
8078 IO_STATE(USB_SB_command, eot, yes);
8079
8080 sb_desc->sw_len = datalen;
8081 if(data != NULL) {
8082 sb_desc->buf = virt_to_phys(data);
8083 } else {
8084 sb_desc->buf = 0;
8085 }
8086 if(sb_prev != NULL) {
8087 sb_prev->next = virt_to_phys(sb_desc);
8088 }
8089 return sb_desc;
8090 }
8091
8092 /* Creates a copy of an existing SB by allocating space for it and
8093    copying the settings */
8094 struct USB_SB_Desc* create_sb_copy(struct USB_SB_Desc* sb_orig, int mem_flags) {
8095 struct USB_SB_Desc *sb_desc;
8096 sb_desc = (struct USB_SB_Desc*)kmem_cache_alloc(usb_desc_cache, mem_flags);
8097 if(sb_desc == NULL)
8098 return NULL;
8099
8100 memcpy(sb_desc, sb_orig, sizeof(struct USB_SB_Desc));
8101 return sb_desc;
8102 }
8103
8104 /* A specific create_sb function for creation of In SBs. This is because
8105    datalen in In SBs shows how many packets we are expecting. It also
8106    sets up the rem field to show how many bytes we expect in the last
8107    packet if it's not a full one */
8108 struct USB_SB_Desc* create_sb_in(struct USB_SB_Desc* sb_prev, int datalen,
8109 int maxlen, int mem_flags) {
8110 struct USB_SB_Desc *sb_desc;
8111 sb_desc = create_sb(sb_prev, TT_IN, NULL,
8112 datalen ? (datalen - 1) / maxlen + 1 : 0, mem_flags);
8113 if(sb_desc == NULL)
8114 return NULL;
8115 sb_desc->command |= IO_FIELD(USB_SB_command, rem, datalen % maxlen);
8116 return sb_desc;
8117 }
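
/* Worked example (editorial): for an In transfer with datalen = 130 and
   maxlen = 64, the sw_len passed to create_sb() is
   (130 - 1) / 64 + 1 = 3 packets, and rem = 130 % 64 = 2, telling the
   controller that the last packet is expected to carry only 2 bytes. A
   datalen of exactly 128 gives 2 packets with rem = 0, i.e. all packets
   full-length. */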
8118
8119 void set_sb_cmds(struct USB_SB_Desc *sb_desc, __u16 flags) {
8120 sb_desc->command |= flags;
8121 }
8122
8123 int create_sb_for_urb(struct urb *urb, int mem_flags) {
8124 int is_out = !usb_pipein(urb->pipe);
8125 int type = usb_pipetype(urb->pipe);
8126 int maxlen = usb_maxpacket(urb->dev, urb->pipe, is_out);
8127 int buf_len = urb->transfer_buffer_length;
8128 void *buf = buf_len > 0 ? urb->transfer_buffer : NULL;
8129 struct USB_SB_Desc *sb_desc = NULL;
8130
8131 struct crisv10_urb_priv *urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
8132 ASSERT(urb_priv != NULL);
8133
8134 switch(type) {
8135 case PIPE_CONTROL:
8136 /* Setup stage */
8137 sb_desc = create_sb(NULL, TT_SETUP, urb->setup_packet, 8, mem_flags);
8138 if(sb_desc == NULL)
8139 return -ENOMEM;
8140 set_sb_cmds(sb_desc, CMD_FULL);
8141
8142 /* Attach first SB to URB */
8143 urb_priv->first_sb = sb_desc;
8144
8145 if (is_out) { /* Out Control URB */
8146 /* If this Control OUT transfer has an optional data stage we add
8147 an OUT token before the mandatory IN (status) token */
8148 if ((buf_len > 0) && buf) {
8149 sb_desc = create_sb(sb_desc, TT_OUT, buf, buf_len, mem_flags);
8150 if(sb_desc == NULL)
8151 return -ENOMEM;
8152 set_sb_cmds(sb_desc, CMD_FULL);
8153 }
8154
8155 /* Status stage */
8156 /* The data length has to be exactly 1. This is due to a requirement
8157 of the USB specification that a host must be prepared to receive
8158 data in the status phase */
8159 sb_desc = create_sb(sb_desc, TT_IN, NULL, 1, mem_flags);
8160 if(sb_desc == NULL)
8161 return -ENOMEM;
8162 } else { /* In control URB */
8163 /* Data stage */
8164 sb_desc = create_sb_in(sb_desc, buf_len, maxlen, mem_flags);
8165 if(sb_desc == NULL)
8166 return -ENOMEM;
8167
8168 /* Status stage */
8169 /* Read comment at zout_buffer declaration for an explanation to this. */
8170 sb_desc = create_sb(sb_desc, TT_ZOUT, &zout_buffer[0], 1, mem_flags);
8171 if(sb_desc == NULL)
8172 return -ENOMEM;
8173 /* Set descriptor interrupt flag for in URBs so we can finish URB after
8174 zout-packet has been sent */
8175 set_sb_cmds(sb_desc, CMD_INTR | CMD_FULL);
8176 }
8177 /* Set end-of-list flag in last SB */
8178 set_sb_cmds(sb_desc, CMD_EOL);
8179 /* Attach last SB to URB */
8180 urb_priv->last_sb = sb_desc;
8181 break;
8182
8183 case PIPE_BULK:
8184 if (is_out) { /* Out Bulk URB */
8185 sb_desc = create_sb(NULL, TT_OUT, buf, buf_len, mem_flags);
8186 if(sb_desc == NULL)
8187 return -ENOMEM;
8188 /* The full field is set to yes, even if we don't actually check that
8189 this is a full-length transfer (i.e., that transfer_buffer_length %
8190          maxlen == 0).
8191 Setting full prevents the USB controller from sending an empty packet
8192 in that case. However, if URB_ZERO_PACKET was set we want that. */
8193 if (!(urb->transfer_flags & URB_ZERO_PACKET)) {
8194 set_sb_cmds(sb_desc, CMD_FULL);
8195 }
8196 } else { /* In Bulk URB */
8197 sb_desc = create_sb_in(NULL, buf_len, maxlen, mem_flags);
8198 if(sb_desc == NULL)
8199 return -ENOMEM;
8200 }
8201 /* Set end-of-list flag for last SB */
8202 set_sb_cmds(sb_desc, CMD_EOL);
8203
8204 /* Attach SB to URB */
8205 urb_priv->first_sb = sb_desc;
8206 urb_priv->last_sb = sb_desc;
8207 break;
8208
8209 case PIPE_INTERRUPT:
8210 if(is_out) { /* Out Intr URB */
8211 sb_desc = create_sb(NULL, TT_OUT, buf, buf_len, mem_flags);
8212 if(sb_desc == NULL)
8213 return -ENOMEM;
8214
8215 /* The full field is set to yes, even if we don't actually check that
8216 this is a full-length transfer (i.e., that transfer_buffer_length %
8217          maxlen == 0).
8218 Setting full prevents the USB controller from sending an empty packet
8219 in that case. However, if URB_ZERO_PACKET was set we want that. */
8220 if (!(urb->transfer_flags & URB_ZERO_PACKET)) {
8221 set_sb_cmds(sb_desc, CMD_FULL);
8222 }
8223      /* Only generate a TX interrupt if it's an Out URB */
8224 set_sb_cmds(sb_desc, CMD_INTR);
8225
8226 } else { /* In Intr URB */
8227 sb_desc = create_sb_in(NULL, buf_len, maxlen, mem_flags);
8228 if(sb_desc == NULL)
8229 return -ENOMEM;
8230 }
8231 /* Set end-of-list flag for last SB */
8232 set_sb_cmds(sb_desc, CMD_EOL);
8233
8234 /* Attach SB to URB */
8235 urb_priv->first_sb = sb_desc;
8236 urb_priv->last_sb = sb_desc;
8237
8238 break;
8239 case PIPE_ISOCHRONOUS:
8240 if(is_out) { /* Out Isoc URB */
8241 int i;
8242 if(urb->number_of_packets == 0) {
8243 tc_err("Can't create SBs for Isoc URB with zero packets\n");
8244 return -EPIPE;
8245 }
8246 /* Create one SB descriptor for each packet and link them together. */
8247 for(i = 0; i < urb->number_of_packets; i++) {
8248 if (urb->iso_frame_desc[i].length > 0) {
8249
8250 sb_desc = create_sb(sb_desc, TT_OUT, urb->transfer_buffer +
8251 urb->iso_frame_desc[i].offset,
8252 urb->iso_frame_desc[i].length, mem_flags);
8253 if(sb_desc == NULL)
8254 return -ENOMEM;
8255
8256 /* Check if it's a full length packet */
8257 if (urb->iso_frame_desc[i].length ==
8258 usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))) {
8259 set_sb_cmds(sb_desc, CMD_FULL);
8260 }
8261
8262 } else { /* zero length packet */
8263 sb_desc = create_sb(sb_desc, TT_ZOUT, &zout_buffer[0], 1, mem_flags);
8264 if(sb_desc == NULL)
8265 return -ENOMEM;
8266 set_sb_cmds(sb_desc, CMD_FULL);
8267 }
8268 /* Attach first SB descriptor to URB */
8269 if (i == 0) {
8270 urb_priv->first_sb = sb_desc;
8271 }
8272 }
8273 /* Set interrupt and end-of-list flags in last SB */
8274 set_sb_cmds(sb_desc, CMD_INTR | CMD_EOL);
8275 /* Attach last SB descriptor to URB */
8276 urb_priv->last_sb = sb_desc;
8277 tc_dbg("Created %d out SBs for Isoc URB:0x%x\n",
8278 urb->number_of_packets, (unsigned int)urb);
8279 } else { /* In Isoc URB */
8280      /* The actual number of packets is not relevant for periodic In traffic
8281         as long as it is more than zero. Always set to 1. */
8282 sb_desc = create_sb(sb_desc, TT_IN, NULL, 1, mem_flags);
8283 if(sb_desc == NULL)
8284 return -ENOMEM;
8285 /* Set end-of-list flags for SB */
8286 set_sb_cmds(sb_desc, CMD_EOL);
8287
8288 /* Attach SB to URB */
8289 urb_priv->first_sb = sb_desc;
8290 urb_priv->last_sb = sb_desc;
8291 }
8292 break;
8293 default:
8294 tc_err("Unknown pipe-type\n");
8295 return -EPIPE;
8296 break;
8297 }
8298 return 0;
8299 }
8300
8301 int init_intr_urb(struct urb *urb, int mem_flags) {
8302 struct crisv10_urb_priv *urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
8303 struct USB_EP_Desc* ep_desc;
8304 int interval;
8305 int i;
8306 int ep_count;
8307
8308 ASSERT(urb_priv != NULL);
8309 ASSERT(usb_pipeint(urb->pipe));
8310   /* We can't support an interval longer than the number of eof descriptors
8311      in TxIntrEPList */
8312 if(urb->interval > MAX_INTR_INTERVAL) {
8313 tc_err("Interrupt interval %dms too big (max: %dms)\n", urb->interval,
8314 MAX_INTR_INTERVAL);
8315 return -EINVAL;
8316 }
8317
8318   /* We assume that the SB descriptors have already been set up */
8319 ASSERT(urb_priv->first_sb != NULL);
8320
8321   /* Round the interval down to the nearest power of two. This code
8322      obviously favours smaller numbers, but that is actually a good thing */
8323 /* FIXME: The "rounding error" for larger intervals will be quite
8324 large. For in traffic this shouldn't be a problem since it will only
8325 mean that we "poll" more often. */
8326 interval = urb->interval;
8327 for (i = 0; interval; i++) {
8328 interval = interval >> 1;
8329 }
8330 urb_priv->interval = 1 << (i - 1);
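  /* Example: urb->interval = 10 leaves i = 4 after the loop above, so the
     interval is rounded down to 1 << 3 = 8 ms. */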
8331
8332   /* Out Interrupt URBs must use the maximum interval because we can only
8333      handle one linked-in EP per epid in the Intr descriptor array at a
8334      time. The USB controller in the ETRAX 100LX continues to process Intr
8335      EPs, so if several were linked in we would have no way of knowing which
8336      one caused the actual transfer. */
8337 if(usb_pipeout(urb->pipe)) {
8338 urb_priv->interval = MAX_INTR_INTERVAL;
8339 }
8340
8341 /* Calculate amount of EPs needed */
8342 ep_count = MAX_INTR_INTERVAL / urb_priv->interval;
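  /* E.g. an 8 ms interval needs 128 / 8 = 16 EP descriptors, one for each
     occurrence of the URB within the 128 ms Intr schedule. */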
8343
8344 for(i = 0; i < ep_count; i++) {
8345 ep_desc = create_ep(urb_priv->epid, urb_priv->first_sb, mem_flags);
8346 if(ep_desc == NULL) {
8347 /* Free any descriptors that we may have allocated before failure */
8348 while(i > 0) {
8349 i--;
8350 kfree(urb_priv->intr_ep_pool[i]);
8351 }
8352 return -ENOMEM;
8353 }
8354 urb_priv->intr_ep_pool[i] = ep_desc;
8355 }
8356 urb_priv->intr_ep_pool_length = ep_count;
8357 return 0;
8358 }
8359
8360 /* DMA RX/TX functions */
8361 /* ----------------------- */
8362
8363 static void tc_dma_init_rx_list(void) {
8364 int i;
8365
8366 /* Setup descriptor list except last one */
8367 for (i = 0; i < (NBR_OF_RX_DESC - 1); i++) {
8368 RxDescList[i].sw_len = RX_DESC_BUF_SIZE;
8369 RxDescList[i].command = 0;
8370 RxDescList[i].next = virt_to_phys(&RxDescList[i + 1]);
8371 RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE));
8372 RxDescList[i].hw_len = 0;
8373 RxDescList[i].status = 0;
8374
8375 /* DMA IN cache bug. (struct etrax_dma_descr has the same layout as
8376 USB_IN_Desc for the relevant fields.) */
8377 prepare_rx_descriptor((struct etrax_dma_descr*)&RxDescList[i]);
8378
8379 }
8380 /* Special handling of last descriptor */
8381 RxDescList[i].sw_len = RX_DESC_BUF_SIZE;
8382 RxDescList[i].command = IO_STATE(USB_IN_command, eol, yes);
8383 RxDescList[i].next = virt_to_phys(&RxDescList[0]);
8384 RxDescList[i].buf = virt_to_phys(RxBuf + (i * RX_DESC_BUF_SIZE));
8385 RxDescList[i].hw_len = 0;
8386 RxDescList[i].status = 0;
8387
8388 /* Setup list pointers that show progress in list */
8389 myNextRxDesc = &RxDescList[0];
8390 myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
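  /* The RX descriptors form one circular ring shared by all epids.
     myNextRxDesc is where the next packet will be consumed and myLastRxDesc
     holds the current eol marker; both are advanced in
     tc_dma_rx_interrupt(). */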
8391
8392 flush_etrax_cache();
8393 /* Point DMA to first descriptor in list and start it */
8394 *R_DMA_CH9_FIRST = virt_to_phys(myNextRxDesc);
8395 *R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, start);
8396 }
8397
8398
8399 static void tc_dma_init_tx_bulk_list(void) {
8400 int i;
8401 volatile struct USB_EP_Desc *epDescr;
8402
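  /* Besides its bulk EP, each epid gets a private pair of dummy EP
     descriptors (TxBulkDummyEPList). For In Bulk transfers the pair is
     linked in after the real EP (see tc_dma_process_queue()); the first
     dummy has the intr flag set, so a sub0 descriptor interrupt fires when
     the DMA channel is about to become disabled. */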
8403 for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
8404 epDescr = &(TxBulkEPList[i]);
8405 CHECK_ALIGN(epDescr);
8406 epDescr->hw_len = 0;
8407 epDescr->command = IO_FIELD(USB_EP_command, epid, i);
8408 epDescr->sub = 0;
8409 epDescr->next = virt_to_phys(&TxBulkEPList[i + 1]);
8410
8411 /* Initiate two EPs, disabled and with the eol flag set. No need for any
8412 preserved epid. */
8413
8414 /* The first one has the intr flag set so we get an interrupt when the DMA
8415 channel is about to become disabled. */
8416 CHECK_ALIGN(&TxBulkDummyEPList[i][0]);
8417 TxBulkDummyEPList[i][0].hw_len = 0;
8418 TxBulkDummyEPList[i][0].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
8419 IO_STATE(USB_EP_command, eol, yes) |
8420 IO_STATE(USB_EP_command, intr, yes));
8421 TxBulkDummyEPList[i][0].sub = 0;
8422 TxBulkDummyEPList[i][0].next = virt_to_phys(&TxBulkDummyEPList[i][1]);
8423
8424 /* The second one. */
8425 CHECK_ALIGN(&TxBulkDummyEPList[i][1]);
8426 TxBulkDummyEPList[i][1].hw_len = 0;
8427 TxBulkDummyEPList[i][1].command = (IO_FIELD(USB_EP_command, epid, DUMMY_EPID) |
8428 IO_STATE(USB_EP_command, eol, yes));
8429 TxBulkDummyEPList[i][1].sub = 0;
8430 /* The last dummy's next pointer is the same as the current EP's next pointer. */
8431 TxBulkDummyEPList[i][1].next = virt_to_phys(&TxBulkEPList[i + 1]);
8432 }
8433
8434 /* Special handling of last descr in list, make list circular */
8435 epDescr = &TxBulkEPList[i];
8436 CHECK_ALIGN(epDescr);
8437 epDescr->hw_len = 0;
8438 epDescr->command = IO_STATE(USB_EP_command, eol, yes) |
8439 IO_FIELD(USB_EP_command, epid, i);
8440 epDescr->sub = 0;
8441 epDescr->next = virt_to_phys(&TxBulkEPList[0]);
8442
8443 /* Init DMA sub-channel pointers to last item in each list */
8444 *R_DMA_CH8_SUB0_EP = virt_to_phys(&TxBulkEPList[i]);
8445 /* No point in starting the bulk channel yet.
8446 *R_DMA_CH8_SUB0_CMD = IO_STATE(R_DMA_CH8_SUB0_CMD, cmd, start); */
8447 }
8448
8449 static void tc_dma_init_tx_ctrl_list(void) {
8450 int i;
8451 volatile struct USB_EP_Desc *epDescr;
8452
8453 for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
8454 epDescr = &(TxCtrlEPList[i]);
8455 CHECK_ALIGN(epDescr);
8456 epDescr->hw_len = 0;
8457 epDescr->command = IO_FIELD(USB_EP_command, epid, i);
8458 epDescr->sub = 0;
8459 epDescr->next = virt_to_phys(&TxCtrlEPList[i + 1]);
8460 }
8461 /* Special handling of last descr in list, make list circular */
8462 epDescr = &TxCtrlEPList[i];
8463 CHECK_ALIGN(epDescr);
8464 epDescr->hw_len = 0;
8465 epDescr->command = IO_STATE(USB_EP_command, eol, yes) |
8466 IO_FIELD(USB_EP_command, epid, i);
8467 epDescr->sub = 0;
8468 epDescr->next = virt_to_phys(&TxCtrlEPList[0]);
8469
8470 /* Init DMA sub-channel pointers to last item in each list */
8471 *R_DMA_CH8_SUB1_EP = virt_to_phys(&TxCtrlEPList[i]);
8472 /* No point in starting the ctrl channel yet.
8473      *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB1_CMD, cmd, start); */
8474 }
8475
8476
8477 static void tc_dma_init_tx_intr_list(void) {
8478 int i;
8479
8480 TxIntrSB_zout.sw_len = 1;
8481 TxIntrSB_zout.next = 0;
8482 TxIntrSB_zout.buf = virt_to_phys(&zout_buffer[0]);
8483 TxIntrSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
8484 IO_STATE(USB_SB_command, tt, zout) |
8485 IO_STATE(USB_SB_command, full, yes) |
8486 IO_STATE(USB_SB_command, eot, yes) |
8487 IO_STATE(USB_SB_command, eol, yes));
8488
8489 for (i = 0; i < (MAX_INTR_INTERVAL - 1); i++) {
8490 CHECK_ALIGN(&TxIntrEPList[i]);
8491 TxIntrEPList[i].hw_len = 0;
8492 TxIntrEPList[i].command =
8493 (IO_STATE(USB_EP_command, eof, yes) |
8494 IO_STATE(USB_EP_command, enable, yes) |
8495 IO_FIELD(USB_EP_command, epid, INVALID_EPID));
8496 TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
8497 TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[i + 1]);
8498 }
8499
8500 /* Special handling of last descr in list, make list circular */
8501 CHECK_ALIGN(&TxIntrEPList[i]);
8502 TxIntrEPList[i].hw_len = 0;
8503 TxIntrEPList[i].command =
8504 (IO_STATE(USB_EP_command, eof, yes) |
8505 IO_STATE(USB_EP_command, eol, yes) |
8506 IO_STATE(USB_EP_command, enable, yes) |
8507 IO_FIELD(USB_EP_command, epid, INVALID_EPID));
8508 TxIntrEPList[i].sub = virt_to_phys(&TxIntrSB_zout);
8509 TxIntrEPList[i].next = virt_to_phys(&TxIntrEPList[0]);
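  /* The result is a circular schedule of MAX_INTR_INTERVAL entries where
     each eof-flagged entry marks a 1 ms frame boundary. All slots initially
     carry INVALID_EPID with a zout SB; real Intr EPs are linked in between
     the slots by tc_dma_link_intr_urb(). */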
8510
8511 intr_dbg("Initiated Intr EP descriptor list\n");
8512
8513
8514 /* Connect DMA 8 sub-channel 2 to first in list */
8515 *R_DMA_CH8_SUB2_EP = virt_to_phys(&TxIntrEPList[0]);
8516 }
8517
8518 static void tc_dma_init_tx_isoc_list(void) {
8519 int i;
8520
8521 DBFENTER;
8522
8523   /* Read comment at zout_buffer declaration for an explanation of this. */
8524 TxIsocSB_zout.sw_len = 1;
8525 TxIsocSB_zout.next = 0;
8526 TxIsocSB_zout.buf = virt_to_phys(&zout_buffer[0]);
8527 TxIsocSB_zout.command = (IO_FIELD(USB_SB_command, rem, 0) |
8528 IO_STATE(USB_SB_command, tt, zout) |
8529 IO_STATE(USB_SB_command, full, yes) |
8530 IO_STATE(USB_SB_command, eot, yes) |
8531 IO_STATE(USB_SB_command, eol, yes));
8532
8533 /* The last isochronous EP descriptor is a dummy. */
8534 for (i = 0; i < (NBR_OF_EPIDS - 1); i++) {
8535 CHECK_ALIGN(&TxIsocEPList[i]);
8536 TxIsocEPList[i].hw_len = 0;
8537 TxIsocEPList[i].command = IO_FIELD(USB_EP_command, epid, i);
8538 TxIsocEPList[i].sub = 0;
8539 TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[i + 1]);
8540 }
8541
8542 CHECK_ALIGN(&TxIsocEPList[i]);
8543 TxIsocEPList[i].hw_len = 0;
8544
8545 /* Must enable the last EP descr to get eof interrupt. */
8546 TxIsocEPList[i].command = (IO_STATE(USB_EP_command, enable, yes) |
8547 IO_STATE(USB_EP_command, eof, yes) |
8548 IO_STATE(USB_EP_command, eol, yes) |
8549 IO_FIELD(USB_EP_command, epid, INVALID_EPID));
8550 TxIsocEPList[i].sub = virt_to_phys(&TxIsocSB_zout);
8551 TxIsocEPList[i].next = virt_to_phys(&TxIsocEPList[0]);
8552
8553 *R_DMA_CH8_SUB3_EP = virt_to_phys(&TxIsocEPList[0]);
8554 *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);
8555 }
8556
8557 static int tc_dma_init(struct usb_hcd *hcd) {
8558 tc_dma_init_rx_list();
8559 tc_dma_init_tx_bulk_list();
8560 tc_dma_init_tx_ctrl_list();
8561 tc_dma_init_tx_intr_list();
8562 tc_dma_init_tx_isoc_list();
8563
8564 if (cris_request_dma(USB_TX_DMA_NBR,
8565 "ETRAX 100LX built-in USB (Tx)",
8566 DMA_VERBOSE_ON_ERROR,
8567 dma_usb)) {
8568 err("Could not allocate DMA ch 8 for USB");
8569 return -EBUSY;
8570 }
8571
8572 if (cris_request_dma(USB_RX_DMA_NBR,
8573 "ETRAX 100LX built-in USB (Rx)",
8574 DMA_VERBOSE_ON_ERROR,
8575 dma_usb)) {
8576 err("Could not allocate DMA ch 9 for USB");
8577 return -EBUSY;
8578 }
8579
8580 *R_IRQ_MASK2_SET =
8581 /* Note that these interrupts are not used. */
8582 IO_STATE(R_IRQ_MASK2_SET, dma8_sub0_descr, set) |
8583 /* Sub channel 1 (ctrl) descr. interrupts are used. */
8584 IO_STATE(R_IRQ_MASK2_SET, dma8_sub1_descr, set) |
8585 IO_STATE(R_IRQ_MASK2_SET, dma8_sub2_descr, set) |
8586 /* Sub channel 3 (isoc) descr. interrupts are used. */
8587 IO_STATE(R_IRQ_MASK2_SET, dma8_sub3_descr, set);
8588
8589 /* Note that the dma9_descr interrupt is not used. */
8590 *R_IRQ_MASK2_SET =
8591 IO_STATE(R_IRQ_MASK2_SET, dma9_eop, set) |
8592 IO_STATE(R_IRQ_MASK2_SET, dma9_descr, set);
8593
8594 if (request_irq(ETRAX_USB_RX_IRQ, tc_dma_rx_interrupt, 0,
8595 "ETRAX 100LX built-in USB (Rx)", hcd)) {
8596 err("Could not allocate IRQ %d for USB", ETRAX_USB_RX_IRQ);
8597 return -EBUSY;
8598 }
8599
8600 if (request_irq(ETRAX_USB_TX_IRQ, tc_dma_tx_interrupt, 0,
8601 "ETRAX 100LX built-in USB (Tx)", hcd)) {
8602 err("Could not allocate IRQ %d for USB", ETRAX_USB_TX_IRQ);
8603 return -EBUSY;
8604 }
8605
8606 return 0;
8607 }
8608
8609 static void tc_dma_destroy(void) {
8610 free_irq(ETRAX_USB_RX_IRQ, NULL);
8611 free_irq(ETRAX_USB_TX_IRQ, NULL);
8612
8613 cris_free_dma(USB_TX_DMA_NBR, "ETRAX 100LX built-in USB (Tx)");
8614 cris_free_dma(USB_RX_DMA_NBR, "ETRAX 100LX built-in USB (Rx)");
8615
8616 }
8617
8618 static void tc_dma_link_intr_urb(struct urb *urb);
8619
8620 /* Handle processing of Bulk, Ctrl and Intr queues */
8621 static void tc_dma_process_queue(int epid) {
8622 struct urb *urb;
8623 struct crisv10_urb_priv *urb_priv;
8624 unsigned long flags;
8625 char toggle;
8626
8627 if(epid_state[epid].disabled) {
8628 /* Don't process any URBs on a disabled endpoint */
8629 return;
8630 }
8631
8632 /* Do not disturb us while fiddling with EPs and epids */
8633 local_irq_save(flags);
8634
8635   /* For Bulk, Ctrl and Intr we can only have one URB active at a time on
8636      a specific EP. */
8637 if(activeUrbList[epid] != NULL) {
8638 /* An URB is already active on EP, skip checking queue */
8639 local_irq_restore(flags);
8640 return;
8641 }
8642
8643 urb = urb_list_first(epid);
8644 if(urb == NULL) {
8645     /* No URB waiting in EP queue. Nothing to do */
8646 local_irq_restore(flags);
8647 return;
8648 }
8649
8650 urb_priv = urb->hcpriv;
8651 ASSERT(urb_priv != NULL);
8652 ASSERT(urb_priv->urb_state == NOT_STARTED);
8653 ASSERT(!usb_pipeisoc(urb->pipe));
8654
8655 /* Remove this URB from the queue and move it to active */
8656 activeUrbList[epid] = urb;
8657 urb_list_del(urb, epid);
8658
8659 urb_priv->urb_state = STARTED;
8660
8661 /* Reset error counters (regardless of which direction this traffic is). */
8662 etrax_epid_clear_error(epid);
8663
8664 /* Special handling of Intr EP lists */
8665 if(usb_pipeint(urb->pipe)) {
8666 tc_dma_link_intr_urb(urb);
8667 local_irq_restore(flags);
8668 return;
8669 }
8670
8671 /* Software must preset the toggle bits for Bulk and Ctrl */
8672 if(usb_pipecontrol(urb->pipe)) {
8673 /* Toggle bits are initialized only during setup transaction in a
8674 CTRL transfer */
8675 etrax_epid_set_toggle(epid, 0, 0);
8676 etrax_epid_set_toggle(epid, 1, 0);
8677 } else {
8678 toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
8679 usb_pipeout(urb->pipe));
8680 etrax_epid_set_toggle(epid, usb_pipeout(urb->pipe), toggle);
8681 }
8682
8683 tc_dbg("Added SBs from (URB:0x%x %s %s) to epid %d: %s\n",
8684 (unsigned int)urb, str_dir(urb->pipe), str_type(urb->pipe), epid,
8685 sblist_to_str(urb_priv->first_sb));
8686
8687 /* We start the DMA sub channel without checking if it's running or not,
8688 because:
8689 1) If it's already running, issuing the start command is a nop.
8690 2) We avoid a test-and-set race condition. */
8691 switch(usb_pipetype(urb->pipe)) {
8692 case PIPE_BULK:
8693 /* Assert that the EP descriptor is disabled. */
8694 ASSERT(!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)));
8695
8696 /* Set up and enable the EP descriptor. */
8697 TxBulkEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
8698 TxBulkEPList[epid].hw_len = 0;
8699 TxBulkEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
8700
8701 /* Check if the dummy list is already with us (if several urbs were queued). */
8702 if (usb_pipein(urb->pipe) && (TxBulkEPList[epid].next != virt_to_phys(&TxBulkDummyEPList[epid][0]))) {
8703 tc_dbg("Inviting dummy list to the party for urb 0x%lx, epid %d",
8704 (unsigned long)urb, epid);
8705
8706 /* We don't need to check if the DMA is at this EP or not before changing the
8707 next pointer, since we will do it in one 32-bit write (EP descriptors are
8708 32-bit aligned). */
8709 TxBulkEPList[epid].next = virt_to_phys(&TxBulkDummyEPList[epid][0]);
8710 }
8711
8712 restart_dma8_sub0();
8713
8714 /* Update/restart the bulk start timer since we just started the channel.*/
8715 mod_timer(&bulk_start_timer, jiffies + BULK_START_TIMER_INTERVAL);
8716 /* Update/restart the bulk eot timer since we just inserted traffic. */
8717 mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
8718 break;
8719 case PIPE_CONTROL:
8720 /* Assert that the EP descriptor is disabled. */
8721 ASSERT(!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)));
8722
8723 /* Set up and enable the EP descriptor. */
8724 TxCtrlEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
8725 TxCtrlEPList[epid].hw_len = 0;
8726 TxCtrlEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
8727
8728 *R_DMA_CH8_SUB1_CMD = IO_STATE(R_DMA_CH8_SUB1_CMD, cmd, start);
8729 break;
8730 }
8731 local_irq_restore(flags);
8732 }
8733
8734 static void tc_dma_link_intr_urb(struct urb *urb) {
8735 struct crisv10_urb_priv *urb_priv = urb->hcpriv;
8736 volatile struct USB_EP_Desc *tmp_ep;
8737 struct USB_EP_Desc *ep_desc;
8738 int i = 0, epid;
8739 int pool_idx = 0;
8740
8741 ASSERT(urb_priv != NULL);
8742 epid = urb_priv->epid;
8743 ASSERT(urb_priv->interval > 0);
8744 ASSERT(urb_priv->intr_ep_pool_length > 0);
8745
8746 tmp_ep = &TxIntrEPList[0];
8747
8748   /* Only insert one EP descriptor in the list for Out Intr URBs.
8749      We can only handle Out Intr with an interval of 128 ms, since it's
8750      not possible to insert several Out Intr EPs: they are not consumed
8751      by the DMA. */
8752 if(usb_pipeout(urb->pipe)) {
8753 ep_desc = urb_priv->intr_ep_pool[0];
8754 ASSERT(ep_desc);
8755 ep_desc->next = tmp_ep->next;
8756 tmp_ep->next = virt_to_phys(ep_desc);
8757 i++;
8758 } else {
8759 /* Loop through Intr EP descriptor list and insert EP for URB at
8760 specified interval */
8761 do {
8762       /* Each EP descriptor with the eof flag set signals a new frame */
8763 if (tmp_ep->command & IO_MASK(USB_EP_command, eof)) {
8764         /* Insert an EP from the URB's EP pool at the correct interval */
8765 if ((i % urb_priv->interval) == 0) {
8766 ep_desc = urb_priv->intr_ep_pool[pool_idx];
8767 ASSERT(ep_desc);
8768 ep_desc->next = tmp_ep->next;
8769 tmp_ep->next = virt_to_phys(ep_desc);
8770 pool_idx++;
8771 ASSERT(pool_idx <= urb_priv->intr_ep_pool_length);
8772 }
8773 i++;
8774 }
8775 tmp_ep = (struct USB_EP_Desc *)phys_to_virt(tmp_ep->next);
8776 } while(tmp_ep != &TxIntrEPList[0]);
8777 }
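  /* Example: with urb_priv->interval = 32 an EP is inserted at every 32nd
     frame boundary, i.e. 128 / 32 = 4 EPs from the pool end up in the
     schedule. */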
8778
8779 intr_dbg("Added SBs to intr epid %d: %s interval:%d (%d EP)\n", epid,
8780 sblist_to_str(urb_priv->first_sb), urb_priv->interval, pool_idx);
8781
8782 /* We start the DMA sub channel without checking if it's running or not,
8783 because:
8784 1) If it's already running, issuing the start command is a nop.
8785 2) We avoid a test-and-set race condition. */
8786 *R_DMA_CH8_SUB2_CMD = IO_STATE(R_DMA_CH8_SUB2_CMD, cmd, start);
8787 }
8788
8789 static void tc_dma_process_isoc_urb(struct urb *urb) {
8790 unsigned long flags;
8791 struct crisv10_urb_priv *urb_priv = urb->hcpriv;
8792 int epid;
8793
8794 /* Do not disturb us while fiddling with EPs and epids */
8795 local_irq_save(flags);
8796
8797 ASSERT(urb_priv);
8798 ASSERT(urb_priv->first_sb);
8799 epid = urb_priv->epid;
8800
8801 if(activeUrbList[epid] == NULL) {
8802 /* EP is idle, so make this URB active */
8803 activeUrbList[epid] = urb;
8804 urb_list_del(urb, epid);
8805 ASSERT(TxIsocEPList[epid].sub == 0);
8806 ASSERT(!(TxIsocEPList[epid].command &
8807 IO_STATE(USB_EP_command, enable, yes)));
8808
8809     /* Differentiate between In and Out Isoc, because In SBs are not consumed */
8810 if(usb_pipein(urb->pipe)) {
8811       /* Each EP for In Isoc will have only one SB descriptor, set up when
8812          submitting the first active URB. We do it here by copying from the
8813          URB's pre-allocated SB. */
8814 memcpy((void *)&(TxIsocSBList[epid]), urb_priv->first_sb,
8815 sizeof(TxIsocSBList[epid]));
8816 TxIsocEPList[epid].hw_len = 0;
8817 TxIsocEPList[epid].sub = virt_to_phys(&(TxIsocSBList[epid]));
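      /* Note: the SB is copied into the static, per-epid TxIsocSBList entry,
         presumably so that the descriptor the hardware keeps reusing stays
         valid even after this URB (and its own SB) has been completed and
         freed. */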
8818 } else {
8819 /* For Out Isoc we attach the pre-allocated list of SBs for the URB */
8820 TxIsocEPList[epid].hw_len = 0;
8821 TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
8822
8823 isoc_dbg("Attached first URB:0x%x[%d] to epid:%d first_sb:0x%x"
8824 " last_sb::0x%x\n",
8825 (unsigned int)urb, urb_priv->urb_num, epid,
8826 (unsigned int)(urb_priv->first_sb),
8827 (unsigned int)(urb_priv->last_sb));
8828 }
8829
8830 if (urb->transfer_flags & URB_ISO_ASAP) {
8831 /* The isoc transfer should be started as soon as possible. The
8832 start_frame field is a return value if URB_ISO_ASAP was set. Comparing
8833 R_USB_FM_NUMBER with a USB Chief trace shows that the first isoc IN
8834 token is sent 2 frames later. I'm not sure how this affects usage of
8835 the start_frame field by the device driver, or how it affects things
8836          when URB_ISO_ASAP is not set, so there's no compensation for
8837 the 2 frame "lag" here. */
8838 urb->start_frame = (*R_USB_FM_NUMBER & 0x7ff);
8839 TxIsocEPList[epid].command |= IO_STATE(USB_EP_command, enable, yes);
8840 urb_priv->urb_state = STARTED;
8841 isoc_dbg("URB_ISO_ASAP set, urb->start_frame set to %d\n",
8842 urb->start_frame);
8843 } else {
8844 /* Not started yet. */
8845 urb_priv->urb_state = NOT_STARTED;
8846 isoc_warn("urb_priv->urb_state set to NOT_STARTED for URB:0x%x\n",
8847 (unsigned int)urb);
8848 }
8849
8850 } else {
8851 /* An URB is already active on the EP. Leave URB in queue and let
8852 finish_isoc_urb process it after current active URB */
8853 ASSERT(TxIsocEPList[epid].sub != 0);
8854
8855 if(usb_pipein(urb->pipe)) {
8856       /* Because there already is an active In URB on this epid we do nothing;
8857          the finish_isoc_urb() function will handle switching to the next URB */
8858
8859 } else { /* For Out Isoc, insert new URBs traffic last in SB-list. */
8860 struct USB_SB_Desc *temp_sb_desc;
8861
8862       /* Set state STARTED on all Out Isoc URBs added to the SB list, because
8863          we don't know how many of them finish before the descr interrupt */
8864 urb_priv->urb_state = STARTED;
8865
8866       /* Find end of current SB list by looking for SB with eol flag set */
8867 temp_sb_desc = phys_to_virt(TxIsocEPList[epid].sub);
8868 while ((temp_sb_desc->command & IO_MASK(USB_SB_command, eol)) !=
8869 IO_STATE(USB_SB_command, eol, yes)) {
8870 ASSERT(temp_sb_desc->next);
8871 temp_sb_desc = phys_to_virt(temp_sb_desc->next);
8872 }
8873
8874 isoc_dbg("Appended URB:0x%x[%d] (first:0x%x last:0x%x) to epid:%d"
8875 " sub:0x%x eol:0x%x\n",
8876 (unsigned int)urb, urb_priv->urb_num,
8877 (unsigned int)(urb_priv->first_sb),
8878 (unsigned int)(urb_priv->last_sb), epid,
8879 (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
8880 (unsigned int)temp_sb_desc);
8881
8882 /* Next pointer must be set before eol is removed. */
8883 temp_sb_desc->next = virt_to_phys(urb_priv->first_sb);
8884       /* Clear the previous end-of-list flag, since there is a new one in
8885          the appended SB descriptor list. */
8886 temp_sb_desc->command &= ~IO_MASK(USB_SB_command, eol);
8887
8888 if (!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) {
8889 __u32 epid_data;
8890 /* 8.8.5 in Designer's Reference says we should check for and correct
8891 any errors in the EP here. That should not be necessary if
8892 epid_attn is handled correctly, so we assume all is ok. */
8893 epid_data = etrax_epid_iso_get(epid);
8894 if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) !=
8895 IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
8896 isoc_err("Disabled Isoc EP with error:%d on epid:%d when appending"
8897 " URB:0x%x[%d]\n",
8898 IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data), epid,
8899 (unsigned int)urb, urb_priv->urb_num);
8900 }
8901
8902 /* The SB list was exhausted. */
8903 if (virt_to_phys(urb_priv->last_sb) != TxIsocEPList[epid].sub) {
8904 /* The new sublist did not get processed before the EP was
8905 disabled. Setup the EP again. */
8906
8907 if(virt_to_phys(temp_sb_desc) == TxIsocEPList[epid].sub) {
8908 isoc_dbg("EP for epid:%d stoped at SB:0x%x before newly inserted"
8909 ", restarting from this URBs SB:0x%x\n",
8910 epid, (unsigned int)temp_sb_desc,
8911 (unsigned int)(urb_priv->first_sb));
8912 TxIsocEPList[epid].hw_len = 0;
8913 TxIsocEPList[epid].sub = virt_to_phys(urb_priv->first_sb);
8914 urb->start_frame = (*R_USB_FM_NUMBER & 0x7ff);
8915 /* Enable the EP again so data gets processed this time */
8916 TxIsocEPList[epid].command |=
8917 IO_STATE(USB_EP_command, enable, yes);
8918
8919 } else {
8920             /* The EP has been disabled, but not at the end of this URB (god
8921                knows where). This should generate an epid_attn, so we should not
8922                be here */
8923 isoc_warn("EP was disabled on sb:0x%x before SB list for"
8924 " URB:0x%x[%d] got processed\n",
8925 (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
8926 (unsigned int)urb, urb_priv->urb_num);
8927 }
8928 } else {
8929           /* This might happen if we are slow in this function; it isn't
8930              an error. */
8931 isoc_dbg("EP was disabled and finished with SBs from appended"
8932 " URB:0x%x[%d]\n", (unsigned int)urb, urb_priv->urb_num);
8933 }
8934 }
8935 }
8936 }
8937
8938 /* Start the DMA sub channel */
8939 *R_DMA_CH8_SUB3_CMD = IO_STATE(R_DMA_CH8_SUB3_CMD, cmd, start);
8940
8941 local_irq_restore(flags);
8942 }
8943
8944 static void tc_dma_unlink_intr_urb(struct urb *urb) {
8945 struct crisv10_urb_priv *urb_priv = urb->hcpriv;
8946 volatile struct USB_EP_Desc *first_ep; /* First EP in the list. */
8947 volatile struct USB_EP_Desc *curr_ep; /* Current EP, the iterator. */
8948 volatile struct USB_EP_Desc *next_ep; /* The EP after current. */
8949 volatile struct USB_EP_Desc *unlink_ep; /* The one we should remove from
8950 the list. */
8951 int count = 0;
8952 volatile int timeout = 10000;
8953 int epid;
8954
8955 /* Read 8.8.4 in Designer's Reference, "Removing an EP Descriptor from the
8956 List". */
8957 ASSERT(urb_priv);
8958 ASSERT(urb_priv->intr_ep_pool_length > 0);
8959 epid = urb_priv->epid;
8960
8961 /* First disable all Intr EPs belonging to epid for this URB */
8962 first_ep = &TxIntrEPList[0];
8963 curr_ep = first_ep;
8964 do {
8965 next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
8966 if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
8967 /* Disable EP */
8968 next_ep->command &= ~IO_MASK(USB_EP_command, enable);
8969 }
8970 curr_ep = phys_to_virt(curr_ep->next);
8971 } while (curr_ep != first_ep);
8972
8973
8974 /* Now unlink all EPs belonging to this epid from Descr list */
8975 first_ep = &TxIntrEPList[0];
8976 curr_ep = first_ep;
8977 do {
8978 next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
8979 if (IO_EXTRACT(USB_EP_command, epid, next_ep->command) == epid) {
8980 /* This is the one we should unlink. */
8981 unlink_ep = next_ep;
8982
8983 /* Actually unlink the EP from the DMA list. */
8984 curr_ep->next = unlink_ep->next;
8985
8986 /* Wait until the DMA is no longer at this descriptor. */
8987 while((*R_DMA_CH8_SUB2_EP == virt_to_phys(unlink_ep)) &&
8988 (timeout-- > 0));
8989
8990 count++;
8991 }
8992 curr_ep = phys_to_virt(curr_ep->next);
8993 } while (curr_ep != first_ep);
8994
8995 if(count != urb_priv->intr_ep_pool_length) {
8996 intr_warn("Unlinked %d of %d Intr EPs for URB:0x%x[%d]\n", count,
8997 urb_priv->intr_ep_pool_length, (unsigned int)urb,
8998 urb_priv->urb_num);
8999 } else {
9000 intr_dbg("Unlinked %d of %d interrupt EPs for URB:0x%x\n", count,
9001 urb_priv->intr_ep_pool_length, (unsigned int)urb);
9002 }
9003 }
9004
9005 static void check_finished_bulk_tx_epids(struct usb_hcd *hcd,
9006 int timer) {
9007 unsigned long flags;
9008 int epid;
9009 struct urb *urb;
9010 struct crisv10_urb_priv * urb_priv;
9011 __u32 epid_data;
9012
9013   /* Protect TxBulkEPList */
9014 local_irq_save(flags);
9015
9016 for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
9017 /* A finished EP descriptor is disabled and has a valid sub pointer */
9018 if (!(TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) &&
9019 (TxBulkEPList[epid].sub != 0)) {
9020
9021 /* Get the active URB for this epid */
9022 urb = activeUrbList[epid];
9023 /* Sanity checks */
9024 ASSERT(urb);
9025 urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
9026 ASSERT(urb_priv);
9027
9028 /* Only handle finished out Bulk EPs here,
9029 and let RX interrupt take care of the rest */
9030 if(!epid_out_traffic(epid)) {
9031 continue;
9032 }
9033
9034 if(timer) {
9035 tc_warn("Found finished %s Bulk epid:%d URB:0x%x[%d] from timeout\n",
9036 epid_out_traffic(epid) ? "Out" : "In", epid, (unsigned int)urb,
9037 urb_priv->urb_num);
9038 } else {
9039 tc_dbg("Found finished %s Bulk epid:%d URB:0x%x[%d] from interrupt\n",
9040 epid_out_traffic(epid) ? "Out" : "In", epid, (unsigned int)urb,
9041 urb_priv->urb_num);
9042 }
9043
9044 if(urb_priv->urb_state == UNLINK) {
9045         /* An unlink has been requested for this Bulk URB; that means the EP
9046            has been disabled and we might not have sent all data */
9047 tc_finish_urb(hcd, urb, urb->status);
9048 continue;
9049 }
9050
9051 ASSERT(urb_priv->urb_state == STARTED);
9052 if (phys_to_virt(TxBulkEPList[epid].sub) != urb_priv->last_sb) {
9053 tc_err("Endpoint got disabled before reaching last sb\n");
9054 }
9055
9056 epid_data = etrax_epid_get(epid);
9057 if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) ==
9058 IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
9059 /* This means that the endpoint has no error, is disabled
9060 and had inserted traffic, i.e. transfer successfully completed. */
9061 tc_finish_urb(hcd, urb, 0);
9062 } else {
9063 /* Shouldn't happen. We expect errors to be caught by epid
9064 attention. */
9065 tc_err("Found disabled bulk EP desc (epid:%d error:%d)\n",
9066 epid, IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data));
9067 }
9068 } else {
9069 tc_dbg("Ignoring In Bulk epid:%d, let RX interrupt handle it\n", epid);
9070 }
9071 }
9072
9073 local_irq_restore(flags);
9074 }
9075
9076 static void check_finished_ctrl_tx_epids(struct usb_hcd *hcd) {
9077 unsigned long flags;
9078 int epid;
9079 struct urb *urb;
9080 struct crisv10_urb_priv * urb_priv;
9081 __u32 epid_data;
9082
9083   /* Protect TxCtrlEPList */
9084 local_irq_save(flags);
9085
9086 for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
9087 if(epid == DUMMY_EPID)
9088 continue;
9089
9090 /* A finished EP descriptor is disabled and has a valid sub pointer */
9091 if (!(TxCtrlEPList[epid].command & IO_MASK(USB_EP_command, enable)) &&
9092 (TxCtrlEPList[epid].sub != 0)) {
9093
9094 /* Get the active URB for this epid */
9095 urb = activeUrbList[epid];
9096
9097 if(urb == NULL) {
9098 tc_warn("Found finished Ctrl epid:%d with no active URB\n", epid);
9099 continue;
9100 }
9101
9102 /* Sanity checks */
9103 ASSERT(usb_pipein(urb->pipe));
9104 urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
9105 ASSERT(urb_priv);
9106 if (phys_to_virt(TxCtrlEPList[epid].sub) != urb_priv->last_sb) {
9107 tc_err("Endpoint got disabled before reaching last sb\n");
9108 }
9109
9110 epid_data = etrax_epid_get(epid);
9111 if (IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data) ==
9112 IO_STATE_VALUE(R_USB_EPT_DATA, error_code, no_error)) {
9113 /* This means that the endpoint has no error, is disabled
9114 and had inserted traffic, i.e. transfer successfully completed. */
9115
9116 /* Check if RX-interrupt for In Ctrl has been processed before
9117 finishing the URB */
9118 if(urb_priv->ctrl_rx_done) {
9119 tc_dbg("Finishing In Ctrl URB:0x%x[%d] in tx_interrupt\n",
9120 (unsigned int)urb, urb_priv->urb_num);
9121 tc_finish_urb(hcd, urb, 0);
9122 } else {
9123         /* If we get the zout descriptor interrupt before RX was done for an
9124            In Ctrl transfer, then we flag that, and the URB will be finished
9125            in the RX interrupt */
9126 urb_priv->ctrl_zout_done = 1;
9127 tc_dbg("Got zout descr interrupt before RX interrupt\n");
9128 }
9129 } else {
9130 /* Shouldn't happen. We expect errors to be caught by epid
9131 attention. */
9132 tc_err("Found disabled Ctrl EP desc (epid:%d URB:0x%x[%d]) error_code:%d\n", epid, (unsigned int)urb, urb_priv->urb_num, IO_EXTRACT(R_USB_EPT_DATA, error_code, epid_data));
9133 __dump_ep_desc(&(TxCtrlEPList[epid]));
9134 __dump_ept_data(epid);
9135 }
9136 }
9137 }
9138 local_irq_restore(flags);
9139 }
9140
9141 /* This function goes through all epids that are set up for Out Isoc
9142    transfers and marks (isoc_out_done) all queued URBs that the DMA has
9143    finished transferring.
9144    No URB completion is done here, to make the interrupt routine return
9145    quickly. URBs are completed later with the help of
9146    complete_isoc_bottom_half(), which is scheduled when this function is done. */
9147 static void check_finished_isoc_tx_epids(void) {
9148 unsigned long flags;
9149 int epid;
9150 struct urb *urb;
9151 struct crisv10_urb_priv * urb_priv;
9152 struct USB_SB_Desc* sb_desc;
9153 int epid_done;
9154
9155 /* Protect TxIsocEPList */
9156 local_irq_save(flags);
9157
9158 for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
9159 if (TxIsocEPList[epid].sub == 0 || epid == INVALID_EPID ||
9160 !epid_out_traffic(epid)) {
9161 /* Nothing here to see. */
9162 continue;
9163 }
9164 ASSERT(epid_inuse(epid));
9165 ASSERT(epid_isoc(epid));
9166
9167 sb_desc = phys_to_virt(TxIsocEPList[epid].sub);
9168 /* Find the last descriptor of the currently active URB for this ep.
9169 This is the first descriptor in the sub list marked for a descriptor
9170 interrupt. */
9171 while (sb_desc && !IO_EXTRACT(USB_SB_command, intr, sb_desc->command)) {
9172       sb_desc = sb_desc->next ? phys_to_virt(sb_desc->next) : NULL;
9173 }
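    /* The intr flag is set only on the last SB of each Out Isoc URB (see
       create_sb_for_urb()), so the first intr-flagged SB found above marks
       the end of the currently active URB. */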
9174 ASSERT(sb_desc);
9175
9176 isoc_dbg("Descr IRQ checking epid:%d sub:0x%x intr:0x%x\n",
9177 epid, (unsigned int)phys_to_virt(TxIsocEPList[epid].sub),
9178 (unsigned int)sb_desc);
9179
9180 urb = activeUrbList[epid];
9181 if(urb == NULL) {
9182 isoc_err("Isoc Descr irq on epid:%d with no active URB\n", epid);
9183 continue;
9184 }
9185
9186 epid_done = 0;
9187 while(urb && !epid_done) {
9188 /* Sanity check. */
9189 ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
9190 ASSERT(usb_pipeout(urb->pipe));
9191
9192 urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
9193 ASSERT(urb_priv);
9194 ASSERT(urb_priv->urb_state == STARTED ||
9195 urb_priv->urb_state == UNLINK);
9196
9197 if (sb_desc != urb_priv->last_sb) {
9198 /* This urb has been sent. */
9199 urb_priv->isoc_out_done = 1;
9200
9201 } else { /* Found URB that has last_sb as the interrupt reason */
9202
9203 /* Check if EP has been disabled, meaning that all transfers are done*/
9204 if(!(TxIsocEPList[epid].command & IO_MASK(USB_EP_command, enable))) {
9205 ASSERT((sb_desc->command & IO_MASK(USB_SB_command, eol)) ==
9206 IO_STATE(USB_SB_command, eol, yes));
9207 ASSERT(sb_desc->next == 0);
9208 urb_priv->isoc_out_done = 1;
9209 } else {
9210 isoc_dbg("Skipping URB:0x%x[%d] because EP not disabled yet\n",
9211 (unsigned int)urb, urb_priv->urb_num);
9212 }
9213 /* Stop looking any further in queue */
9214 epid_done = 1;
9215 }
9216
9217 if (!epid_done) {
9218 if(urb == activeUrbList[epid]) {
9219 urb = urb_list_first(epid);
9220 } else {
9221 urb = urb_list_next(urb, epid);
9222 }
9223 }
9224 } /* END: while(urb && !epid_done) */
9225 }
9226
9227 local_irq_restore(flags);
9228 }
9229
9230
9231 /* This is where the Out Isoc URBs are really completed. This function is
9232    scheduled from tc_dma_tx_interrupt() when one or more Out Isoc transfers
9233    are done. It completes all URBs earlier marked with isoc_out_done by
9234    the fast interrupt routine check_finished_isoc_tx_epids() */
9235
9236 static void complete_isoc_bottom_half(struct work_struct* work) {
9237 struct crisv10_isoc_complete_data *comp_data;
9238 struct usb_iso_packet_descriptor *packet;
9239 struct crisv10_urb_priv * urb_priv;
9240 unsigned long flags;
9241 struct urb* urb;
9242 int epid_done;
9243 int epid;
9244 int i;
9245
9246 comp_data = container_of(work, struct crisv10_isoc_complete_data, usb_bh);
9247
9248 local_irq_save(flags);
9249
9250 for (epid = 0; epid < NBR_OF_EPIDS - 1; epid++) {
9251 if(!epid_inuse(epid) || !epid_isoc(epid) || !epid_out_traffic(epid) || epid == DUMMY_EPID) {
9252 /* Only check valid Out Isoc epids */
9253 continue;
9254 }
9255
9256 isoc_dbg("Isoc bottom-half checking epid:%d, sub:0x%x\n", epid,
9257 (unsigned int)phys_to_virt(TxIsocEPList[epid].sub));
9258
9259     /* The descriptor interrupt handler has marked all transmitted Out Isoc
9260        URBs with isoc_out_done. Now we traverse all epids and, for each that
9261        has Out Isoc traffic, traverse its URB list and complete the
9262        transmitted URBs. */
9263 epid_done = 0;
9264 while (!epid_done) {
9265
9266 /* Get the active urb (if any) */
9267 urb = activeUrbList[epid];
9268 if (urb == 0) {
9269 isoc_dbg("No active URB on epid:%d anymore\n", epid);
9270 epid_done = 1;
9271 continue;
9272 }
9273
9274 /* Sanity check. */
9275 ASSERT(usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS);
9276 ASSERT(usb_pipeout(urb->pipe));
9277
9278 urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
9279 ASSERT(urb_priv);
9280
9281 if (!(urb_priv->isoc_out_done)) {
9282         /* We have reached a URB that isn't flagged done yet, stop traversing. */
9283         isoc_dbg("Stopped traversing Out Isoc URBs on epid:%d"
9284                  " before not yet flagged URB:0x%x[%d]\n",
9285 epid, (unsigned int)urb, urb_priv->urb_num);
9286 epid_done = 1;
9287 continue;
9288 }
9289
9290 /* This urb has been sent. */
9291 isoc_dbg("Found URB:0x%x[%d] that is flaged isoc_out_done\n",
9292 (unsigned int)urb, urb_priv->urb_num);
9293
9294       /* Set OK status on transferred packets for this URB and finish it */
9295 for (i = 0; i < urb->number_of_packets; i++) {
9296 packet = &urb->iso_frame_desc[i];
9297 packet->status = 0;
9298 packet->actual_length = packet->length;
9299 }
9300 urb_priv->isoc_packet_counter = urb->number_of_packets;
9301 tc_finish_urb(comp_data->hcd, urb, 0);
9302
9303 } /* END: while(!epid_done) */
9304 } /* END: for(epid...) */
9305
9306 local_irq_restore(flags);
9307 kmem_cache_free(isoc_compl_cache, comp_data);
9308 }
9309
9310
9311 static void check_finished_intr_tx_epids(struct usb_hcd *hcd) {
9312 unsigned long flags;
9313 int epid;
9314 struct urb *urb;
9315 struct crisv10_urb_priv * urb_priv;
9316 volatile struct USB_EP_Desc *curr_ep; /* Current EP, the iterator. */
9317 volatile struct USB_EP_Desc *next_ep; /* The EP after current. */
9318
9319   /* Protect TxIntrEPList */
9320 local_irq_save(flags);
9321
9322 for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
9323 if(!epid_inuse(epid) || !epid_intr(epid) || !epid_out_traffic(epid)) {
9324 /* Nothing to see on this epid. Only check valid Out Intr epids */
9325 continue;
9326 }
9327
9328 urb = activeUrbList[epid];
9329 if(urb == 0) {
9330 intr_warn("Found Out Intr epid:%d with no active URB\n", epid);
9331 continue;
9332 }
9333
9334 /* Sanity check. */
9335 ASSERT(usb_pipetype(urb->pipe) == PIPE_INTERRUPT);
9336 ASSERT(usb_pipeout(urb->pipe));
9337
9338 urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
9339 ASSERT(urb_priv);
9340
9341     /* Go through the EPs between the first and second sof-EP. This is where
9342        Out Intr EPs are inserted. */
9343 curr_ep = &TxIntrEPList[0];
9344 do {
9345 next_ep = (struct USB_EP_Desc *)phys_to_virt(curr_ep->next);
9346 if(next_ep == urb_priv->intr_ep_pool[0]) {
9347 /* We found the Out Intr EP for this epid */
9348
9349 /* Disable it so it doesn't get processed again */
9350 next_ep->command &= ~IO_MASK(USB_EP_command, enable);
9351
9352 /* Finish the active Out Intr URB with status OK */
9353 tc_finish_urb(hcd, urb, 0);
9354 }
9355 curr_ep = phys_to_virt(curr_ep->next);
9356 } while (curr_ep != &TxIntrEPList[1]);
9357
9358 }
9359 local_irq_restore(flags);
9360 }
9361
9362 /* Interrupt handler for DMA8/IRQ24 with subchannels (called from hardware intr) */
9363 static irqreturn_t tc_dma_tx_interrupt(int irq, void *vhc) {
9364 struct usb_hcd *hcd = (struct usb_hcd*)vhc;
9365 ASSERT(hcd);
9366
9367 if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub0_descr)) {
9368 /* Clear this interrupt */
9369 *R_DMA_CH8_SUB0_CLR_INTR = IO_STATE(R_DMA_CH8_SUB0_CLR_INTR, clr_descr, do);
9370 restart_dma8_sub0();
9371 }
9372
9373 if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub1_descr)) {
9374 /* Clear this interrupt */
9375 *R_DMA_CH8_SUB1_CLR_INTR = IO_STATE(R_DMA_CH8_SUB1_CLR_INTR, clr_descr, do);
9376 check_finished_ctrl_tx_epids(hcd);
9377 }
9378
9379 if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub2_descr)) {
9380 /* Clear this interrupt */
9381 *R_DMA_CH8_SUB2_CLR_INTR = IO_STATE(R_DMA_CH8_SUB2_CLR_INTR, clr_descr, do);
9382 check_finished_intr_tx_epids(hcd);
9383 }
9384
9385 if (*R_IRQ_READ2 & IO_MASK(R_IRQ_READ2, dma8_sub3_descr)) {
9386 struct crisv10_isoc_complete_data* comp_data;
9387
9388 /* Flag done Out Isoc for later completion */
9389 check_finished_isoc_tx_epids();
9390
9391 /* Clear this interrupt */
9392 *R_DMA_CH8_SUB3_CLR_INTR = IO_STATE(R_DMA_CH8_SUB3_CLR_INTR, clr_descr, do);
9393 /* Schedule bottom half of Out Isoc completion function. This function
9394 finishes the URBs marked with isoc_out_done */
9395 comp_data = (struct crisv10_isoc_complete_data*)
9396 kmem_cache_alloc(isoc_compl_cache, GFP_ATOMIC);
9397 ASSERT(comp_data != NULL);
9398     comp_data->hcd = hcd;
9399
9400 INIT_WORK(&comp_data->usb_bh, complete_isoc_bottom_half);
9401 schedule_work(&comp_data->usb_bh);
9402 }
9403
9404 return IRQ_HANDLED;
9405 }
9406
9407 /* Interrupt handler for DMA9/IRQ25 (called from hardware intr) */
9408 static irqreturn_t tc_dma_rx_interrupt(int irq, void *vhc) {
9409 unsigned long flags;
9410 struct urb *urb;
9411 struct usb_hcd *hcd = (struct usb_hcd*)vhc;
9412 struct crisv10_urb_priv *urb_priv;
9413 int epid = 0;
9414 int real_error;
9415
9416 ASSERT(hcd);
9417
9418 /* Clear this interrupt. */
9419 *R_DMA_CH9_CLR_INTR = IO_STATE(R_DMA_CH9_CLR_INTR, clr_eop, do);
9420
9421   /* The reason we disable interrupts (cli) here is that we call the
9422      drivers' completion callback functions. */
9423 local_irq_save(flags);
9424
9425 /* Note that this while loop assumes that all packets span only
9426 one rx descriptor. */
9427 while(myNextRxDesc->status & IO_MASK(USB_IN_status, eop)) {
9428 epid = IO_EXTRACT(USB_IN_status, epid, myNextRxDesc->status);
9429 /* Get the active URB for this epid */
9430 urb = activeUrbList[epid];
9431
9432 ASSERT(epid_inuse(epid));
9433 if (!urb) {
9434 dma_err("No urb for epid %d in rx interrupt\n", epid);
9435 goto skip_out;
9436 }
9437
9438 /* Check if any errors on epid */
9439 real_error = 0;
9440 if (myNextRxDesc->status & IO_MASK(USB_IN_status, error)) {
9441 __u32 r_usb_ept_data;
9442
9443 if (usb_pipeisoc(urb->pipe)) {
9444 r_usb_ept_data = etrax_epid_iso_get(epid);
9445 if((r_usb_ept_data & IO_MASK(R_USB_EPT_DATA_ISO, valid)) &&
9446 (IO_EXTRACT(R_USB_EPT_DATA_ISO, error_code, r_usb_ept_data) == 0) &&
9447 (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata))) {
9448 /* Not an error, just a failure to receive an expected iso
9449 in packet in this frame. This is not documented
9450            in the Designer's Reference. Continue processing.
9451 */
9452 } else real_error = 1;
9453 } else real_error = 1;
9454 }
9455
9456 if(real_error) {
9457 dma_err("Error in RX descr on epid:%d for URB 0x%x",
9458 epid, (unsigned int)urb);
9459 dump_ept_data(epid);
9460 dump_in_desc(myNextRxDesc);
9461 goto skip_out;
9462 }
9463
9464 urb_priv = (struct crisv10_urb_priv *)urb->hcpriv;
9465 ASSERT(urb_priv);
9466 ASSERT(urb_priv->urb_state == STARTED ||
9467 urb_priv->urb_state == UNLINK);
9468
9469 if ((usb_pipetype(urb->pipe) == PIPE_BULK) ||
9470 (usb_pipetype(urb->pipe) == PIPE_CONTROL) ||
9471 (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)) {
9472
9473 /* We get nodata for empty data transactions, and the rx descriptor's
9474 hw_len field is not valid in that case. No data to copy in other
9475 words. */
9476 if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) {
9477 /* No data to copy */
9478 } else {
9479 /*
9480 dma_dbg("Processing RX for URB:0x%x epid:%d (data:%d ofs:%d)\n",
9481 (unsigned int)urb, epid, myNextRxDesc->hw_len,
9482 urb_priv->rx_offset);
9483 */
9484         /* Only copy data if the URB isn't flagged to be unlinked */
9485 if(urb_priv->urb_state != UNLINK) {
9486 /* Make sure the data fits in the buffer. */
9487 if(urb_priv->rx_offset + myNextRxDesc->hw_len
9488 <= urb->transfer_buffer_length) {
9489
9490 /* Copy the data to URBs buffer */
9491 memcpy(urb->transfer_buffer + urb_priv->rx_offset,
9492 phys_to_virt(myNextRxDesc->buf), myNextRxDesc->hw_len);
9493 urb_priv->rx_offset += myNextRxDesc->hw_len;
9494 } else {
9495 /* Signal overflow when returning URB */
9496 urb->status = -EOVERFLOW;
9497 tc_finish_urb_later(hcd, urb, urb->status);
9498 }
9499 }
9500 }
9501
9502 /* Check if it was the last packet in the transfer */
9503 if (myNextRxDesc->status & IO_MASK(USB_IN_status, eot)) {
9504 /* Special handling for In Ctrl URBs. */
9505 if(usb_pipecontrol(urb->pipe) && usb_pipein(urb->pipe) &&
9506 !(urb_priv->ctrl_zout_done)) {
9507           /* Flag that the RX part of the Ctrl transfer is done. Because the
9508              zout descr interrupt hasn't happened yet, the URB will be finished
9509              in the TX interrupt. */
9510 urb_priv->ctrl_rx_done = 1;
9511 tc_dbg("Not finishing In Ctrl URB:0x%x from rx_interrupt, waiting"
9512 " for zout\n", (unsigned int)urb);
9513 } else {
9514 tc_finish_urb(hcd, urb, 0);
9515 }
9516 }
9517 } else { /* ISOC RX */
9518 /*
9519 isoc_dbg("Processing RX for epid:%d (URB:0x%x) ISOC pipe\n",
9520 epid, (unsigned int)urb);
9521 */
9522
9523 struct usb_iso_packet_descriptor *packet;
9524
9525 if (urb_priv->urb_state == UNLINK) {
9526 isoc_warn("Ignoring Isoc Rx data for urb being unlinked.\n");
9527 goto skip_out;
9528 } else if (urb_priv->urb_state == NOT_STARTED) {
9529 isoc_err("What? Got Rx data for Isoc urb that isn't started?\n");
9530 goto skip_out;
9531 }
9532
9533 packet = &urb->iso_frame_desc[urb_priv->isoc_packet_counter];
9534 ASSERT(packet);
9535 packet->status = 0;
9536
9537 if (myNextRxDesc->status & IO_MASK(USB_IN_status, nodata)) {
9538 /* We get nodata for empty data transactions, and the rx descriptor's
9539            hw_len field is not valid in that case. We copy 0 bytes, however, to
9540            stay in sync. */
9541 packet->actual_length = 0;
9542 } else {
9543 packet->actual_length = myNextRxDesc->hw_len;
9544 /* Make sure the data fits in the buffer. */
9545 ASSERT(packet->actual_length <= packet->length);
9546 memcpy(urb->transfer_buffer + packet->offset,
9547 phys_to_virt(myNextRxDesc->buf), packet->actual_length);
9548 if(packet->actual_length > 0)
9549 isoc_dbg("Copied %d bytes, packet %d for URB:0x%x[%d]\n",
9550 packet->actual_length, urb_priv->isoc_packet_counter,
9551 (unsigned int)urb, urb_priv->urb_num);
9552 }
9553
9554 /* Increment the packet counter. */
9555 urb_priv->isoc_packet_counter++;
9556
9557 /* Note that we don't care about the eot field in the rx descriptor's
9558 status. It will always be set for isoc traffic. */
9559 if (urb->number_of_packets == urb_priv->isoc_packet_counter) {
9560 /* Complete the urb with status OK. */
9561 tc_finish_urb(hcd, urb, 0);
9562 }
9563 }
9564
9565 skip_out:
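    /* Recycle the descriptor: the just-consumed descriptor becomes the new
       eol marker, the old eol is cleared so the ring gains a free slot, and
       the channel is restarted in case it had stopped at the old eol. */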
9566 myNextRxDesc->status = 0;
9567 myNextRxDesc->command |= IO_MASK(USB_IN_command, eol);
9568 myLastRxDesc->command &= ~IO_MASK(USB_IN_command, eol);
9569 myLastRxDesc = myNextRxDesc;
9570 myNextRxDesc = phys_to_virt(myNextRxDesc->next);
9571 flush_etrax_cache();
9572 *R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, restart);
9573 }
9574
9575 local_irq_restore(flags);
9576
9577 return IRQ_HANDLED;
9578 }
9579
9580 static void tc_bulk_start_timer_func(unsigned long dummy) {
9581   /* We might enable an EP descriptor behind the current DMA position just
9582      as the DMA is about to decide that there is no more bulk traffic and
9583      stop the bulk channel.
9584      Therefore we periodically check whether the bulk channel is stopped
9585      while there is an enabled bulk EP descriptor, in which case we start
9586      the bulk channel. */
9587
9588 if (!(*R_DMA_CH8_SUB0_CMD & IO_MASK(R_DMA_CH8_SUB0_CMD, cmd))) {
9589 int epid;
9590
9591 timer_dbg("bulk_start_timer: Bulk DMA channel not running.\n");
9592
9593 for (epid = 0; epid < NBR_OF_EPIDS; epid++) {
9594 if (TxBulkEPList[epid].command & IO_MASK(USB_EP_command, enable)) {
9595 timer_warn("Found enabled EP for epid %d, starting bulk channel.\n",
9596 epid);
9597 restart_dma8_sub0();
9598
9599 /* Restart the bulk eot timer since we just started the bulk channel.*/
9600 mod_timer(&bulk_eot_timer, jiffies + BULK_EOT_TIMER_INTERVAL);
9601
9602 /* No need to search any further. */
9603 break;
9604 }
9605 }
9606 } else {
9607 timer_dbg("bulk_start_timer: Bulk DMA channel running.\n");
9608 }
9609 }
9610
9611 static void tc_bulk_eot_timer_func(unsigned long dummy) {
9612 struct usb_hcd *hcd = (struct usb_hcd*)dummy;
9613 ASSERT(hcd);
9614 /* Because of a race condition in the top half, we might miss a bulk eot.
9615 This timer "simulates" a bulk eot if we don't get one for a while,
9616 hopefully correcting the situation. */
9617 timer_dbg("bulk_eot_timer timed out.\n");
9618 check_finished_bulk_tx_epids(hcd, 1);
9619 }
9620
9621
9622 /*************************************************************/
9623 /*************************************************************/
9624 /* Device driver block */
9625 /*************************************************************/
9626 /*************************************************************/
9627
9628 /* Forward declarations for device driver functions */
9629 static int devdrv_hcd_probe(struct device *);
9630 static int devdrv_hcd_remove(struct device *);
9631 #ifdef CONFIG_PM
9632 static int devdrv_hcd_suspend(struct device *, u32, u32);
9633 static int devdrv_hcd_resume(struct device *, u32);
9634 #endif /* CONFIG_PM */
9635
9636 /* the device */
9637 static struct platform_device *devdrv_hc_platform_device;
9638
9639 /* device driver interface */
9640 static struct device_driver devdrv_hc_device_driver = {
9641 .name = (char *) hc_name,
9642 .bus = &platform_bus_type,
9643
9644 .probe = devdrv_hcd_probe,
9645 .remove = devdrv_hcd_remove,
9646
9647 #ifdef CONFIG_PM
9648 .suspend = devdrv_hcd_suspend,
9649 .resume = devdrv_hcd_resume,
9650 #endif /* CONFIG_PM */
9651 };
9652
9653 /* initialize the host controller and driver */
9654 static int __init_or_module devdrv_hcd_probe(struct device *dev)
9655 {
9656   struct usb_hcd *hcd;
9657   struct crisv10_hcd *crisv10_hcd;
9658   int retval, rev_maj, rev_min;
9659
9660 /* Check DMA burst length */
9661 if(IO_EXTRACT(R_BUS_CONFIG, dma_burst, *R_BUS_CONFIG) !=
9662 IO_STATE(R_BUS_CONFIG, dma_burst, burst32)) {
9663 devdrv_err("Invalid DMA burst length in Etrax 100LX,"
9664 " needs to be 32\n");
9665 return -EPERM;
9666 }
9667
9668   //XXX: dev->usb_id doesn't exist, using "" instead? - claudio
9669 hcd = usb_create_hcd(&crisv10_hc_driver, dev, "");
9670 if (!hcd)
9671 return -ENOMEM;
9672
9673 crisv10_hcd = hcd_to_crisv10_hcd(hcd);
9674 spin_lock_init(&crisv10_hcd->lock);
9675 crisv10_hcd->num_ports = num_ports();
9676 crisv10_hcd->running = 0;
9677
9678 dev_set_drvdata(dev, crisv10_hcd);
9679
9680 devdrv_dbg("ETRAX USB IRQs HC:%d RX:%d TX:%d\n", ETRAX_USB_HC_IRQ,
9681 ETRAX_USB_RX_IRQ, ETRAX_USB_TX_IRQ);
9682
9683 /* Print out chip version read from registers */
9684   rev_maj = IO_EXTRACT(R_USB_REVISION, major, *R_USB_REVISION);
9685   rev_min = IO_EXTRACT(R_USB_REVISION, minor, *R_USB_REVISION);
9686 if(rev_min == 0) {
9687 devdrv_info("Etrax 100LX USB Revision %d v1,2\n", rev_maj);
9688 } else {
9689 devdrv_info("Etrax 100LX USB Revision %d v%d\n", rev_maj, rev_min);
9690 }
9691
9692 devdrv_info("Bulk timer interval, start:%d eot:%d\n",
9693 BULK_START_TIMER_INTERVAL,
9694 BULK_EOT_TIMER_INTERVAL);
9695
9696
9697 /* Init root hub data structures */
9698 if(rh_init()) {
9699 devdrv_err("Failed init data for Root Hub\n");
9700     retval = -ENOMEM; goto out;
9701 }
9702
9703 if(port_in_use(0)) {
9704 if (cris_request_io_interface(if_usb_1, "ETRAX100LX USB-HCD")) {
9705 printk(KERN_CRIT "usb-host: request IO interface usb1 failed");
9706 retval = -EBUSY;
9707 goto out;
9708 }
9709 devdrv_info("Claimed interface for USB physical port 1\n");
9710 }
9711 if(port_in_use(1)) {
9712 if (cris_request_io_interface(if_usb_2, "ETRAX100LX USB-HCD")) {
9713 /* Free first interface if second failed to be claimed */
9714 if(port_in_use(0)) {
9715 cris_free_io_interface(if_usb_1);
9716 }
9717 printk(KERN_CRIT "usb-host: request IO interface usb2 failed");
9718 retval = -EBUSY;
9719 goto out;
9720 }
9721 devdrv_info("Claimed interface for USB physical port 2\n");
9722 }
9723
9724 /* Init transfer controller structs and locks */
9725 if((retval = tc_init(hcd)) != 0) {
9726 goto out;
9727 }
9728
9729 /* Attach interrupt functions for DMA and init DMA controller */
9730 if((retval = tc_dma_init(hcd)) != 0) {
9731 goto out;
9732 }
9733
9734 /* Attach the top IRQ handler for USB controller interrupts */
9735 if (request_irq(ETRAX_USB_HC_IRQ, crisv10_hcd_top_irq, 0,
9736 "ETRAX 100LX built-in USB (HC)", hcd)) {
9737 err("Could not allocate IRQ %d for USB", ETRAX_USB_HC_IRQ);
9738 retval = -EBUSY;
9739 goto out;
9740 }
9741
9742 /* iso_eof is only enabled when isoc traffic is running. */
9743 *R_USB_IRQ_MASK_SET =
9744 /* IO_STATE(R_USB_IRQ_MASK_SET, iso_eof, set) | */
9745 IO_STATE(R_USB_IRQ_MASK_SET, bulk_eot, set) |
9746 IO_STATE(R_USB_IRQ_MASK_SET, epid_attn, set) |
9747 IO_STATE(R_USB_IRQ_MASK_SET, port_status, set) |
9748 IO_STATE(R_USB_IRQ_MASK_SET, ctl_status, set);
9749
9750
9751 crisv10_ready_wait();
9752 /* Reset the USB interface. */
9753 *R_USB_COMMAND =
9754 IO_STATE(R_USB_COMMAND, port_sel, nop) |
9755 IO_STATE(R_USB_COMMAND, port_cmd, reset) |
9756 IO_STATE(R_USB_COMMAND, ctrl_cmd, reset);
9757
9758   /* Designer's Reference, p. 8 - 10 says we should initialize R_USB_FM_PSTART to
9759 0x2A30 (10800), to guarantee that control traffic gets 10% of the
9760 bandwidth, and periodic transfer may allocate the rest (90%).
9761 This doesn't work though.
9762 The value 11960 is chosen to be just after the SOF token, with a couple
9763 of bit times extra for possible bit stuffing. */
9764 *R_USB_FM_PSTART = IO_FIELD(R_USB_FM_PSTART, value, 11960);
9765
9766 crisv10_ready_wait();
9767 /* Configure the USB interface as a host controller. */
9768 *R_USB_COMMAND =
9769 IO_STATE(R_USB_COMMAND, port_sel, nop) |
9770 IO_STATE(R_USB_COMMAND, port_cmd, reset) |
9771 IO_STATE(R_USB_COMMAND, ctrl_cmd, host_config);
9772
9773
9774   /* Check that the controller is not busy before enabling ports */
9775 crisv10_ready_wait();
9776
9777 /* Enable selected USB ports */
9778 if(port_in_use(0)) {
9779 *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, no);
9780 } else {
9781 *R_USB_PORT1_DISABLE = IO_STATE(R_USB_PORT1_DISABLE, disable, yes);
9782 }
9783 if(port_in_use(1)) {
9784 *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, no);
9785 } else {
9786 *R_USB_PORT2_DISABLE = IO_STATE(R_USB_PORT2_DISABLE, disable, yes);
9787 }
9788
9789 crisv10_ready_wait();
9790 /* Start processing of USB traffic. */
9791 *R_USB_COMMAND =
9792 IO_STATE(R_USB_COMMAND, port_sel, nop) |
9793 IO_STATE(R_USB_COMMAND, port_cmd, reset) |
9794 IO_STATE(R_USB_COMMAND, ctrl_cmd, host_run);
9795
9796   /* Do not continue probe initialization until the USB interface is ready */
9797 crisv10_ready_wait();
9798
9799 /* Register our Host Controller to USB Core
9800 * Finish the remaining parts of generic HCD initialization: allocate the
9801 * buffers of consistent memory, register the bus
9802 * and call the driver's reset() and start() routines. */
9803 retval = usb_add_hcd(hcd, ETRAX_USB_HC_IRQ, IRQF_DISABLED);
9804 if (retval != 0) {
9805 devdrv_err("Failed registering HCD driver\n");
9806 goto out;
9807 }
9808
9809 return 0;
9810
9811 out:
9812 devdrv_hcd_remove(dev);
9813 return retval;
9814 }
9815
9816
9817 /* cleanup after the host controller and driver */
9818 static int __init_or_module devdrv_hcd_remove(struct device *dev)
9819 {
9820 struct crisv10_hcd *crisv10_hcd = dev_get_drvdata(dev);
9821 struct usb_hcd *hcd;
9822
9823 if (!crisv10_hcd)
9824 return 0;
9825 hcd = crisv10_hcd_to_hcd(crisv10_hcd);
9826
9827
9828 /* Stop USB Controller in Etrax 100LX */
9829 crisv10_hcd_reset(hcd);
9830
9831 usb_remove_hcd(hcd);
9832 devdrv_dbg("Removed HCD from USB Core\n");
9833
9834 /* Free USB Controller IRQ */
9835 free_irq(ETRAX_USB_HC_IRQ, NULL);
9836
9837 /* Free resources */
9838 tc_dma_destroy();
9839 tc_destroy();
9840
9841
9842 if(port_in_use(0)) {
9843 cris_free_io_interface(if_usb_1);
9844 }
9845 if(port_in_use(1)) {
9846 cris_free_io_interface(if_usb_2);
9847 }
9848
9849 devdrv_dbg("Freed all claimed resources\n");
9850
9851 return 0;
9852 }
9853
9854
9855 #ifdef CONFIG_PM
9856
9857 static int devdrv_hcd_suspend(struct device *dev, u32 state, u32 level)
9858 {
9859 return 0; /* no-op for now */
9860 }
9861
9862 static int devdrv_hcd_resume(struct device *dev, u32 level)
9863 {
9864 return 0; /* no-op for now */
9865 }
9866
9867 #endif /* CONFIG_PM */
9868
9869
9870 /*************************************************************/
9871 /*************************************************************/
9872 /* Module block */
9873 /*************************************************************/
9874 /*************************************************************/
9875
9876 /* register driver */
9877 static int __init module_hcd_init(void)
9878 {
9879
9880 if (usb_disabled())
9881 return -ENODEV;
9882
9883   /* Here we select the enabled ports via the defines created from
9884      menuconfig */
9885 #ifndef CONFIG_ETRAX_USB_HOST_PORT1
9886 ports &= ~(1<<0);
9887 #endif
9888 #ifndef CONFIG_ETRAX_USB_HOST_PORT2
9889 ports &= ~(1<<1);
9890 #endif
9891
9892 printk(KERN_INFO "%s version "VERSION" "COPYRIGHT"\n", product_desc);
9893
9894 devdrv_hc_platform_device =
9895 platform_device_register_simple((char *) hc_name, 0, NULL, 0);
9896
9897 if (IS_ERR(devdrv_hc_platform_device))
9898 return PTR_ERR(devdrv_hc_platform_device);
9899 return driver_register(&devdrv_hc_device_driver);
9900 /*
9901 * Note that we do not set the DMA mask for the device,
9902 * i.e. we pretend that we will use PIO, since no specific
9903 * allocation routines are needed for DMA buffers. This will
9904 * cause the HCD buffer allocation routines to fall back to
9905 * kmalloc().
9906 */
9907 }
9908
9909 /* unregister driver */
9910 static void __exit module_hcd_exit(void)
9911 {
9912 driver_unregister(&devdrv_hc_device_driver);
9913 }
9914
9915
9916 /* Module hooks */
9917 module_init(module_hcd_init);
9918 module_exit(module_hcd_exit);