[openwrt/staging/dangole.git] target/linux/gemini/patches-6.1/0002-usb-fotg210-Collect-pieces-of-dual-mode-controller.patch
1 From 30367636930864f71b2bd462adedcf8484313864 Mon Sep 17 00:00:00 2001
2 From: Linus Walleij <linus.walleij@linaro.org>
3 Date: Sun, 23 Oct 2022 16:47:06 +0200
4 Subject: [PATCH 02/29] usb: fotg210: Collect pieces of dual mode controller
5
6 The Faraday FOTG210 is a dual-mode OTG USB controller that can
7 act as host, peripheral or both. To be able to probe from one
8 hardware description and to follow the pattern of other dual-
9 mode controllers such as MUSB or MTU3 we need to collect the
10 two, currently completely separate drivers in the same
11 directory.
12
13 After this, users need to select the main symbol USB_FOTG210
14 and then each respective subdriver. We pave the road to
15 compile both drivers into the same kernel and select the
16 one we want to use at probe() time, and possibly add OTG
17 support in the end.
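
As an illustration only (not part of the upstream change): with the
symbols introduced below, a configuration that enables both sub-drivers
would be expected to look roughly like this, assuming USB host and
USB_GADGET support are already enabled:

    CONFIG_USB_FOTG210=y
    CONFIG_USB_FOTG210_HCD=y
    CONFIG_USB_FOTG210_UDC=m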
18
19 This patch doesn't do much more than create the new symbol
20 and collect the drivers in one place. We also add a comment
21 for the section of dual-mode controllers in the Kconfig
22 file so people can see what these selections are about.
23
24 Also add myself as maintainer as there has been little
25 response on my patches to these drivers.
26
27 Cc: Fabian Vogt <fabian@ritter-vogt.de>
28 Cc: Yuan-Hsin Chen <yhchen@faraday-tech.com>
29 Cc: Felipe Balbi <balbi@kernel.org>
30 Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
31 Link: https://lore.kernel.org/r/20221023144708.3596563-1-linus.walleij@linaro.org
32 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
33 ---
34 --- a/drivers/usb/Kconfig
35 +++ b/drivers/usb/Kconfig
36 @@ -111,8 +111,12 @@ source "drivers/usb/usbip/Kconfig"
37
38 endif
39
40 +comment "USB dual-mode controller drivers"
41 +
42 source "drivers/usb/cdns3/Kconfig"
43
44 +source "drivers/usb/fotg210/Kconfig"
45 +
46 source "drivers/usb/mtu3/Kconfig"
47
48 source "drivers/usb/musb/Kconfig"
49 --- a/drivers/usb/Makefile
50 +++ b/drivers/usb/Makefile
51 @@ -17,6 +17,8 @@ obj-$(CONFIG_USB_CDNS_SUPPORT) += cdns3/
52 obj-$(CONFIG_USB_CDNS3) += cdns3/
53 obj-$(CONFIG_USB_CDNSP_PCI) += cdns3/
54
55 +obj-$(CONFIG_USB_FOTG210) += fotg210/
56 +
57 obj-$(CONFIG_USB_MON) += mon/
58 obj-$(CONFIG_USB_MTU3) += mtu3/
59
60 --- /dev/null
61 +++ b/drivers/usb/fotg210/Kconfig
62 @@ -0,0 +1,36 @@
63 +# SPDX-License-Identifier: GPL-2.0
64 +
65 +config USB_FOTG210
66 + tristate "Faraday FOTG210 USB2 Dual Role controller"
67 + depends on USB || USB_GADGET
68 + depends on HAS_DMA && HAS_IOMEM
69 + default ARCH_GEMINI
70 + help
71 + Faraday FOTG210 is a dual-mode USB controller that can act
72 + in both host controller and peripheral controller mode.
73 +
74 +if USB_FOTG210
75 +
76 +config USB_FOTG210_HCD
77 + tristate "Faraday FOTG210 USB Host Controller support"
78 + depends on USB
79 + help
80 + Faraday FOTG210 is an OTG controller which can be configured as
81 + an USB2.0 host. It is designed to meet USB2.0 EHCI specification
82 	+	  a USB2.0 host. It is designed to meet the USB2.0 EHCI specification
83 +
84 + To compile this driver as a module, choose M here: the
85 + module will be called fotg210-hcd.
86 +
87 +config USB_FOTG210_UDC
88 + depends on USB_GADGET
89 + tristate "Faraday FOTG210 USB Peripheral Controller support"
90 + help
91 + Faraday USB2.0 OTG controller which can be configured as
92 	+	  a high speed or full speed USB device. This driver supports
93 + Bulk Transfer so far.
94 +
95 + Say "y" to link the driver statically, or "m" to build a
96 + dynamically linked module called "fotg210-udc".
97 +
98 +endif
99 --- /dev/null
100 +++ b/drivers/usb/fotg210/Makefile
101 @@ -0,0 +1,3 @@
102 +# SPDX-License-Identifier: GPL-2.0
103 +obj-$(CONFIG_USB_FOTG210_HCD) += fotg210-hcd.o
104 +obj-$(CONFIG_USB_FOTG210_UDC) += fotg210-udc.o
105 --- a/drivers/usb/host/fotg210-hcd.c
106 +++ /dev/null
107 @@ -1,5724 +0,0 @@
108 -// SPDX-License-Identifier: GPL-2.0+
109 -/* Faraday FOTG210 EHCI-like driver
110 - *
111 - * Copyright (c) 2013 Faraday Technology Corporation
112 - *
113 - * Author: Yuan-Hsin Chen <yhchen@faraday-tech.com>
114 - * Feng-Hsin Chiang <john453@faraday-tech.com>
115 - * Po-Yu Chuang <ratbert.chuang@gmail.com>
116 - *
117 - * Most of code borrowed from the Linux-3.7 EHCI driver
118 - */
119 -#include <linux/module.h>
120 -#include <linux/of.h>
121 -#include <linux/device.h>
122 -#include <linux/dmapool.h>
123 -#include <linux/kernel.h>
124 -#include <linux/delay.h>
125 -#include <linux/ioport.h>
126 -#include <linux/sched.h>
127 -#include <linux/vmalloc.h>
128 -#include <linux/errno.h>
129 -#include <linux/init.h>
130 -#include <linux/hrtimer.h>
131 -#include <linux/list.h>
132 -#include <linux/interrupt.h>
133 -#include <linux/usb.h>
134 -#include <linux/usb/hcd.h>
135 -#include <linux/moduleparam.h>
136 -#include <linux/dma-mapping.h>
137 -#include <linux/debugfs.h>
138 -#include <linux/slab.h>
139 -#include <linux/uaccess.h>
140 -#include <linux/platform_device.h>
141 -#include <linux/io.h>
142 -#include <linux/iopoll.h>
143 -#include <linux/clk.h>
144 -
145 -#include <asm/byteorder.h>
146 -#include <asm/irq.h>
147 -#include <asm/unaligned.h>
148 -
149 -#define DRIVER_AUTHOR "Yuan-Hsin Chen"
150 -#define DRIVER_DESC "FOTG210 Host Controller (EHCI) Driver"
151 -static const char hcd_name[] = "fotg210_hcd";
152 -
153 -#undef FOTG210_URB_TRACE
154 -#define FOTG210_STATS
155 -
156 -/* magic numbers that can affect system performance */
157 -#define FOTG210_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
158 -#define FOTG210_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
159 -#define FOTG210_TUNE_RL_TT 0
160 -#define FOTG210_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
161 -#define FOTG210_TUNE_MULT_TT 1
162 -
163 -/* Some drivers think it's safe to schedule isochronous transfers more than 256
164 - * ms into the future (partly as a result of an old bug in the scheduling
165 - * code). In an attempt to avoid trouble, we will use a minimum scheduling
166 - * length of 512 frames instead of 256.
167 - */
168 -#define FOTG210_TUNE_FLS 1 /* (medium) 512-frame schedule */
169 -
170 -/* Initial IRQ latency: faster than hw default */
171 -static int log2_irq_thresh; /* 0 to 6 */
172 -module_param(log2_irq_thresh, int, S_IRUGO);
173 -MODULE_PARM_DESC(log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
174 -
175 -/* initial park setting: slower than hw default */
176 -static unsigned park;
177 -module_param(park, uint, S_IRUGO);
178 -MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets");
179 -
180 -/* for link power management(LPM) feature */
181 -static unsigned int hird;
182 -module_param(hird, int, S_IRUGO);
183 -MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us");
184 -
185 -#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
186 -
187 -#include "fotg210.h"
188 -
189 -#define fotg210_dbg(fotg210, fmt, args...) \
190 - dev_dbg(fotg210_to_hcd(fotg210)->self.controller, fmt, ## args)
191 -#define fotg210_err(fotg210, fmt, args...) \
192 - dev_err(fotg210_to_hcd(fotg210)->self.controller, fmt, ## args)
193 -#define fotg210_info(fotg210, fmt, args...) \
194 - dev_info(fotg210_to_hcd(fotg210)->self.controller, fmt, ## args)
195 -#define fotg210_warn(fotg210, fmt, args...) \
196 - dev_warn(fotg210_to_hcd(fotg210)->self.controller, fmt, ## args)
197 -
198 -/* check the values in the HCSPARAMS register (host controller _Structural_
199 - * parameters) see EHCI spec, Table 2-4 for each value
200 - */
201 -static void dbg_hcs_params(struct fotg210_hcd *fotg210, char *label)
202 -{
203 - u32 params = fotg210_readl(fotg210, &fotg210->caps->hcs_params);
204 -
205 - fotg210_dbg(fotg210, "%s hcs_params 0x%x ports=%d\n", label, params,
206 - HCS_N_PORTS(params));
207 -}
208 -
209 -/* check the values in the HCCPARAMS register (host controller _Capability_
210 - * parameters) see EHCI Spec, Table 2-5 for each value
211 - */
212 -static void dbg_hcc_params(struct fotg210_hcd *fotg210, char *label)
213 -{
214 - u32 params = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
215 -
216 - fotg210_dbg(fotg210, "%s hcc_params %04x uframes %s%s\n", label,
217 - params,
218 - HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
219 - HCC_CANPARK(params) ? " park" : "");
220 -}
221 -
222 -static void __maybe_unused
223 -dbg_qtd(const char *label, struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd)
224 -{
225 - fotg210_dbg(fotg210, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd,
226 - hc32_to_cpup(fotg210, &qtd->hw_next),
227 - hc32_to_cpup(fotg210, &qtd->hw_alt_next),
228 - hc32_to_cpup(fotg210, &qtd->hw_token),
229 - hc32_to_cpup(fotg210, &qtd->hw_buf[0]));
230 - if (qtd->hw_buf[1])
231 - fotg210_dbg(fotg210, " p1=%08x p2=%08x p3=%08x p4=%08x\n",
232 - hc32_to_cpup(fotg210, &qtd->hw_buf[1]),
233 - hc32_to_cpup(fotg210, &qtd->hw_buf[2]),
234 - hc32_to_cpup(fotg210, &qtd->hw_buf[3]),
235 - hc32_to_cpup(fotg210, &qtd->hw_buf[4]));
236 -}
237 -
238 -static void __maybe_unused
239 -dbg_qh(const char *label, struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
240 -{
241 - struct fotg210_qh_hw *hw = qh->hw;
242 -
243 - fotg210_dbg(fotg210, "%s qh %p n%08x info %x %x qtd %x\n", label, qh,
244 - hw->hw_next, hw->hw_info1, hw->hw_info2,
245 - hw->hw_current);
246 -
247 - dbg_qtd("overlay", fotg210, (struct fotg210_qtd *) &hw->hw_qtd_next);
248 -}
249 -
250 -static void __maybe_unused
251 -dbg_itd(const char *label, struct fotg210_hcd *fotg210, struct fotg210_itd *itd)
252 -{
253 - fotg210_dbg(fotg210, "%s[%d] itd %p, next %08x, urb %p\n", label,
254 - itd->frame, itd, hc32_to_cpu(fotg210, itd->hw_next),
255 - itd->urb);
256 -
257 - fotg210_dbg(fotg210,
258 - " trans: %08x %08x %08x %08x %08x %08x %08x %08x\n",
259 - hc32_to_cpu(fotg210, itd->hw_transaction[0]),
260 - hc32_to_cpu(fotg210, itd->hw_transaction[1]),
261 - hc32_to_cpu(fotg210, itd->hw_transaction[2]),
262 - hc32_to_cpu(fotg210, itd->hw_transaction[3]),
263 - hc32_to_cpu(fotg210, itd->hw_transaction[4]),
264 - hc32_to_cpu(fotg210, itd->hw_transaction[5]),
265 - hc32_to_cpu(fotg210, itd->hw_transaction[6]),
266 - hc32_to_cpu(fotg210, itd->hw_transaction[7]));
267 -
268 - fotg210_dbg(fotg210,
269 - " buf: %08x %08x %08x %08x %08x %08x %08x\n",
270 - hc32_to_cpu(fotg210, itd->hw_bufp[0]),
271 - hc32_to_cpu(fotg210, itd->hw_bufp[1]),
272 - hc32_to_cpu(fotg210, itd->hw_bufp[2]),
273 - hc32_to_cpu(fotg210, itd->hw_bufp[3]),
274 - hc32_to_cpu(fotg210, itd->hw_bufp[4]),
275 - hc32_to_cpu(fotg210, itd->hw_bufp[5]),
276 - hc32_to_cpu(fotg210, itd->hw_bufp[6]));
277 -
278 - fotg210_dbg(fotg210, " index: %d %d %d %d %d %d %d %d\n",
279 - itd->index[0], itd->index[1], itd->index[2],
280 - itd->index[3], itd->index[4], itd->index[5],
281 - itd->index[6], itd->index[7]);
282 -}
283 -
284 -static int __maybe_unused
285 -dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
286 -{
287 - return scnprintf(buf, len, "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
288 - label, label[0] ? " " : "", status,
289 - (status & STS_ASS) ? " Async" : "",
290 - (status & STS_PSS) ? " Periodic" : "",
291 - (status & STS_RECL) ? " Recl" : "",
292 - (status & STS_HALT) ? " Halt" : "",
293 - (status & STS_IAA) ? " IAA" : "",
294 - (status & STS_FATAL) ? " FATAL" : "",
295 - (status & STS_FLR) ? " FLR" : "",
296 - (status & STS_PCD) ? " PCD" : "",
297 - (status & STS_ERR) ? " ERR" : "",
298 - (status & STS_INT) ? " INT" : "");
299 -}
300 -
301 -static int __maybe_unused
302 -dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
303 -{
304 - return scnprintf(buf, len, "%s%sintrenable %02x%s%s%s%s%s%s",
305 - label, label[0] ? " " : "", enable,
306 - (enable & STS_IAA) ? " IAA" : "",
307 - (enable & STS_FATAL) ? " FATAL" : "",
308 - (enable & STS_FLR) ? " FLR" : "",
309 - (enable & STS_PCD) ? " PCD" : "",
310 - (enable & STS_ERR) ? " ERR" : "",
311 - (enable & STS_INT) ? " INT" : "");
312 -}
313 -
314 -static const char *const fls_strings[] = { "1024", "512", "256", "??" };
315 -
316 -static int dbg_command_buf(char *buf, unsigned len, const char *label,
317 - u32 command)
318 -{
319 - return scnprintf(buf, len,
320 - "%s%scommand %07x %s=%d ithresh=%d%s%s%s period=%s%s %s",
321 - label, label[0] ? " " : "", command,
322 - (command & CMD_PARK) ? " park" : "(park)",
323 - CMD_PARK_CNT(command),
324 - (command >> 16) & 0x3f,
325 - (command & CMD_IAAD) ? " IAAD" : "",
326 - (command & CMD_ASE) ? " Async" : "",
327 - (command & CMD_PSE) ? " Periodic" : "",
328 - fls_strings[(command >> 2) & 0x3],
329 - (command & CMD_RESET) ? " Reset" : "",
330 - (command & CMD_RUN) ? "RUN" : "HALT");
331 -}
332 -
333 -static char *dbg_port_buf(char *buf, unsigned len, const char *label, int port,
334 - u32 status)
335 -{
336 - char *sig;
337 -
338 - /* signaling state */
339 - switch (status & (3 << 10)) {
340 - case 0 << 10:
341 - sig = "se0";
342 - break;
343 - case 1 << 10:
344 - sig = "k";
345 - break; /* low speed */
346 - case 2 << 10:
347 - sig = "j";
348 - break;
349 - default:
350 - sig = "?";
351 - break;
352 - }
353 -
354 - scnprintf(buf, len, "%s%sport:%d status %06x %d sig=%s%s%s%s%s%s%s%s",
355 - label, label[0] ? " " : "", port, status,
356 - status >> 25, /*device address */
357 - sig,
358 - (status & PORT_RESET) ? " RESET" : "",
359 - (status & PORT_SUSPEND) ? " SUSPEND" : "",
360 - (status & PORT_RESUME) ? " RESUME" : "",
361 - (status & PORT_PEC) ? " PEC" : "",
362 - (status & PORT_PE) ? " PE" : "",
363 - (status & PORT_CSC) ? " CSC" : "",
364 - (status & PORT_CONNECT) ? " CONNECT" : "");
365 -
366 - return buf;
367 -}
368 -
369 -/* functions have the "wrong" filename when they're output... */
370 -#define dbg_status(fotg210, label, status) { \
371 - char _buf[80]; \
372 - dbg_status_buf(_buf, sizeof(_buf), label, status); \
373 - fotg210_dbg(fotg210, "%s\n", _buf); \
374 -}
375 -
376 -#define dbg_cmd(fotg210, label, command) { \
377 - char _buf[80]; \
378 - dbg_command_buf(_buf, sizeof(_buf), label, command); \
379 - fotg210_dbg(fotg210, "%s\n", _buf); \
380 -}
381 -
382 -#define dbg_port(fotg210, label, port, status) { \
383 - char _buf[80]; \
384 - fotg210_dbg(fotg210, "%s\n", \
385 - dbg_port_buf(_buf, sizeof(_buf), label, port, status));\
386 -}
387 -
388 -/* troubleshooting help: expose state in debugfs */
389 -static int debug_async_open(struct inode *, struct file *);
390 -static int debug_periodic_open(struct inode *, struct file *);
391 -static int debug_registers_open(struct inode *, struct file *);
392 -static int debug_async_open(struct inode *, struct file *);
393 -
394 -static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*);
395 -static int debug_close(struct inode *, struct file *);
396 -
397 -static const struct file_operations debug_async_fops = {
398 - .owner = THIS_MODULE,
399 - .open = debug_async_open,
400 - .read = debug_output,
401 - .release = debug_close,
402 - .llseek = default_llseek,
403 -};
404 -static const struct file_operations debug_periodic_fops = {
405 - .owner = THIS_MODULE,
406 - .open = debug_periodic_open,
407 - .read = debug_output,
408 - .release = debug_close,
409 - .llseek = default_llseek,
410 -};
411 -static const struct file_operations debug_registers_fops = {
412 - .owner = THIS_MODULE,
413 - .open = debug_registers_open,
414 - .read = debug_output,
415 - .release = debug_close,
416 - .llseek = default_llseek,
417 -};
418 -
419 -static struct dentry *fotg210_debug_root;
420 -
421 -struct debug_buffer {
422 - ssize_t (*fill_func)(struct debug_buffer *); /* fill method */
423 - struct usb_bus *bus;
424 - struct mutex mutex; /* protect filling of buffer */
425 - size_t count; /* number of characters filled into buffer */
426 - char *output_buf;
427 - size_t alloc_size;
428 -};
429 -
430 -static inline char speed_char(u32 scratch)
431 -{
432 - switch (scratch & (3 << 12)) {
433 - case QH_FULL_SPEED:
434 - return 'f';
435 -
436 - case QH_LOW_SPEED:
437 - return 'l';
438 -
439 - case QH_HIGH_SPEED:
440 - return 'h';
441 -
442 - default:
443 - return '?';
444 - }
445 -}
446 -
447 -static inline char token_mark(struct fotg210_hcd *fotg210, __hc32 token)
448 -{
449 - __u32 v = hc32_to_cpu(fotg210, token);
450 -
451 - if (v & QTD_STS_ACTIVE)
452 - return '*';
453 - if (v & QTD_STS_HALT)
454 - return '-';
455 - if (!IS_SHORT_READ(v))
456 - return ' ';
457 - /* tries to advance through hw_alt_next */
458 - return '/';
459 -}
460 -
461 -static void qh_lines(struct fotg210_hcd *fotg210, struct fotg210_qh *qh,
462 - char **nextp, unsigned *sizep)
463 -{
464 - u32 scratch;
465 - u32 hw_curr;
466 - struct fotg210_qtd *td;
467 - unsigned temp;
468 - unsigned size = *sizep;
469 - char *next = *nextp;
470 - char mark;
471 - __le32 list_end = FOTG210_LIST_END(fotg210);
472 - struct fotg210_qh_hw *hw = qh->hw;
473 -
474 - if (hw->hw_qtd_next == list_end) /* NEC does this */
475 - mark = '@';
476 - else
477 - mark = token_mark(fotg210, hw->hw_token);
478 - if (mark == '/') { /* qh_alt_next controls qh advance? */
479 - if ((hw->hw_alt_next & QTD_MASK(fotg210)) ==
480 - fotg210->async->hw->hw_alt_next)
481 - mark = '#'; /* blocked */
482 - else if (hw->hw_alt_next == list_end)
483 - mark = '.'; /* use hw_qtd_next */
484 - /* else alt_next points to some other qtd */
485 - }
486 - scratch = hc32_to_cpup(fotg210, &hw->hw_info1);
487 - hw_curr = (mark == '*') ? hc32_to_cpup(fotg210, &hw->hw_current) : 0;
488 - temp = scnprintf(next, size,
489 - "qh/%p dev%d %cs ep%d %08x %08x(%08x%c %s nak%d)",
490 - qh, scratch & 0x007f,
491 - speed_char(scratch),
492 - (scratch >> 8) & 0x000f,
493 - scratch, hc32_to_cpup(fotg210, &hw->hw_info2),
494 - hc32_to_cpup(fotg210, &hw->hw_token), mark,
495 - (cpu_to_hc32(fotg210, QTD_TOGGLE) & hw->hw_token)
496 - ? "data1" : "data0",
497 - (hc32_to_cpup(fotg210, &hw->hw_alt_next) >> 1) & 0x0f);
498 - size -= temp;
499 - next += temp;
500 -
501 - /* hc may be modifying the list as we read it ... */
502 - list_for_each_entry(td, &qh->qtd_list, qtd_list) {
503 - scratch = hc32_to_cpup(fotg210, &td->hw_token);
504 - mark = ' ';
505 - if (hw_curr == td->qtd_dma)
506 - mark = '*';
507 - else if (hw->hw_qtd_next == cpu_to_hc32(fotg210, td->qtd_dma))
508 - mark = '+';
509 - else if (QTD_LENGTH(scratch)) {
510 - if (td->hw_alt_next == fotg210->async->hw->hw_alt_next)
511 - mark = '#';
512 - else if (td->hw_alt_next != list_end)
513 - mark = '/';
514 - }
515 - temp = snprintf(next, size,
516 - "\n\t%p%c%s len=%d %08x urb %p",
517 - td, mark, ({ char *tmp;
518 - switch ((scratch>>8)&0x03) {
519 - case 0:
520 - tmp = "out";
521 - break;
522 - case 1:
523 - tmp = "in";
524 - break;
525 - case 2:
526 - tmp = "setup";
527 - break;
528 - default:
529 - tmp = "?";
530 - break;
531 - } tmp; }),
532 - (scratch >> 16) & 0x7fff,
533 - scratch,
534 - td->urb);
535 - if (size < temp)
536 - temp = size;
537 - size -= temp;
538 - next += temp;
539 - }
540 -
541 - temp = snprintf(next, size, "\n");
542 - if (size < temp)
543 - temp = size;
544 -
545 - size -= temp;
546 - next += temp;
547 -
548 - *sizep = size;
549 - *nextp = next;
550 -}
551 -
552 -static ssize_t fill_async_buffer(struct debug_buffer *buf)
553 -{
554 - struct usb_hcd *hcd;
555 - struct fotg210_hcd *fotg210;
556 - unsigned long flags;
557 - unsigned temp, size;
558 - char *next;
559 - struct fotg210_qh *qh;
560 -
561 - hcd = bus_to_hcd(buf->bus);
562 - fotg210 = hcd_to_fotg210(hcd);
563 - next = buf->output_buf;
564 - size = buf->alloc_size;
565 -
566 - *next = 0;
567 -
568 - /* dumps a snapshot of the async schedule.
569 - * usually empty except for long-term bulk reads, or head.
570 - * one QH per line, and TDs we know about
571 - */
572 - spin_lock_irqsave(&fotg210->lock, flags);
573 - for (qh = fotg210->async->qh_next.qh; size > 0 && qh;
574 - qh = qh->qh_next.qh)
575 - qh_lines(fotg210, qh, &next, &size);
576 - if (fotg210->async_unlink && size > 0) {
577 - temp = scnprintf(next, size, "\nunlink =\n");
578 - size -= temp;
579 - next += temp;
580 -
581 - for (qh = fotg210->async_unlink; size > 0 && qh;
582 - qh = qh->unlink_next)
583 - qh_lines(fotg210, qh, &next, &size);
584 - }
585 - spin_unlock_irqrestore(&fotg210->lock, flags);
586 -
587 - return strlen(buf->output_buf);
588 -}
589 -
590 -/* count tds, get ep direction */
591 -static unsigned output_buf_tds_dir(char *buf, struct fotg210_hcd *fotg210,
592 - struct fotg210_qh_hw *hw, struct fotg210_qh *qh, unsigned size)
593 -{
594 - u32 scratch = hc32_to_cpup(fotg210, &hw->hw_info1);
595 - struct fotg210_qtd *qtd;
596 - char *type = "";
597 - unsigned temp = 0;
598 -
599 - /* count tds, get ep direction */
600 - list_for_each_entry(qtd, &qh->qtd_list, qtd_list) {
601 - temp++;
602 - switch ((hc32_to_cpu(fotg210, qtd->hw_token) >> 8) & 0x03) {
603 - case 0:
604 - type = "out";
605 - continue;
606 - case 1:
607 - type = "in";
608 - continue;
609 - }
610 - }
611 -
612 - return scnprintf(buf, size, "(%c%d ep%d%s [%d/%d] q%d p%d)",
613 - speed_char(scratch), scratch & 0x007f,
614 - (scratch >> 8) & 0x000f, type, qh->usecs,
615 - qh->c_usecs, temp, (scratch >> 16) & 0x7ff);
616 -}
617 -
618 -#define DBG_SCHED_LIMIT 64
619 -static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
620 -{
621 - struct usb_hcd *hcd;
622 - struct fotg210_hcd *fotg210;
623 - unsigned long flags;
624 - union fotg210_shadow p, *seen;
625 - unsigned temp, size, seen_count;
626 - char *next;
627 - unsigned i;
628 - __hc32 tag;
629 -
630 - seen = kmalloc_array(DBG_SCHED_LIMIT, sizeof(*seen), GFP_ATOMIC);
631 - if (!seen)
632 - return 0;
633 -
634 - seen_count = 0;
635 -
636 - hcd = bus_to_hcd(buf->bus);
637 - fotg210 = hcd_to_fotg210(hcd);
638 - next = buf->output_buf;
639 - size = buf->alloc_size;
640 -
641 - temp = scnprintf(next, size, "size = %d\n", fotg210->periodic_size);
642 - size -= temp;
643 - next += temp;
644 -
645 - /* dump a snapshot of the periodic schedule.
646 - * iso changes, interrupt usually doesn't.
647 - */
648 - spin_lock_irqsave(&fotg210->lock, flags);
649 - for (i = 0; i < fotg210->periodic_size; i++) {
650 - p = fotg210->pshadow[i];
651 - if (likely(!p.ptr))
652 - continue;
653 -
654 - tag = Q_NEXT_TYPE(fotg210, fotg210->periodic[i]);
655 -
656 - temp = scnprintf(next, size, "%4d: ", i);
657 - size -= temp;
658 - next += temp;
659 -
660 - do {
661 - struct fotg210_qh_hw *hw;
662 -
663 - switch (hc32_to_cpu(fotg210, tag)) {
664 - case Q_TYPE_QH:
665 - hw = p.qh->hw;
666 - temp = scnprintf(next, size, " qh%d-%04x/%p",
667 - p.qh->period,
668 - hc32_to_cpup(fotg210,
669 - &hw->hw_info2)
670 - /* uframe masks */
671 - & (QH_CMASK | QH_SMASK),
672 - p.qh);
673 - size -= temp;
674 - next += temp;
675 - /* don't repeat what follows this qh */
676 - for (temp = 0; temp < seen_count; temp++) {
677 - if (seen[temp].ptr != p.ptr)
678 - continue;
679 - if (p.qh->qh_next.ptr) {
680 - temp = scnprintf(next, size,
681 - " ...");
682 - size -= temp;
683 - next += temp;
684 - }
685 - break;
686 - }
687 - /* show more info the first time around */
688 - if (temp == seen_count) {
689 - temp = output_buf_tds_dir(next,
690 - fotg210, hw,
691 - p.qh, size);
692 -
693 - if (seen_count < DBG_SCHED_LIMIT)
694 - seen[seen_count++].qh = p.qh;
695 - } else
696 - temp = 0;
697 - tag = Q_NEXT_TYPE(fotg210, hw->hw_next);
698 - p = p.qh->qh_next;
699 - break;
700 - case Q_TYPE_FSTN:
701 - temp = scnprintf(next, size,
702 - " fstn-%8x/%p",
703 - p.fstn->hw_prev, p.fstn);
704 - tag = Q_NEXT_TYPE(fotg210, p.fstn->hw_next);
705 - p = p.fstn->fstn_next;
706 - break;
707 - case Q_TYPE_ITD:
708 - temp = scnprintf(next, size,
709 - " itd/%p", p.itd);
710 - tag = Q_NEXT_TYPE(fotg210, p.itd->hw_next);
711 - p = p.itd->itd_next;
712 - break;
713 - }
714 - size -= temp;
715 - next += temp;
716 - } while (p.ptr);
717 -
718 - temp = scnprintf(next, size, "\n");
719 - size -= temp;
720 - next += temp;
721 - }
722 - spin_unlock_irqrestore(&fotg210->lock, flags);
723 - kfree(seen);
724 -
725 - return buf->alloc_size - size;
726 -}
727 -#undef DBG_SCHED_LIMIT
728 -
729 -static const char *rh_state_string(struct fotg210_hcd *fotg210)
730 -{
731 - switch (fotg210->rh_state) {
732 - case FOTG210_RH_HALTED:
733 - return "halted";
734 - case FOTG210_RH_SUSPENDED:
735 - return "suspended";
736 - case FOTG210_RH_RUNNING:
737 - return "running";
738 - case FOTG210_RH_STOPPING:
739 - return "stopping";
740 - }
741 - return "?";
742 -}
743 -
744 -static ssize_t fill_registers_buffer(struct debug_buffer *buf)
745 -{
746 - struct usb_hcd *hcd;
747 - struct fotg210_hcd *fotg210;
748 - unsigned long flags;
749 - unsigned temp, size, i;
750 - char *next, scratch[80];
751 - static const char fmt[] = "%*s\n";
752 - static const char label[] = "";
753 -
754 - hcd = bus_to_hcd(buf->bus);
755 - fotg210 = hcd_to_fotg210(hcd);
756 - next = buf->output_buf;
757 - size = buf->alloc_size;
758 -
759 - spin_lock_irqsave(&fotg210->lock, flags);
760 -
761 - if (!HCD_HW_ACCESSIBLE(hcd)) {
762 - size = scnprintf(next, size,
763 - "bus %s, device %s\n"
764 - "%s\n"
765 - "SUSPENDED(no register access)\n",
766 - hcd->self.controller->bus->name,
767 - dev_name(hcd->self.controller),
768 - hcd->product_desc);
769 - goto done;
770 - }
771 -
772 - /* Capability Registers */
773 - i = HC_VERSION(fotg210, fotg210_readl(fotg210,
774 - &fotg210->caps->hc_capbase));
775 - temp = scnprintf(next, size,
776 - "bus %s, device %s\n"
777 - "%s\n"
778 - "EHCI %x.%02x, rh state %s\n",
779 - hcd->self.controller->bus->name,
780 - dev_name(hcd->self.controller),
781 - hcd->product_desc,
782 - i >> 8, i & 0x0ff, rh_state_string(fotg210));
783 - size -= temp;
784 - next += temp;
785 -
786 - /* FIXME interpret both types of params */
787 - i = fotg210_readl(fotg210, &fotg210->caps->hcs_params);
788 - temp = scnprintf(next, size, "structural params 0x%08x\n", i);
789 - size -= temp;
790 - next += temp;
791 -
792 - i = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
793 - temp = scnprintf(next, size, "capability params 0x%08x\n", i);
794 - size -= temp;
795 - next += temp;
796 -
797 - /* Operational Registers */
798 - temp = dbg_status_buf(scratch, sizeof(scratch), label,
799 - fotg210_readl(fotg210, &fotg210->regs->status));
800 - temp = scnprintf(next, size, fmt, temp, scratch);
801 - size -= temp;
802 - next += temp;
803 -
804 - temp = dbg_command_buf(scratch, sizeof(scratch), label,
805 - fotg210_readl(fotg210, &fotg210->regs->command));
806 - temp = scnprintf(next, size, fmt, temp, scratch);
807 - size -= temp;
808 - next += temp;
809 -
810 - temp = dbg_intr_buf(scratch, sizeof(scratch), label,
811 - fotg210_readl(fotg210, &fotg210->regs->intr_enable));
812 - temp = scnprintf(next, size, fmt, temp, scratch);
813 - size -= temp;
814 - next += temp;
815 -
816 - temp = scnprintf(next, size, "uframe %04x\n",
817 - fotg210_read_frame_index(fotg210));
818 - size -= temp;
819 - next += temp;
820 -
821 - if (fotg210->async_unlink) {
822 - temp = scnprintf(next, size, "async unlink qh %p\n",
823 - fotg210->async_unlink);
824 - size -= temp;
825 - next += temp;
826 - }
827 -
828 -#ifdef FOTG210_STATS
829 - temp = scnprintf(next, size,
830 - "irq normal %ld err %ld iaa %ld(lost %ld)\n",
831 - fotg210->stats.normal, fotg210->stats.error,
832 - fotg210->stats.iaa, fotg210->stats.lost_iaa);
833 - size -= temp;
834 - next += temp;
835 -
836 - temp = scnprintf(next, size, "complete %ld unlink %ld\n",
837 - fotg210->stats.complete, fotg210->stats.unlink);
838 - size -= temp;
839 - next += temp;
840 -#endif
841 -
842 -done:
843 - spin_unlock_irqrestore(&fotg210->lock, flags);
844 -
845 - return buf->alloc_size - size;
846 -}
847 -
848 -static struct debug_buffer
849 -*alloc_buffer(struct usb_bus *bus, ssize_t (*fill_func)(struct debug_buffer *))
850 -{
851 - struct debug_buffer *buf;
852 -
853 - buf = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
854 -
855 - if (buf) {
856 - buf->bus = bus;
857 - buf->fill_func = fill_func;
858 - mutex_init(&buf->mutex);
859 - buf->alloc_size = PAGE_SIZE;
860 - }
861 -
862 - return buf;
863 -}
864 -
865 -static int fill_buffer(struct debug_buffer *buf)
866 -{
867 - int ret = 0;
868 -
869 - if (!buf->output_buf)
870 - buf->output_buf = vmalloc(buf->alloc_size);
871 -
872 - if (!buf->output_buf) {
873 - ret = -ENOMEM;
874 - goto out;
875 - }
876 -
877 - ret = buf->fill_func(buf);
878 -
879 - if (ret >= 0) {
880 - buf->count = ret;
881 - ret = 0;
882 - }
883 -
884 -out:
885 - return ret;
886 -}
887 -
888 -static ssize_t debug_output(struct file *file, char __user *user_buf,
889 - size_t len, loff_t *offset)
890 -{
891 - struct debug_buffer *buf = file->private_data;
892 - int ret = 0;
893 -
894 - mutex_lock(&buf->mutex);
895 - if (buf->count == 0) {
896 - ret = fill_buffer(buf);
897 - if (ret != 0) {
898 - mutex_unlock(&buf->mutex);
899 - goto out;
900 - }
901 - }
902 - mutex_unlock(&buf->mutex);
903 -
904 - ret = simple_read_from_buffer(user_buf, len, offset,
905 - buf->output_buf, buf->count);
906 -
907 -out:
908 - return ret;
909 -
910 -}
911 -
912 -static int debug_close(struct inode *inode, struct file *file)
913 -{
914 - struct debug_buffer *buf = file->private_data;
915 -
916 - if (buf) {
917 - vfree(buf->output_buf);
918 - kfree(buf);
919 - }
920 -
921 - return 0;
922 -}
923 -static int debug_async_open(struct inode *inode, struct file *file)
924 -{
925 - file->private_data = alloc_buffer(inode->i_private, fill_async_buffer);
926 -
927 - return file->private_data ? 0 : -ENOMEM;
928 -}
929 -
930 -static int debug_periodic_open(struct inode *inode, struct file *file)
931 -{
932 - struct debug_buffer *buf;
933 -
934 - buf = alloc_buffer(inode->i_private, fill_periodic_buffer);
935 - if (!buf)
936 - return -ENOMEM;
937 -
938 - buf->alloc_size = (sizeof(void *) == 4 ? 6 : 8)*PAGE_SIZE;
939 - file->private_data = buf;
940 - return 0;
941 -}
942 -
943 -static int debug_registers_open(struct inode *inode, struct file *file)
944 -{
945 - file->private_data = alloc_buffer(inode->i_private,
946 - fill_registers_buffer);
947 -
948 - return file->private_data ? 0 : -ENOMEM;
949 -}
950 -
951 -static inline void create_debug_files(struct fotg210_hcd *fotg210)
952 -{
953 - struct usb_bus *bus = &fotg210_to_hcd(fotg210)->self;
954 - struct dentry *root;
955 -
956 - root = debugfs_create_dir(bus->bus_name, fotg210_debug_root);
957 -
958 - debugfs_create_file("async", S_IRUGO, root, bus, &debug_async_fops);
959 - debugfs_create_file("periodic", S_IRUGO, root, bus,
960 - &debug_periodic_fops);
961 - debugfs_create_file("registers", S_IRUGO, root, bus,
962 - &debug_registers_fops);
963 -}
964 -
965 -static inline void remove_debug_files(struct fotg210_hcd *fotg210)
966 -{
967 - struct usb_bus *bus = &fotg210_to_hcd(fotg210)->self;
968 -
969 - debugfs_lookup_and_remove(bus->bus_name, fotg210_debug_root);
970 -}
971 -
972 -/* handshake - spin reading hc until handshake completes or fails
973 - * @ptr: address of hc register to be read
974 - * @mask: bits to look at in result of read
975 - * @done: value of those bits when handshake succeeds
976 - * @usec: timeout in microseconds
977 - *
978 - * Returns negative errno, or zero on success
979 - *
980 - * Success happens when the "mask" bits have the specified value (hardware
981 - * handshake done). There are two failure modes: "usec" have passed (major
982 - * hardware flakeout), or the register reads as all-ones (hardware removed).
983 - *
984 - * That last failure should_only happen in cases like physical cardbus eject
985 - * before driver shutdown. But it also seems to be caused by bugs in cardbus
986 - * bridge shutdown: shutting down the bridge before the devices using it.
987 - */
988 -static int handshake(struct fotg210_hcd *fotg210, void __iomem *ptr,
989 - u32 mask, u32 done, int usec)
990 -{
991 - u32 result;
992 - int ret;
993 -
994 - ret = readl_poll_timeout_atomic(ptr, result,
995 - ((result & mask) == done ||
996 - result == U32_MAX), 1, usec);
997 - if (result == U32_MAX) /* card removed */
998 - return -ENODEV;
999 -
1000 - return ret;
1001 -}
1002 -
1003 -/* Force HC to halt state from unknown (EHCI spec section 2.3).
1004 - * Must be called with interrupts enabled and the lock not held.
1005 - */
1006 -static int fotg210_halt(struct fotg210_hcd *fotg210)
1007 -{
1008 - u32 temp;
1009 -
1010 - spin_lock_irq(&fotg210->lock);
1011 -
1012 - /* disable any irqs left enabled by previous code */
1013 - fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable);
1014 -
1015 - /*
1016 - * This routine gets called during probe before fotg210->command
1017 - * has been initialized, so we can't rely on its value.
1018 - */
1019 - fotg210->command &= ~CMD_RUN;
1020 - temp = fotg210_readl(fotg210, &fotg210->regs->command);
1021 - temp &= ~(CMD_RUN | CMD_IAAD);
1022 - fotg210_writel(fotg210, temp, &fotg210->regs->command);
1023 -
1024 - spin_unlock_irq(&fotg210->lock);
1025 - synchronize_irq(fotg210_to_hcd(fotg210)->irq);
1026 -
1027 - return handshake(fotg210, &fotg210->regs->status,
1028 - STS_HALT, STS_HALT, 16 * 125);
1029 -}
1030 -
1031 -/* Reset a non-running (STS_HALT == 1) controller.
1032 - * Must be called with interrupts enabled and the lock not held.
1033 - */
1034 -static int fotg210_reset(struct fotg210_hcd *fotg210)
1035 -{
1036 - int retval;
1037 - u32 command = fotg210_readl(fotg210, &fotg210->regs->command);
1038 -
1039 - /* If the EHCI debug controller is active, special care must be
1040 - * taken before and after a host controller reset
1041 - */
1042 - if (fotg210->debug && !dbgp_reset_prep(fotg210_to_hcd(fotg210)))
1043 - fotg210->debug = NULL;
1044 -
1045 - command |= CMD_RESET;
1046 - dbg_cmd(fotg210, "reset", command);
1047 - fotg210_writel(fotg210, command, &fotg210->regs->command);
1048 - fotg210->rh_state = FOTG210_RH_HALTED;
1049 - fotg210->next_statechange = jiffies;
1050 - retval = handshake(fotg210, &fotg210->regs->command,
1051 - CMD_RESET, 0, 250 * 1000);
1052 -
1053 - if (retval)
1054 - return retval;
1055 -
1056 - if (fotg210->debug)
1057 - dbgp_external_startup(fotg210_to_hcd(fotg210));
1058 -
1059 - fotg210->port_c_suspend = fotg210->suspended_ports =
1060 - fotg210->resuming_ports = 0;
1061 - return retval;
1062 -}
1063 -
1064 -/* Idle the controller (turn off the schedules).
1065 - * Must be called with interrupts enabled and the lock not held.
1066 - */
1067 -static void fotg210_quiesce(struct fotg210_hcd *fotg210)
1068 -{
1069 - u32 temp;
1070 -
1071 - if (fotg210->rh_state != FOTG210_RH_RUNNING)
1072 - return;
1073 -
1074 - /* wait for any schedule enables/disables to take effect */
1075 - temp = (fotg210->command << 10) & (STS_ASS | STS_PSS);
1076 - handshake(fotg210, &fotg210->regs->status, STS_ASS | STS_PSS, temp,
1077 - 16 * 125);
1078 -
1079 - /* then disable anything that's still active */
1080 - spin_lock_irq(&fotg210->lock);
1081 - fotg210->command &= ~(CMD_ASE | CMD_PSE);
1082 - fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
1083 - spin_unlock_irq(&fotg210->lock);
1084 -
1085 - /* hardware can take 16 microframes to turn off ... */
1086 - handshake(fotg210, &fotg210->regs->status, STS_ASS | STS_PSS, 0,
1087 - 16 * 125);
1088 -}
1089 -
1090 -static void end_unlink_async(struct fotg210_hcd *fotg210);
1091 -static void unlink_empty_async(struct fotg210_hcd *fotg210);
1092 -static void fotg210_work(struct fotg210_hcd *fotg210);
1093 -static void start_unlink_intr(struct fotg210_hcd *fotg210,
1094 - struct fotg210_qh *qh);
1095 -static void end_unlink_intr(struct fotg210_hcd *fotg210, struct fotg210_qh *qh);
1096 -
1097 -/* Set a bit in the USBCMD register */
1098 -static void fotg210_set_command_bit(struct fotg210_hcd *fotg210, u32 bit)
1099 -{
1100 - fotg210->command |= bit;
1101 - fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
1102 -
1103 - /* unblock posted write */
1104 - fotg210_readl(fotg210, &fotg210->regs->command);
1105 -}
1106 -
1107 -/* Clear a bit in the USBCMD register */
1108 -static void fotg210_clear_command_bit(struct fotg210_hcd *fotg210, u32 bit)
1109 -{
1110 - fotg210->command &= ~bit;
1111 - fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
1112 -
1113 - /* unblock posted write */
1114 - fotg210_readl(fotg210, &fotg210->regs->command);
1115 -}
1116 -
1117 -/* EHCI timer support... Now using hrtimers.
1118 - *
1119 - * Lots of different events are triggered from fotg210->hrtimer. Whenever
1120 - * the timer routine runs, it checks each possible event; events that are
1121 - * currently enabled and whose expiration time has passed get handled.
1122 - * The set of enabled events is stored as a collection of bitflags in
1123 - * fotg210->enabled_hrtimer_events, and they are numbered in order of
1124 - * increasing delay values (ranging between 1 ms and 100 ms).
1125 - *
1126 - * Rather than implementing a sorted list or tree of all pending events,
1127 - * we keep track only of the lowest-numbered pending event, in
1128 - * fotg210->next_hrtimer_event. Whenever fotg210->hrtimer gets restarted, its
1129 - * expiration time is set to the timeout value for this event.
1130 - *
1131 - * As a result, events might not get handled right away; the actual delay
1132 - * could be anywhere up to twice the requested delay. This doesn't
1133 - * matter, because none of the events are especially time-critical. The
1134 - * ones that matter most all have a delay of 1 ms, so they will be
1135 - * handled after 2 ms at most, which is okay. In addition to this, we
1136 - * allow for an expiration range of 1 ms.
1137 - */
1138 -
1139 -/* Delay lengths for the hrtimer event types.
1140 - * Keep this list sorted by delay length, in the same order as
1141 - * the event types indexed by enum fotg210_hrtimer_event in fotg210.h.
1142 - */
1143 -static unsigned event_delays_ns[] = {
1144 - 1 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_POLL_ASS */
1145 - 1 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_POLL_PSS */
1146 - 1 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_POLL_DEAD */
1147 - 1125 * NSEC_PER_USEC, /* FOTG210_HRTIMER_UNLINK_INTR */
1148 - 2 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_FREE_ITDS */
1149 - 6 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_ASYNC_UNLINKS */
1150 - 10 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_IAA_WATCHDOG */
1151 - 10 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_DISABLE_PERIODIC */
1152 - 15 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_DISABLE_ASYNC */
1153 - 100 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_IO_WATCHDOG */
1154 -};
1155 -
1156 -/* Enable a pending hrtimer event */
1157 -static void fotg210_enable_event(struct fotg210_hcd *fotg210, unsigned event,
1158 - bool resched)
1159 -{
1160 - ktime_t *timeout = &fotg210->hr_timeouts[event];
1161 -
1162 - if (resched)
1163 - *timeout = ktime_add(ktime_get(), event_delays_ns[event]);
1164 - fotg210->enabled_hrtimer_events |= (1 << event);
1165 -
1166 - /* Track only the lowest-numbered pending event */
1167 - if (event < fotg210->next_hrtimer_event) {
1168 - fotg210->next_hrtimer_event = event;
1169 - hrtimer_start_range_ns(&fotg210->hrtimer, *timeout,
1170 - NSEC_PER_MSEC, HRTIMER_MODE_ABS);
1171 - }
1172 -}
1173 -
1174 -
1175 -/* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */
1176 -static void fotg210_poll_ASS(struct fotg210_hcd *fotg210)
1177 -{
1178 - unsigned actual, want;
1179 -
1180 - /* Don't enable anything if the controller isn't running (e.g., died) */
1181 - if (fotg210->rh_state != FOTG210_RH_RUNNING)
1182 - return;
1183 -
1184 - want = (fotg210->command & CMD_ASE) ? STS_ASS : 0;
1185 - actual = fotg210_readl(fotg210, &fotg210->regs->status) & STS_ASS;
1186 -
1187 - if (want != actual) {
1188 -
1189 - /* Poll again later, but give up after about 20 ms */
1190 - if (fotg210->ASS_poll_count++ < 20) {
1191 - fotg210_enable_event(fotg210, FOTG210_HRTIMER_POLL_ASS,
1192 - true);
1193 - return;
1194 - }
1195 - fotg210_dbg(fotg210, "Waited too long for the async schedule status (%x/%x), giving up\n",
1196 - want, actual);
1197 - }
1198 - fotg210->ASS_poll_count = 0;
1199 -
1200 - /* The status is up-to-date; restart or stop the schedule as needed */
1201 - if (want == 0) { /* Stopped */
1202 - if (fotg210->async_count > 0)
1203 - fotg210_set_command_bit(fotg210, CMD_ASE);
1204 -
1205 - } else { /* Running */
1206 - if (fotg210->async_count == 0) {
1207 -
1208 - /* Turn off the schedule after a while */
1209 - fotg210_enable_event(fotg210,
1210 - FOTG210_HRTIMER_DISABLE_ASYNC,
1211 - true);
1212 - }
1213 - }
1214 -}
1215 -
1216 -/* Turn off the async schedule after a brief delay */
1217 -static void fotg210_disable_ASE(struct fotg210_hcd *fotg210)
1218 -{
1219 - fotg210_clear_command_bit(fotg210, CMD_ASE);
1220 -}
1221 -
1222 -
1223 -/* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */
1224 -static void fotg210_poll_PSS(struct fotg210_hcd *fotg210)
1225 -{
1226 - unsigned actual, want;
1227 -
1228 - /* Don't do anything if the controller isn't running (e.g., died) */
1229 - if (fotg210->rh_state != FOTG210_RH_RUNNING)
1230 - return;
1231 -
1232 - want = (fotg210->command & CMD_PSE) ? STS_PSS : 0;
1233 - actual = fotg210_readl(fotg210, &fotg210->regs->status) & STS_PSS;
1234 -
1235 - if (want != actual) {
1236 -
1237 - /* Poll again later, but give up after about 20 ms */
1238 - if (fotg210->PSS_poll_count++ < 20) {
1239 - fotg210_enable_event(fotg210, FOTG210_HRTIMER_POLL_PSS,
1240 - true);
1241 - return;
1242 - }
1243 - fotg210_dbg(fotg210, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
1244 - want, actual);
1245 - }
1246 - fotg210->PSS_poll_count = 0;
1247 -
1248 - /* The status is up-to-date; restart or stop the schedule as needed */
1249 - if (want == 0) { /* Stopped */
1250 - if (fotg210->periodic_count > 0)
1251 - fotg210_set_command_bit(fotg210, CMD_PSE);
1252 -
1253 - } else { /* Running */
1254 - if (fotg210->periodic_count == 0) {
1255 -
1256 - /* Turn off the schedule after a while */
1257 - fotg210_enable_event(fotg210,
1258 - FOTG210_HRTIMER_DISABLE_PERIODIC,
1259 - true);
1260 - }
1261 - }
1262 -}
1263 -
1264 -/* Turn off the periodic schedule after a brief delay */
1265 -static void fotg210_disable_PSE(struct fotg210_hcd *fotg210)
1266 -{
1267 - fotg210_clear_command_bit(fotg210, CMD_PSE);
1268 -}
1269 -
1270 -
1271 -/* Poll the STS_HALT status bit; see when a dead controller stops */
1272 -static void fotg210_handle_controller_death(struct fotg210_hcd *fotg210)
1273 -{
1274 - if (!(fotg210_readl(fotg210, &fotg210->regs->status) & STS_HALT)) {
1275 -
1276 - /* Give up after a few milliseconds */
1277 - if (fotg210->died_poll_count++ < 5) {
1278 - /* Try again later */
1279 - fotg210_enable_event(fotg210,
1280 - FOTG210_HRTIMER_POLL_DEAD, true);
1281 - return;
1282 - }
1283 - fotg210_warn(fotg210, "Waited too long for the controller to stop, giving up\n");
1284 - }
1285 -
1286 - /* Clean up the mess */
1287 - fotg210->rh_state = FOTG210_RH_HALTED;
1288 - fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable);
1289 - fotg210_work(fotg210);
1290 - end_unlink_async(fotg210);
1291 -
1292 - /* Not in process context, so don't try to reset the controller */
1293 -}
1294 -
1295 -
1296 -/* Handle unlinked interrupt QHs once they are gone from the hardware */
1297 -static void fotg210_handle_intr_unlinks(struct fotg210_hcd *fotg210)
1298 -{
1299 - bool stopped = (fotg210->rh_state < FOTG210_RH_RUNNING);
1300 -
1301 - /*
1302 - * Process all the QHs on the intr_unlink list that were added
1303 - * before the current unlink cycle began. The list is in
1304 - * temporal order, so stop when we reach the first entry in the
1305 - * current cycle. But if the root hub isn't running then
1306 - * process all the QHs on the list.
1307 - */
1308 - fotg210->intr_unlinking = true;
1309 - while (fotg210->intr_unlink) {
1310 - struct fotg210_qh *qh = fotg210->intr_unlink;
1311 -
1312 - if (!stopped && qh->unlink_cycle == fotg210->intr_unlink_cycle)
1313 - break;
1314 - fotg210->intr_unlink = qh->unlink_next;
1315 - qh->unlink_next = NULL;
1316 - end_unlink_intr(fotg210, qh);
1317 - }
1318 -
1319 - /* Handle remaining entries later */
1320 - if (fotg210->intr_unlink) {
1321 - fotg210_enable_event(fotg210, FOTG210_HRTIMER_UNLINK_INTR,
1322 - true);
1323 - ++fotg210->intr_unlink_cycle;
1324 - }
1325 - fotg210->intr_unlinking = false;
1326 -}
1327 -
1328 -
1329 -/* Start another free-iTDs/siTDs cycle */
1330 -static void start_free_itds(struct fotg210_hcd *fotg210)
1331 -{
1332 - if (!(fotg210->enabled_hrtimer_events &
1333 - BIT(FOTG210_HRTIMER_FREE_ITDS))) {
1334 - fotg210->last_itd_to_free = list_entry(
1335 - fotg210->cached_itd_list.prev,
1336 - struct fotg210_itd, itd_list);
1337 - fotg210_enable_event(fotg210, FOTG210_HRTIMER_FREE_ITDS, true);
1338 - }
1339 -}
1340 -
1341 -/* Wait for controller to stop using old iTDs and siTDs */
1342 -static void end_free_itds(struct fotg210_hcd *fotg210)
1343 -{
1344 - struct fotg210_itd *itd, *n;
1345 -
1346 - if (fotg210->rh_state < FOTG210_RH_RUNNING)
1347 - fotg210->last_itd_to_free = NULL;
1348 -
1349 - list_for_each_entry_safe(itd, n, &fotg210->cached_itd_list, itd_list) {
1350 - list_del(&itd->itd_list);
1351 - dma_pool_free(fotg210->itd_pool, itd, itd->itd_dma);
1352 - if (itd == fotg210->last_itd_to_free)
1353 - break;
1354 - }
1355 -
1356 - if (!list_empty(&fotg210->cached_itd_list))
1357 - start_free_itds(fotg210);
1358 -}
1359 -
1360 -
1361 -/* Handle lost (or very late) IAA interrupts */
1362 -static void fotg210_iaa_watchdog(struct fotg210_hcd *fotg210)
1363 -{
1364 - if (fotg210->rh_state != FOTG210_RH_RUNNING)
1365 - return;
1366 -
1367 - /*
1368 - * Lost IAA irqs wedge things badly; seen first with a vt8235.
1369 - * So we need this watchdog, but must protect it against both
1370 - * (a) SMP races against real IAA firing and retriggering, and
1371 - * (b) clean HC shutdown, when IAA watchdog was pending.
1372 - */
1373 - if (fotg210->async_iaa) {
1374 - u32 cmd, status;
1375 -
1376 - /* If we get here, IAA is *REALLY* late. It's barely
1377 - * conceivable that the system is so busy that CMD_IAAD
1378 - * is still legitimately set, so let's be sure it's
1379 - * clear before we read STS_IAA. (The HC should clear
1380 - * CMD_IAAD when it sets STS_IAA.)
1381 - */
1382 - cmd = fotg210_readl(fotg210, &fotg210->regs->command);
1383 -
1384 - /*
1385 - * If IAA is set here it either legitimately triggered
1386 - * after the watchdog timer expired (_way_ late, so we'll
1387 - * still count it as lost) ... or a silicon erratum:
1388 - * - VIA seems to set IAA without triggering the IRQ;
1389 - * - IAAD potentially cleared without setting IAA.
1390 - */
1391 - status = fotg210_readl(fotg210, &fotg210->regs->status);
1392 - if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
1393 - INCR(fotg210->stats.lost_iaa);
1394 - fotg210_writel(fotg210, STS_IAA,
1395 - &fotg210->regs->status);
1396 - }
1397 -
1398 - fotg210_dbg(fotg210, "IAA watchdog: status %x cmd %x\n",
1399 - status, cmd);
1400 - end_unlink_async(fotg210);
1401 - }
1402 -}
1403 -
1404 -
1405 -/* Enable the I/O watchdog, if appropriate */
1406 -static void turn_on_io_watchdog(struct fotg210_hcd *fotg210)
1407 -{
1408 - /* Not needed if the controller isn't running or it's already enabled */
1409 - if (fotg210->rh_state != FOTG210_RH_RUNNING ||
1410 - (fotg210->enabled_hrtimer_events &
1411 - BIT(FOTG210_HRTIMER_IO_WATCHDOG)))
1412 - return;
1413 -
1414 - /*
1415 - * Isochronous transfers always need the watchdog.
1416 - * For other sorts we use it only if the flag is set.
1417 - */
1418 - if (fotg210->isoc_count > 0 || (fotg210->need_io_watchdog &&
1419 - fotg210->async_count + fotg210->intr_count > 0))
1420 - fotg210_enable_event(fotg210, FOTG210_HRTIMER_IO_WATCHDOG,
1421 - true);
1422 -}
1423 -
1424 -
1425 -/* Handler functions for the hrtimer event types.
1426 - * Keep this array in the same order as the event types indexed by
1427 - * enum fotg210_hrtimer_event in fotg210.h.
1428 - */
1429 -static void (*event_handlers[])(struct fotg210_hcd *) = {
1430 - fotg210_poll_ASS, /* FOTG210_HRTIMER_POLL_ASS */
1431 - fotg210_poll_PSS, /* FOTG210_HRTIMER_POLL_PSS */
1432 - fotg210_handle_controller_death, /* FOTG210_HRTIMER_POLL_DEAD */
1433 - fotg210_handle_intr_unlinks, /* FOTG210_HRTIMER_UNLINK_INTR */
1434 - end_free_itds, /* FOTG210_HRTIMER_FREE_ITDS */
1435 - unlink_empty_async, /* FOTG210_HRTIMER_ASYNC_UNLINKS */
1436 - fotg210_iaa_watchdog, /* FOTG210_HRTIMER_IAA_WATCHDOG */
1437 - fotg210_disable_PSE, /* FOTG210_HRTIMER_DISABLE_PERIODIC */
1438 - fotg210_disable_ASE, /* FOTG210_HRTIMER_DISABLE_ASYNC */
1439 - fotg210_work, /* FOTG210_HRTIMER_IO_WATCHDOG */
1440 -};
1441 -
1442 -static enum hrtimer_restart fotg210_hrtimer_func(struct hrtimer *t)
1443 -{
1444 - struct fotg210_hcd *fotg210 =
1445 - container_of(t, struct fotg210_hcd, hrtimer);
1446 - ktime_t now;
1447 - unsigned long events;
1448 - unsigned long flags;
1449 - unsigned e;
1450 -
1451 - spin_lock_irqsave(&fotg210->lock, flags);
1452 -
1453 - events = fotg210->enabled_hrtimer_events;
1454 - fotg210->enabled_hrtimer_events = 0;
1455 - fotg210->next_hrtimer_event = FOTG210_HRTIMER_NO_EVENT;
1456 -
1457 - /*
1458 - * Check each pending event. If its time has expired, handle
1459 - * the event; otherwise re-enable it.
1460 - */
1461 - now = ktime_get();
1462 - for_each_set_bit(e, &events, FOTG210_HRTIMER_NUM_EVENTS) {
1463 - if (ktime_compare(now, fotg210->hr_timeouts[e]) >= 0)
1464 - event_handlers[e](fotg210);
1465 - else
1466 - fotg210_enable_event(fotg210, e, false);
1467 - }
1468 -
1469 - spin_unlock_irqrestore(&fotg210->lock, flags);
1470 - return HRTIMER_NORESTART;
1471 -}
1472 -
1473 -#define fotg210_bus_suspend NULL
1474 -#define fotg210_bus_resume NULL
1475 -
1476 -static int check_reset_complete(struct fotg210_hcd *fotg210, int index,
1477 - u32 __iomem *status_reg, int port_status)
1478 -{
1479 - if (!(port_status & PORT_CONNECT))
1480 - return port_status;
1481 -
1482 - /* if reset finished and it's still not enabled -- handoff */
1483 - if (!(port_status & PORT_PE))
1484 - /* with integrated TT, there's nobody to hand it to! */
1485 - fotg210_dbg(fotg210, "Failed to enable port %d on root hub TT\n",
1486 - index + 1);
1487 - else
1488 - fotg210_dbg(fotg210, "port %d reset complete, port enabled\n",
1489 - index + 1);
1490 -
1491 - return port_status;
1492 -}
1493 -
1494 -
1495 -/* build "status change" packet (one or two bytes) from HC registers */
1496 -
1497 -static int fotg210_hub_status_data(struct usb_hcd *hcd, char *buf)
1498 -{
1499 - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
1500 - u32 temp, status;
1501 - u32 mask;
1502 - int retval = 1;
1503 - unsigned long flags;
1504 -
1505 - /* init status to no-changes */
1506 - buf[0] = 0;
1507 -
1508 - /* Inform the core about resumes-in-progress by returning
1509 - * a non-zero value even if there are no status changes.
1510 - */
1511 - status = fotg210->resuming_ports;
1512 -
1513 - mask = PORT_CSC | PORT_PEC;
1514 - /* PORT_RESUME from hardware ~= PORT_STAT_C_SUSPEND */
1515 -
1516 - /* no hub change reports (bit 0) for now (power, ...) */
1517 -
1518 - /* port N changes (bit N)? */
1519 - spin_lock_irqsave(&fotg210->lock, flags);
1520 -
1521 - temp = fotg210_readl(fotg210, &fotg210->regs->port_status);
1522 -
1523 - /*
1524 - * Return status information even for ports with OWNER set.
1525 - * Otherwise hub_wq wouldn't see the disconnect event when a
1526 - * high-speed device is switched over to the companion
1527 - * controller by the user.
1528 - */
1529 -
1530 - if ((temp & mask) != 0 || test_bit(0, &fotg210->port_c_suspend) ||
1531 - (fotg210->reset_done[0] &&
1532 - time_after_eq(jiffies, fotg210->reset_done[0]))) {
1533 - buf[0] |= 1 << 1;
1534 - status = STS_PCD;
1535 - }
1536 - /* FIXME autosuspend idle root hubs */
1537 - spin_unlock_irqrestore(&fotg210->lock, flags);
1538 - return status ? retval : 0;
1539 -}
1540 -
1541 -static void fotg210_hub_descriptor(struct fotg210_hcd *fotg210,
1542 - struct usb_hub_descriptor *desc)
1543 -{
1544 - int ports = HCS_N_PORTS(fotg210->hcs_params);
1545 - u16 temp;
1546 -
1547 - desc->bDescriptorType = USB_DT_HUB;
1548 - desc->bPwrOn2PwrGood = 10; /* fotg210 1.0, 2.3.9 says 20ms max */
1549 - desc->bHubContrCurrent = 0;
1550 -
1551 - desc->bNbrPorts = ports;
1552 - temp = 1 + (ports / 8);
1553 - desc->bDescLength = 7 + 2 * temp;
1554 -
1555 - /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
1556 - memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
1557 - memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
1558 -
1559 - temp = HUB_CHAR_INDV_PORT_OCPM; /* per-port overcurrent reporting */
1560 - temp |= HUB_CHAR_NO_LPSM; /* no power switching */
1561 - desc->wHubCharacteristics = cpu_to_le16(temp);
1562 -}
1563 -
1564 -static int fotg210_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1565 - u16 wIndex, char *buf, u16 wLength)
1566 -{
1567 - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
1568 - int ports = HCS_N_PORTS(fotg210->hcs_params);
1569 - u32 __iomem *status_reg = &fotg210->regs->port_status;
1570 - u32 temp, temp1, status;
1571 - unsigned long flags;
1572 - int retval = 0;
1573 - unsigned selector;
1574 -
1575 - /*
1576 - * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
1577 - * HCS_INDICATOR may say we can change LEDs to off/amber/green.
1578 - * (track current state ourselves) ... blink for diagnostics,
1579 - * power, "this is the one", etc. EHCI spec supports this.
1580 - */
1581 -
1582 - spin_lock_irqsave(&fotg210->lock, flags);
1583 - switch (typeReq) {
1584 - case ClearHubFeature:
1585 - switch (wValue) {
1586 - case C_HUB_LOCAL_POWER:
1587 - case C_HUB_OVER_CURRENT:
1588 - /* no hub-wide feature/status flags */
1589 - break;
1590 - default:
1591 - goto error;
1592 - }
1593 - break;
1594 - case ClearPortFeature:
1595 - if (!wIndex || wIndex > ports)
1596 - goto error;
1597 - wIndex--;
1598 - temp = fotg210_readl(fotg210, status_reg);
1599 - temp &= ~PORT_RWC_BITS;
1600 -
1601 - /*
1602 - * Even if OWNER is set, so the port is owned by the
1603 - * companion controller, hub_wq needs to be able to clear
1604 - * the port-change status bits (especially
1605 - * USB_PORT_STAT_C_CONNECTION).
1606 - */
1607 -
1608 - switch (wValue) {
1609 - case USB_PORT_FEAT_ENABLE:
1610 - fotg210_writel(fotg210, temp & ~PORT_PE, status_reg);
1611 - break;
1612 - case USB_PORT_FEAT_C_ENABLE:
1613 - fotg210_writel(fotg210, temp | PORT_PEC, status_reg);
1614 - break;
1615 - case USB_PORT_FEAT_SUSPEND:
1616 - if (temp & PORT_RESET)
1617 - goto error;
1618 - if (!(temp & PORT_SUSPEND))
1619 - break;
1620 - if ((temp & PORT_PE) == 0)
1621 - goto error;
1622 -
1623 - /* resume signaling for 20 msec */
1624 - fotg210_writel(fotg210, temp | PORT_RESUME, status_reg);
1625 - fotg210->reset_done[wIndex] = jiffies
1626 - + msecs_to_jiffies(USB_RESUME_TIMEOUT);
1627 - break;
1628 - case USB_PORT_FEAT_C_SUSPEND:
1629 - clear_bit(wIndex, &fotg210->port_c_suspend);
1630 - break;
1631 - case USB_PORT_FEAT_C_CONNECTION:
1632 - fotg210_writel(fotg210, temp | PORT_CSC, status_reg);
1633 - break;
1634 - case USB_PORT_FEAT_C_OVER_CURRENT:
1635 - fotg210_writel(fotg210, temp | OTGISR_OVC,
1636 - &fotg210->regs->otgisr);
1637 - break;
1638 - case USB_PORT_FEAT_C_RESET:
1639 - /* GetPortStatus clears reset */
1640 - break;
1641 - default:
1642 - goto error;
1643 - }
1644 - fotg210_readl(fotg210, &fotg210->regs->command);
1645 - break;
1646 - case GetHubDescriptor:
1647 - fotg210_hub_descriptor(fotg210, (struct usb_hub_descriptor *)
1648 - buf);
1649 - break;
1650 - case GetHubStatus:
1651 - /* no hub-wide feature/status flags */
1652 - memset(buf, 0, 4);
1653 - /*cpu_to_le32s ((u32 *) buf); */
1654 - break;
1655 - case GetPortStatus:
1656 - if (!wIndex || wIndex > ports)
1657 - goto error;
1658 - wIndex--;
1659 - status = 0;
1660 - temp = fotg210_readl(fotg210, status_reg);
1661 -
1662 - /* wPortChange bits */
1663 - if (temp & PORT_CSC)
1664 - status |= USB_PORT_STAT_C_CONNECTION << 16;
1665 - if (temp & PORT_PEC)
1666 - status |= USB_PORT_STAT_C_ENABLE << 16;
1667 -
1668 - temp1 = fotg210_readl(fotg210, &fotg210->regs->otgisr);
1669 - if (temp1 & OTGISR_OVC)
1670 - status |= USB_PORT_STAT_C_OVERCURRENT << 16;
1671 -
1672 - /* whoever resumes must GetPortStatus to complete it!! */
1673 - if (temp & PORT_RESUME) {
1674 -
1675 - /* Remote Wakeup received? */
1676 - if (!fotg210->reset_done[wIndex]) {
1677 - /* resume signaling for 20 msec */
1678 - fotg210->reset_done[wIndex] = jiffies
1679 - + msecs_to_jiffies(20);
1680 - /* check the port again */
1681 - mod_timer(&fotg210_to_hcd(fotg210)->rh_timer,
1682 - fotg210->reset_done[wIndex]);
1683 - }
1684 -
1685 - /* resume completed? */
1686 - else if (time_after_eq(jiffies,
1687 - fotg210->reset_done[wIndex])) {
1688 - clear_bit(wIndex, &fotg210->suspended_ports);
1689 - set_bit(wIndex, &fotg210->port_c_suspend);
1690 - fotg210->reset_done[wIndex] = 0;
1691 -
1692 - /* stop resume signaling */
1693 - temp = fotg210_readl(fotg210, status_reg);
1694 - fotg210_writel(fotg210, temp &
1695 - ~(PORT_RWC_BITS | PORT_RESUME),
1696 - status_reg);
1697 - clear_bit(wIndex, &fotg210->resuming_ports);
1698 - retval = handshake(fotg210, status_reg,
1699 - PORT_RESUME, 0, 2000);/* 2ms */
1700 - if (retval != 0) {
1701 - fotg210_err(fotg210,
1702 - "port %d resume error %d\n",
1703 - wIndex + 1, retval);
1704 - goto error;
1705 - }
1706 - temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
1707 - }
1708 - }
1709 -
1710 - /* whoever resets must GetPortStatus to complete it!! */
1711 - if ((temp & PORT_RESET) && time_after_eq(jiffies,
1712 - fotg210->reset_done[wIndex])) {
1713 - status |= USB_PORT_STAT_C_RESET << 16;
1714 - fotg210->reset_done[wIndex] = 0;
1715 - clear_bit(wIndex, &fotg210->resuming_ports);
1716 -
1717 - /* force reset to complete */
1718 - fotg210_writel(fotg210,
1719 - temp & ~(PORT_RWC_BITS | PORT_RESET),
1720 - status_reg);
1721 - /* REVISIT: some hardware needs 550+ usec to clear
1722 - * this bit; seems too long to spin routinely...
1723 - */
1724 - retval = handshake(fotg210, status_reg,
1725 - PORT_RESET, 0, 1000);
1726 - if (retval != 0) {
1727 - fotg210_err(fotg210, "port %d reset error %d\n",
1728 - wIndex + 1, retval);
1729 - goto error;
1730 - }
1731 -
1732 - /* see what we found out */
1733 - temp = check_reset_complete(fotg210, wIndex, status_reg,
1734 - fotg210_readl(fotg210, status_reg));
1735 -
1736 - /* restart schedule */
1737 - fotg210->command |= CMD_RUN;
1738 - fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
1739 - }
1740 -
1741 - if (!(temp & (PORT_RESUME|PORT_RESET))) {
1742 - fotg210->reset_done[wIndex] = 0;
1743 - clear_bit(wIndex, &fotg210->resuming_ports);
1744 - }
1745 -
1746 - /* transfer dedicated ports to the companion hc */
1747 - if ((temp & PORT_CONNECT) &&
1748 - test_bit(wIndex, &fotg210->companion_ports)) {
1749 - temp &= ~PORT_RWC_BITS;
1750 - fotg210_writel(fotg210, temp, status_reg);
1751 - fotg210_dbg(fotg210, "port %d --> companion\n",
1752 - wIndex + 1);
1753 - temp = fotg210_readl(fotg210, status_reg);
1754 - }
1755 -
1756 - /*
1757 - * Even if OWNER is set, there's no harm letting hub_wq
1758 - * see the wPortStatus values (they should all be 0 except
1759 - * for PORT_POWER anyway).
1760 - */
1761 -
1762 - if (temp & PORT_CONNECT) {
1763 - status |= USB_PORT_STAT_CONNECTION;
1764 - status |= fotg210_port_speed(fotg210, temp);
1765 - }
1766 - if (temp & PORT_PE)
1767 - status |= USB_PORT_STAT_ENABLE;
1768 -
1769 - /* maybe the port was unsuspended without our knowledge */
1770 - if (temp & (PORT_SUSPEND|PORT_RESUME)) {
1771 - status |= USB_PORT_STAT_SUSPEND;
1772 - } else if (test_bit(wIndex, &fotg210->suspended_ports)) {
1773 - clear_bit(wIndex, &fotg210->suspended_ports);
1774 - clear_bit(wIndex, &fotg210->resuming_ports);
1775 - fotg210->reset_done[wIndex] = 0;
1776 - if (temp & PORT_PE)
1777 - set_bit(wIndex, &fotg210->port_c_suspend);
1778 - }
1779 -
1780 - temp1 = fotg210_readl(fotg210, &fotg210->regs->otgisr);
1781 - if (temp1 & OTGISR_OVC)
1782 - status |= USB_PORT_STAT_OVERCURRENT;
1783 - if (temp & PORT_RESET)
1784 - status |= USB_PORT_STAT_RESET;
1785 - if (test_bit(wIndex, &fotg210->port_c_suspend))
1786 - status |= USB_PORT_STAT_C_SUSPEND << 16;
1787 -
1788 - if (status & ~0xffff) /* only if wPortChange is interesting */
1789 - dbg_port(fotg210, "GetStatus", wIndex + 1, temp);
1790 - put_unaligned_le32(status, buf);
1791 - break;
1792 - case SetHubFeature:
1793 - switch (wValue) {
1794 - case C_HUB_LOCAL_POWER:
1795 - case C_HUB_OVER_CURRENT:
1796 - /* no hub-wide feature/status flags */
1797 - break;
1798 - default:
1799 - goto error;
1800 - }
1801 - break;
1802 - case SetPortFeature:
1803 - selector = wIndex >> 8;
1804 - wIndex &= 0xff;
1805 -
1806 - if (!wIndex || wIndex > ports)
1807 - goto error;
1808 - wIndex--;
1809 - temp = fotg210_readl(fotg210, status_reg);
1810 - temp &= ~PORT_RWC_BITS;
1811 - switch (wValue) {
1812 - case USB_PORT_FEAT_SUSPEND:
1813 - if ((temp & PORT_PE) == 0
1814 - || (temp & PORT_RESET) != 0)
1815 - goto error;
1816 -
1817 - /* After above check the port must be connected.
1818 - * Set appropriate bit thus could put phy into low power
1819 - * mode if we have hostpc feature
1820 - */
1821 - fotg210_writel(fotg210, temp | PORT_SUSPEND,
1822 - status_reg);
1823 - set_bit(wIndex, &fotg210->suspended_ports);
1824 - break;
1825 - case USB_PORT_FEAT_RESET:
1826 - if (temp & PORT_RESUME)
1827 - goto error;
1828 - /* line status bits may report this as low speed,
1829 - * which can be fine if this root hub has a
1830 - * transaction translator built in.
1831 - */
1832 - fotg210_dbg(fotg210, "port %d reset\n", wIndex + 1);
1833 - temp |= PORT_RESET;
1834 - temp &= ~PORT_PE;
1835 -
1836 - /*
1837 - * caller must wait, then call GetPortStatus
1838 - * usb 2.0 spec says 50 ms resets on root
1839 - */
1840 - fotg210->reset_done[wIndex] = jiffies
1841 - + msecs_to_jiffies(50);
1842 - fotg210_writel(fotg210, temp, status_reg);
1843 - break;
1844 -
1845 - /* For downstream facing ports (these): one hub port is put
1846 - * into test mode according to USB2 11.24.2.13, then the hub
1847 - * must be reset (which for root hub now means rmmod+modprobe,
1848 - * or else system reboot). See EHCI 2.3.9 and 4.14 for info
1849 - * about the EHCI-specific stuff.
1850 - */
1851 - case USB_PORT_FEAT_TEST:
1852 - if (!selector || selector > 5)
1853 - goto error;
1854 - spin_unlock_irqrestore(&fotg210->lock, flags);
1855 - fotg210_quiesce(fotg210);
1856 - spin_lock_irqsave(&fotg210->lock, flags);
1857 -
1858 - /* Put all enabled ports into suspend */
1859 - temp = fotg210_readl(fotg210, status_reg) &
1860 - ~PORT_RWC_BITS;
1861 - if (temp & PORT_PE)
1862 - fotg210_writel(fotg210, temp | PORT_SUSPEND,
1863 - status_reg);
1864 -
1865 - spin_unlock_irqrestore(&fotg210->lock, flags);
1866 - fotg210_halt(fotg210);
1867 - spin_lock_irqsave(&fotg210->lock, flags);
1868 -
1869 - temp = fotg210_readl(fotg210, status_reg);
1870 - temp |= selector << 16;
1871 - fotg210_writel(fotg210, temp, status_reg);
1872 - break;
1873 -
1874 - default:
1875 - goto error;
1876 - }
1877 - fotg210_readl(fotg210, &fotg210->regs->command);
1878 - break;
1879 -
1880 - default:
1881 -error:
1882 - /* "stall" on error */
1883 - retval = -EPIPE;
1884 - }
1885 - spin_unlock_irqrestore(&fotg210->lock, flags);
1886 - return retval;
1887 -}
1888 -
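/*
 * Editor's note (not part of the patch): GetPortStatus above assembles a
 * 32-bit value with wPortStatus in the low 16 bits and the wPortChange bits
 * shifted into the high 16 bits before put_unaligned_le32().  A minimal
 * user-space sketch of that layout, assuming the standard USB 2.0 hub-class
 * bit positions (spec table 11-21); the PSTAT_* names below are local
 * stand-ins for the kernel's USB_PORT_STAT_* constants.
 */
#include <stdint.h>
#include <stdio.h>

#define PSTAT_CONNECTION	0x0001
#define PSTAT_ENABLE		0x0002
#define PSTAT_SUSPEND		0x0004
#define PSTAT_OVERCURRENT	0x0008
#define PSTAT_RESET		0x0010

static void decode_port_status(uint32_t status)
{
	uint16_t wPortStatus = status & 0xffff;	/* current port state */
	uint16_t wPortChange = status >> 16;	/* sticky change bits */

	printf("connected=%d enabled=%d suspended=%d reset=%d\n",
	       !!(wPortStatus & PSTAT_CONNECTION),
	       !!(wPortStatus & PSTAT_ENABLE),
	       !!(wPortStatus & PSTAT_SUSPEND),
	       !!(wPortStatus & PSTAT_RESET));
	printf("change: connect=%d enable=%d suspend=%d overcurrent=%d\n",
	       !!(wPortChange & PSTAT_CONNECTION),
	       !!(wPortChange & PSTAT_ENABLE),
	       !!(wPortChange & PSTAT_SUSPEND),
	       !!(wPortChange & PSTAT_OVERCURRENT));
}

int main(void)
{
	/* e.g. connected + enabled, with a pending connect-change bit */
	decode_port_status((PSTAT_CONNECTION << 16) |
			   PSTAT_CONNECTION | PSTAT_ENABLE);
	return 0;
}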
1889 -static void __maybe_unused fotg210_relinquish_port(struct usb_hcd *hcd,
1890 - int portnum)
1891 -{
1892 - return;
1893 -}
1894 -
1895 -static int __maybe_unused fotg210_port_handed_over(struct usb_hcd *hcd,
1896 - int portnum)
1897 -{
1898 - return 0;
1899 -}
1900 -
1901 -/* There's basically three types of memory:
1902 - * - data used only by the HCD ... kmalloc is fine
1903 - * - async and periodic schedules, shared by HC and HCD ... these
1904 - * need to use dma_pool or dma_alloc_coherent
1905 - * - driver buffers, read/written by HC ... single shot DMA mapped
1906 - *
1907 - * There's also "register" data (e.g. PCI or SOC), which is memory mapped.
1908 - * No memory seen by this driver is pageable.
1909 - */
1910 -
1911 -/* Allocate the key transfer structures from the previously allocated pool */
1912 -static inline void fotg210_qtd_init(struct fotg210_hcd *fotg210,
1913 - struct fotg210_qtd *qtd, dma_addr_t dma)
1914 -{
1915 - memset(qtd, 0, sizeof(*qtd));
1916 - qtd->qtd_dma = dma;
1917 - qtd->hw_token = cpu_to_hc32(fotg210, QTD_STS_HALT);
1918 - qtd->hw_next = FOTG210_LIST_END(fotg210);
1919 - qtd->hw_alt_next = FOTG210_LIST_END(fotg210);
1920 - INIT_LIST_HEAD(&qtd->qtd_list);
1921 -}
1922 -
1923 -static struct fotg210_qtd *fotg210_qtd_alloc(struct fotg210_hcd *fotg210,
1924 - gfp_t flags)
1925 -{
1926 - struct fotg210_qtd *qtd;
1927 - dma_addr_t dma;
1928 -
1929 - qtd = dma_pool_alloc(fotg210->qtd_pool, flags, &dma);
1930 - if (qtd != NULL)
1931 - fotg210_qtd_init(fotg210, qtd, dma);
1932 -
1933 - return qtd;
1934 -}
1935 -
1936 -static inline void fotg210_qtd_free(struct fotg210_hcd *fotg210,
1937 - struct fotg210_qtd *qtd)
1938 -{
1939 - dma_pool_free(fotg210->qtd_pool, qtd, qtd->qtd_dma);
1940 -}
1941 -
1942 -
1943 -static void qh_destroy(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
1944 -{
1945 - /* clean qtds first, and know this is not linked */
1946 - if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) {
1947 - fotg210_dbg(fotg210, "unused qh not empty!\n");
1948 - BUG();
1949 - }
1950 - if (qh->dummy)
1951 - fotg210_qtd_free(fotg210, qh->dummy);
1952 - dma_pool_free(fotg210->qh_pool, qh->hw, qh->qh_dma);
1953 - kfree(qh);
1954 -}
1955 -
1956 -static struct fotg210_qh *fotg210_qh_alloc(struct fotg210_hcd *fotg210,
1957 - gfp_t flags)
1958 -{
1959 - struct fotg210_qh *qh;
1960 - dma_addr_t dma;
1961 -
1962 - qh = kzalloc(sizeof(*qh), GFP_ATOMIC);
1963 - if (!qh)
1964 - goto done;
1965 - qh->hw = (struct fotg210_qh_hw *)
1966 - dma_pool_zalloc(fotg210->qh_pool, flags, &dma);
1967 - if (!qh->hw)
1968 - goto fail;
1969 - qh->qh_dma = dma;
1970 - INIT_LIST_HEAD(&qh->qtd_list);
1971 -
1972 - /* dummy td enables safe urb queuing */
1973 - qh->dummy = fotg210_qtd_alloc(fotg210, flags);
1974 - if (qh->dummy == NULL) {
1975 - fotg210_dbg(fotg210, "no dummy td\n");
1976 - goto fail1;
1977 - }
1978 -done:
1979 - return qh;
1980 -fail1:
1981 - dma_pool_free(fotg210->qh_pool, qh->hw, qh->qh_dma);
1982 -fail:
1983 - kfree(qh);
1984 - return NULL;
1985 -}
1986 -
1987 -/* The queue heads and transfer descriptors are managed from pools tied
1988 - * to each of the "per device" structures.
1989 - * This is the initialisation and cleanup code.
1990 - */
1991 -
1992 -static void fotg210_mem_cleanup(struct fotg210_hcd *fotg210)
1993 -{
1994 - if (fotg210->async)
1995 - qh_destroy(fotg210, fotg210->async);
1996 - fotg210->async = NULL;
1997 -
1998 - if (fotg210->dummy)
1999 - qh_destroy(fotg210, fotg210->dummy);
2000 - fotg210->dummy = NULL;
2001 -
2002 - /* DMA consistent memory and pools */
2003 - dma_pool_destroy(fotg210->qtd_pool);
2004 - fotg210->qtd_pool = NULL;
2005 -
2006 - dma_pool_destroy(fotg210->qh_pool);
2007 - fotg210->qh_pool = NULL;
2008 -
2009 - dma_pool_destroy(fotg210->itd_pool);
2010 - fotg210->itd_pool = NULL;
2011 -
2012 - if (fotg210->periodic)
2013 - dma_free_coherent(fotg210_to_hcd(fotg210)->self.controller,
2014 - fotg210->periodic_size * sizeof(u32),
2015 - fotg210->periodic, fotg210->periodic_dma);
2016 - fotg210->periodic = NULL;
2017 -
2018 - /* shadow periodic table */
2019 - kfree(fotg210->pshadow);
2020 - fotg210->pshadow = NULL;
2021 -}
2022 -
2023 -/* remember to add cleanup code (above) if you add anything here */
2024 -static int fotg210_mem_init(struct fotg210_hcd *fotg210, gfp_t flags)
2025 -{
2026 - int i;
2027 -
2028 - /* QTDs for control/bulk/intr transfers */
2029 - fotg210->qtd_pool = dma_pool_create("fotg210_qtd",
2030 - fotg210_to_hcd(fotg210)->self.controller,
2031 - sizeof(struct fotg210_qtd),
2032 - 32 /* byte alignment (for hw parts) */,
2033 - 4096 /* can't cross 4K */);
2034 - if (!fotg210->qtd_pool)
2035 - goto fail;
2036 -
2037 - /* QHs for control/bulk/intr transfers */
2038 - fotg210->qh_pool = dma_pool_create("fotg210_qh",
2039 - fotg210_to_hcd(fotg210)->self.controller,
2040 - sizeof(struct fotg210_qh_hw),
2041 - 32 /* byte alignment (for hw parts) */,
2042 - 4096 /* can't cross 4K */);
2043 - if (!fotg210->qh_pool)
2044 - goto fail;
2045 -
2046 - fotg210->async = fotg210_qh_alloc(fotg210, flags);
2047 - if (!fotg210->async)
2048 - goto fail;
2049 -
2050 - /* ITD for high speed ISO transfers */
2051 - fotg210->itd_pool = dma_pool_create("fotg210_itd",
2052 - fotg210_to_hcd(fotg210)->self.controller,
2053 - sizeof(struct fotg210_itd),
2054 - 64 /* byte alignment (for hw parts) */,
2055 - 4096 /* can't cross 4K */);
2056 - if (!fotg210->itd_pool)
2057 - goto fail;
2058 -
2059 - /* Hardware periodic table */
2060 - fotg210->periodic =
2061 - dma_alloc_coherent(fotg210_to_hcd(fotg210)->self.controller,
2062 - fotg210->periodic_size * sizeof(__le32),
2063 - &fotg210->periodic_dma, 0);
2064 - if (fotg210->periodic == NULL)
2065 - goto fail;
2066 -
2067 - for (i = 0; i < fotg210->periodic_size; i++)
2068 - fotg210->periodic[i] = FOTG210_LIST_END(fotg210);
2069 -
2070 - /* software shadow of hardware table */
2071 - fotg210->pshadow = kcalloc(fotg210->periodic_size, sizeof(void *),
2072 - flags);
2073 - if (fotg210->pshadow != NULL)
2074 - return 0;
2075 -
2076 -fail:
2077 - fotg210_dbg(fotg210, "couldn't init memory\n");
2078 - fotg210_mem_cleanup(fotg210);
2079 - return -ENOMEM;
2080 -}
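/*
 * Editor's note (not part of the patch): the dma_pool_create() calls in
 * fotg210_mem_init() above request 32- or 64-byte alignment and a 4096-byte
 * boundary, i.e. no descriptor may straddle a 4 KiB page.  A minimal sketch
 * of that placement invariant; the helper names are hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool crosses_4k(uint64_t dma, size_t len)
{
	/* first and last byte must live in the same 4 KiB page */
	return (dma >> 12) != ((dma + len - 1) >> 12);
}

static bool descriptor_placement_ok(uint64_t dma, size_t len, size_t align)
{
	return (dma % align) == 0 && !crosses_4k(dma, len);
}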
2081 -/* EHCI hardware queue manipulation ... the core. QH/QTD manipulation.
2082 - *
2083 - * Control, bulk, and interrupt traffic all use "qh" lists. They list "qtd"
2084 - * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
2085 - * buffers needed for the larger number). We use one QH per endpoint, queue
2086 - * multiple urbs (all three types) per endpoint. URBs may need several qtds.
2087 - *
2088 - * ISO traffic uses "ISO TD" (itd) records, and (along with
2089 - * interrupts) needs careful scheduling. Performance improvements can be
2090 - * an ongoing challenge. That's in "ehci-sched.c".
2091 - *
2092 - * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
2093 - * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
2094 - * (b) special fields in qh entries or (c) split iso entries. TTs will
2095 - * buffer low/full speed data so the host collects it at high speed.
2096 - */
2097 -
2098 -/* fill a qtd, returning how much of the buffer we were able to queue up */
2099 -static int qtd_fill(struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd,
2100 - dma_addr_t buf, size_t len, int token, int maxpacket)
2101 -{
2102 - int i, count;
2103 - u64 addr = buf;
2104 -
2105 - /* one buffer entry per 4K ... first might be short or unaligned */
2106 - qtd->hw_buf[0] = cpu_to_hc32(fotg210, (u32)addr);
2107 - qtd->hw_buf_hi[0] = cpu_to_hc32(fotg210, (u32)(addr >> 32));
2108 - count = 0x1000 - (buf & 0x0fff); /* rest of that page */
2109 - if (likely(len < count)) /* ... iff needed */
2110 - count = len;
2111 - else {
2112 - buf += 0x1000;
2113 - buf &= ~0x0fff;
2114 -
2115 - /* per-qtd limit: from 16K to 20K (best alignment) */
2116 - for (i = 1; count < len && i < 5; i++) {
2117 - addr = buf;
2118 - qtd->hw_buf[i] = cpu_to_hc32(fotg210, (u32)addr);
2119 - qtd->hw_buf_hi[i] = cpu_to_hc32(fotg210,
2120 - (u32)(addr >> 32));
2121 - buf += 0x1000;
2122 - if ((count + 0x1000) < len)
2123 - count += 0x1000;
2124 - else
2125 - count = len;
2126 - }
2127 -
2128 - /* short packets may only terminate transfers */
2129 - if (count != len)
2130 - count -= (count % maxpacket);
2131 - }
2132 - qtd->hw_token = cpu_to_hc32(fotg210, (count << 16) | token);
2133 - qtd->length = count;
2134 -
2135 - return count;
2136 -}
2137 -
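/*
 * Editor's note (not part of the patch): qtd_fill() above packs at most five
 * 4 KiB page pointers into one qTD, so a single qTD carries 16 KiB when the
 * buffer is unaligned and up to 20 KiB when it starts on a page boundary,
 * and a partial fill is trimmed back to a multiple of maxpacket so only the
 * last qTD of a transfer can end in a short packet.  A stand-alone sketch of
 * that capacity calculation:
 */
#include <stddef.h>
#include <stdint.h>

static size_t qtd_capacity(uint64_t buf, size_t len, size_t maxpacket)
{
	size_t count = 0x1000 - (buf & 0x0fff);	/* rest of the first page */
	int i;

	if (len < count)
		return len;

	for (i = 1; count < len && i < 5; i++)	/* up to four more pages */
		count = (count + 0x1000 < len) ? count + 0x1000 : len;

	if (count != len && maxpacket)		/* short fill: end on a packet */
		count -= count % maxpacket;
	return count;
}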
2138 -static inline void qh_update(struct fotg210_hcd *fotg210,
2139 - struct fotg210_qh *qh, struct fotg210_qtd *qtd)
2140 -{
2141 - struct fotg210_qh_hw *hw = qh->hw;
2142 -
2143 - /* writes to an active overlay are unsafe */
2144 - BUG_ON(qh->qh_state != QH_STATE_IDLE);
2145 -
2146 - hw->hw_qtd_next = QTD_NEXT(fotg210, qtd->qtd_dma);
2147 - hw->hw_alt_next = FOTG210_LIST_END(fotg210);
2148 -
2149 - /* Except for control endpoints, we make hardware maintain data
2150 - * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
2151 - * and set the pseudo-toggle in udev. Only usb_clear_halt() will
2152 - * ever clear it.
2153 - */
2154 - if (!(hw->hw_info1 & cpu_to_hc32(fotg210, QH_TOGGLE_CTL))) {
2155 - unsigned is_out, epnum;
2156 -
2157 - is_out = qh->is_out;
2158 - epnum = (hc32_to_cpup(fotg210, &hw->hw_info1) >> 8) & 0x0f;
2159 - if (unlikely(!usb_gettoggle(qh->dev, epnum, is_out))) {
2160 - hw->hw_token &= ~cpu_to_hc32(fotg210, QTD_TOGGLE);
2161 - usb_settoggle(qh->dev, epnum, is_out, 1);
2162 - }
2163 - }
2164 -
2165 - hw->hw_token &= cpu_to_hc32(fotg210, QTD_TOGGLE | QTD_STS_PING);
2166 -}
2167 -
2168 -/* if it weren't for a common silicon quirk (writing the dummy into the qh
2169 - * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
2170 - * recovery (including urb dequeue) would need software changes to a QH...
2171 - */
2172 -static void qh_refresh(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
2173 -{
2174 - struct fotg210_qtd *qtd;
2175 -
2176 - if (list_empty(&qh->qtd_list))
2177 - qtd = qh->dummy;
2178 - else {
2179 - qtd = list_entry(qh->qtd_list.next,
2180 - struct fotg210_qtd, qtd_list);
2181 - /*
2182 - * first qtd may already be partially processed.
2183 - * If we come here during unlink, the QH overlay region
2184 - * might have reference to the just unlinked qtd. The
2185 - * qtd is updated in qh_completions(). Update the QH
2186 - * overlay here.
2187 - */
2188 - if (cpu_to_hc32(fotg210, qtd->qtd_dma) == qh->hw->hw_current) {
2189 - qh->hw->hw_qtd_next = qtd->hw_next;
2190 - qtd = NULL;
2191 - }
2192 - }
2193 -
2194 - if (qtd)
2195 - qh_update(fotg210, qh, qtd);
2196 -}
2197 -
2198 -static void qh_link_async(struct fotg210_hcd *fotg210, struct fotg210_qh *qh);
2199 -
2200 -static void fotg210_clear_tt_buffer_complete(struct usb_hcd *hcd,
2201 - struct usb_host_endpoint *ep)
2202 -{
2203 - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
2204 - struct fotg210_qh *qh = ep->hcpriv;
2205 - unsigned long flags;
2206 -
2207 - spin_lock_irqsave(&fotg210->lock, flags);
2208 - qh->clearing_tt = 0;
2209 - if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
2210 - && fotg210->rh_state == FOTG210_RH_RUNNING)
2211 - qh_link_async(fotg210, qh);
2212 - spin_unlock_irqrestore(&fotg210->lock, flags);
2213 -}
2214 -
2215 -static void fotg210_clear_tt_buffer(struct fotg210_hcd *fotg210,
2216 - struct fotg210_qh *qh, struct urb *urb, u32 token)
2217 -{
2218 -
2219 - /* If an async split transaction gets an error or is unlinked,
2220 - * the TT buffer may be left in an indeterminate state. We
2221 - * have to clear the TT buffer.
2222 - *
2223 - * Note: this routine is never called for Isochronous transfers.
2224 - */
2225 - if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
2226 - struct usb_device *tt = urb->dev->tt->hub;
2227 -
2228 - dev_dbg(&tt->dev,
2229 - "clear tt buffer port %d, a%d ep%d t%08x\n",
2230 - urb->dev->ttport, urb->dev->devnum,
2231 - usb_pipeendpoint(urb->pipe), token);
2232 -
2233 - if (urb->dev->tt->hub !=
2234 - fotg210_to_hcd(fotg210)->self.root_hub) {
2235 - if (usb_hub_clear_tt_buffer(urb) == 0)
2236 - qh->clearing_tt = 1;
2237 - }
2238 - }
2239 -}
2240 -
2241 -static int qtd_copy_status(struct fotg210_hcd *fotg210, struct urb *urb,
2242 - size_t length, u32 token)
2243 -{
2244 - int status = -EINPROGRESS;
2245 -
2246 - /* count IN/OUT bytes, not SETUP (even short packets) */
2247 - if (likely(QTD_PID(token) != 2))
2248 - urb->actual_length += length - QTD_LENGTH(token);
2249 -
2250 - /* don't modify error codes */
2251 - if (unlikely(urb->unlinked))
2252 - return status;
2253 -
2254 - /* force cleanup after short read; not always an error */
2255 - if (unlikely(IS_SHORT_READ(token)))
2256 - status = -EREMOTEIO;
2257 -
2258 - /* serious "can't proceed" faults reported by the hardware */
2259 - if (token & QTD_STS_HALT) {
2260 - if (token & QTD_STS_BABBLE) {
2261 - /* FIXME "must" disable babbling device's port too */
2262 - status = -EOVERFLOW;
2263 - /* CERR nonzero + halt --> stall */
2264 - } else if (QTD_CERR(token)) {
2265 - status = -EPIPE;
2266 -
2267 - /* In theory, more than one of the following bits can be set
2268 - * since they are sticky and the transaction is retried.
2269 - * Which to test first is rather arbitrary.
2270 - */
2271 - } else if (token & QTD_STS_MMF) {
2272 - /* fs/ls interrupt xfer missed the complete-split */
2273 - status = -EPROTO;
2274 - } else if (token & QTD_STS_DBE) {
2275 - status = (QTD_PID(token) == 1) /* IN ? */
2276 - ? -ENOSR /* hc couldn't read data */
2277 - : -ECOMM; /* hc couldn't write data */
2278 - } else if (token & QTD_STS_XACT) {
2279 - /* timeout, bad CRC, wrong PID, etc */
2280 - fotg210_dbg(fotg210, "devpath %s ep%d%s 3strikes\n",
2281 - urb->dev->devpath,
2282 - usb_pipeendpoint(urb->pipe),
2283 - usb_pipein(urb->pipe) ? "in" : "out");
2284 - status = -EPROTO;
2285 - } else { /* unknown */
2286 - status = -EPROTO;
2287 - }
2288 -
2289 - fotg210_dbg(fotg210,
2290 - "dev%d ep%d%s qtd token %08x --> status %d\n",
2291 - usb_pipedevice(urb->pipe),
2292 - usb_pipeendpoint(urb->pipe),
2293 - usb_pipein(urb->pipe) ? "in" : "out",
2294 - token, status);
2295 - }
2296 -
2297 - return status;
2298 -}
2299 -
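/*
 * Editor's note (not part of the patch): a condensed sketch of the
 * halted-token error mapping performed by qtd_copy_status() above, using
 * literal stand-ins for the QTD_STS_* masks (the standard EHCI qTD token
 * status bits are assumed: Halted 0x40, Data Buffer Error 0x20, Babble 0x10,
 * XactErr 0x08, Missed uFrame 0x04).
 */
#include <errno.h>
#include <stdint.h>

static int halted_token_to_errno(uint32_t token, int cerr, int is_in)
{
	if (!(token & 0x40))		/* not halted: no fatal error here */
		return 0;
	if (token & 0x10)		/* babble */
		return -EOVERFLOW;
	if (cerr)			/* CERR nonzero + halt --> stall */
		return -EPIPE;
	if (token & 0x04)		/* missed the complete-split */
		return -EPROTO;
	if (token & 0x20)		/* data buffer error */
		return is_in ? -ENOSR : -ECOMM;
	return -EPROTO;			/* XactErr or unknown */
}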
2300 -static void fotg210_urb_done(struct fotg210_hcd *fotg210, struct urb *urb,
2301 - int status)
2302 -__releases(fotg210->lock)
2303 -__acquires(fotg210->lock)
2304 -{
2305 - if (likely(urb->hcpriv != NULL)) {
2306 - struct fotg210_qh *qh = (struct fotg210_qh *) urb->hcpriv;
2307 -
2308 - /* S-mask in a QH means it's an interrupt urb */
2309 - if ((qh->hw->hw_info2 & cpu_to_hc32(fotg210, QH_SMASK)) != 0) {
2310 -
2311 - /* ... update hc-wide periodic stats (for usbfs) */
2312 - fotg210_to_hcd(fotg210)->self.bandwidth_int_reqs--;
2313 - }
2314 - }
2315 -
2316 - if (unlikely(urb->unlinked)) {
2317 - INCR(fotg210->stats.unlink);
2318 - } else {
2319 - /* report non-error and short read status as zero */
2320 - if (status == -EINPROGRESS || status == -EREMOTEIO)
2321 - status = 0;
2322 - INCR(fotg210->stats.complete);
2323 - }
2324 -
2325 -#ifdef FOTG210_URB_TRACE
2326 - fotg210_dbg(fotg210,
2327 - "%s %s urb %p ep%d%s status %d len %d/%d\n",
2328 - __func__, urb->dev->devpath, urb,
2329 - usb_pipeendpoint(urb->pipe),
2330 - usb_pipein(urb->pipe) ? "in" : "out",
2331 - status,
2332 - urb->actual_length, urb->transfer_buffer_length);
2333 -#endif
2334 -
2335 - /* complete() can reenter this HCD */
2336 - usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
2337 - spin_unlock(&fotg210->lock);
2338 - usb_hcd_giveback_urb(fotg210_to_hcd(fotg210), urb, status);
2339 - spin_lock(&fotg210->lock);
2340 -}
2341 -
2342 -static int qh_schedule(struct fotg210_hcd *fotg210, struct fotg210_qh *qh);
2343 -
2344 -/* Process and free completed qtds for a qh, returning URBs to drivers.
2345 - * Chases up to qh->hw_current. Returns number of completions called,
2346 - * indicating how much "real" work we did.
2347 - */
2348 -static unsigned qh_completions(struct fotg210_hcd *fotg210,
2349 - struct fotg210_qh *qh)
2350 -{
2351 - struct fotg210_qtd *last, *end = qh->dummy;
2352 - struct fotg210_qtd *qtd, *tmp;
2353 - int last_status;
2354 - int stopped;
2355 - unsigned count = 0;
2356 - u8 state;
2357 - struct fotg210_qh_hw *hw = qh->hw;
2358 -
2359 - if (unlikely(list_empty(&qh->qtd_list)))
2360 - return count;
2361 -
2362 - /* completions (or tasks on other cpus) must never clobber HALT
2363 - * till we've gone through and cleaned everything up, even when
2364 - * they add urbs to this qh's queue or mark them for unlinking.
2365 - *
2366 - * NOTE: unlinking expects to be done in queue order.
2367 - *
2368 - * It's a bug for qh->qh_state to be anything other than
2369 - * QH_STATE_IDLE, unless our caller is scan_async() or
2370 - * scan_intr().
2371 - */
2372 - state = qh->qh_state;
2373 - qh->qh_state = QH_STATE_COMPLETING;
2374 - stopped = (state == QH_STATE_IDLE);
2375 -
2376 -rescan:
2377 - last = NULL;
2378 - last_status = -EINPROGRESS;
2379 - qh->needs_rescan = 0;
2380 -
2381 - /* remove de-activated QTDs from front of queue.
2382 - * after faults (including short reads), cleanup this urb
2383 - * then let the queue advance.
2384 - * if queue is stopped, handles unlinks.
2385 - */
2386 - list_for_each_entry_safe(qtd, tmp, &qh->qtd_list, qtd_list) {
2387 - struct urb *urb;
2388 - u32 token = 0;
2389 -
2390 - urb = qtd->urb;
2391 -
2392 - /* clean up any state from previous QTD ...*/
2393 - if (last) {
2394 - if (likely(last->urb != urb)) {
2395 - fotg210_urb_done(fotg210, last->urb,
2396 - last_status);
2397 - count++;
2398 - last_status = -EINPROGRESS;
2399 - }
2400 - fotg210_qtd_free(fotg210, last);
2401 - last = NULL;
2402 - }
2403 -
2404 - /* ignore urbs submitted during completions we reported */
2405 - if (qtd == end)
2406 - break;
2407 -
2408 - /* hardware copies qtd out of qh overlay */
2409 - rmb();
2410 - token = hc32_to_cpu(fotg210, qtd->hw_token);
2411 -
2412 - /* always clean up qtds the hc de-activated */
2413 -retry_xacterr:
2414 - if ((token & QTD_STS_ACTIVE) == 0) {
2415 -
2416 - /* Report Data Buffer Error: non-fatal but useful */
2417 - if (token & QTD_STS_DBE)
2418 - fotg210_dbg(fotg210,
2419 - "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
2420 - urb, usb_endpoint_num(&urb->ep->desc),
2421 - usb_endpoint_dir_in(&urb->ep->desc)
2422 - ? "in" : "out",
2423 - urb->transfer_buffer_length, qtd, qh);
2424 -
2425 - /* on STALL, error, and short reads this urb must
2426 - * complete and all its qtds must be recycled.
2427 - */
2428 - if ((token & QTD_STS_HALT) != 0) {
2429 -
2430 - /* retry transaction errors until we
2431 - * reach the software xacterr limit
2432 - */
2433 - if ((token & QTD_STS_XACT) &&
2434 - QTD_CERR(token) == 0 &&
2435 - ++qh->xacterrs < QH_XACTERR_MAX &&
2436 - !urb->unlinked) {
2437 - fotg210_dbg(fotg210,
2438 - "detected XactErr len %zu/%zu retry %d\n",
2439 - qtd->length - QTD_LENGTH(token),
2440 - qtd->length,
2441 - qh->xacterrs);
2442 -
2443 - /* reset the token in the qtd and the
2444 - * qh overlay (which still contains
2445 - * the qtd) so that we pick up from
2446 - * where we left off
2447 - */
2448 - token &= ~QTD_STS_HALT;
2449 - token |= QTD_STS_ACTIVE |
2450 - (FOTG210_TUNE_CERR << 10);
2451 - qtd->hw_token = cpu_to_hc32(fotg210,
2452 - token);
2453 - wmb();
2454 - hw->hw_token = cpu_to_hc32(fotg210,
2455 - token);
2456 - goto retry_xacterr;
2457 - }
2458 - stopped = 1;
2459 -
2460 - /* magic dummy for some short reads; qh won't advance.
2461 - * that silicon quirk can kick in with this dummy too.
2462 - *
2463 - * other short reads won't stop the queue, including
2464 - * control transfers (status stage handles that) or
2465 - * most other single-qtd reads ... the queue stops if
2466 - * URB_SHORT_NOT_OK was set so the driver submitting
2467 - * the urbs could clean it up.
2468 - */
2469 - } else if (IS_SHORT_READ(token) &&
2470 - !(qtd->hw_alt_next &
2471 - FOTG210_LIST_END(fotg210))) {
2472 - stopped = 1;
2473 - }
2474 -
2475 - /* stop scanning when we reach qtds the hc is using */
2476 - } else if (likely(!stopped
2477 - && fotg210->rh_state >= FOTG210_RH_RUNNING)) {
2478 - break;
2479 -
2480 - /* scan the whole queue for unlinks whenever it stops */
2481 - } else {
2482 - stopped = 1;
2483 -
2484 - /* cancel everything if we halt, suspend, etc */
2485 - if (fotg210->rh_state < FOTG210_RH_RUNNING)
2486 - last_status = -ESHUTDOWN;
2487 -
2488 - /* this qtd is active; skip it unless a previous qtd
2489 - * for its urb faulted, or its urb was canceled.
2490 - */
2491 - else if (last_status == -EINPROGRESS && !urb->unlinked)
2492 - continue;
2493 -
2494 - /* qh unlinked; token in overlay may be most current */
2495 - if (state == QH_STATE_IDLE &&
2496 - cpu_to_hc32(fotg210, qtd->qtd_dma)
2497 - == hw->hw_current) {
2498 - token = hc32_to_cpu(fotg210, hw->hw_token);
2499 -
2500 - /* An unlink may leave an incomplete
2501 - * async transaction in the TT buffer.
2502 - * We have to clear it.
2503 - */
2504 - fotg210_clear_tt_buffer(fotg210, qh, urb,
2505 - token);
2506 - }
2507 - }
2508 -
2509 - /* unless we already know the urb's status, collect qtd status
2510 - * and update count of bytes transferred. in common short read
2511 - * cases with only one data qtd (including control transfers),
2512 - * queue processing won't halt. but with two or more qtds (for
2513 - * example, with a 32 KB transfer), when the first qtd gets a
2514 - * short read the second must be removed by hand.
2515 - */
2516 - if (last_status == -EINPROGRESS) {
2517 - last_status = qtd_copy_status(fotg210, urb,
2518 - qtd->length, token);
2519 - if (last_status == -EREMOTEIO &&
2520 - (qtd->hw_alt_next &
2521 - FOTG210_LIST_END(fotg210)))
2522 - last_status = -EINPROGRESS;
2523 -
2524 - /* As part of low/full-speed endpoint-halt processing
2525 - * we must clear the TT buffer (11.17.5).
2526 - */
2527 - if (unlikely(last_status != -EINPROGRESS &&
2528 - last_status != -EREMOTEIO)) {
2529 - /* The TT's in some hubs malfunction when they
2530 - * receive this request following a STALL (they
2531 - * stop sending isochronous packets). Since a
2532 - * STALL can't leave the TT buffer in a busy
2533 - * state (if you believe Figures 11-48 - 11-51
2534 - * in the USB 2.0 spec), we won't clear the TT
2535 - * buffer in this case. Strictly speaking this
2536 - * is a violation of the spec.
2537 - */
2538 - if (last_status != -EPIPE)
2539 - fotg210_clear_tt_buffer(fotg210, qh,
2540 - urb, token);
2541 - }
2542 - }
2543 -
2544 - /* if we're removing something not at the queue head,
2545 - * patch the hardware queue pointer.
2546 - */
2547 - if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
2548 - last = list_entry(qtd->qtd_list.prev,
2549 - struct fotg210_qtd, qtd_list);
2550 - last->hw_next = qtd->hw_next;
2551 - }
2552 -
2553 - /* remove qtd; it's recycled after possible urb completion */
2554 - list_del(&qtd->qtd_list);
2555 - last = qtd;
2556 -
2557 - /* reinit the xacterr counter for the next qtd */
2558 - qh->xacterrs = 0;
2559 - }
2560 -
2561 - /* last urb's completion might still need calling */
2562 - if (likely(last != NULL)) {
2563 - fotg210_urb_done(fotg210, last->urb, last_status);
2564 - count++;
2565 - fotg210_qtd_free(fotg210, last);
2566 - }
2567 -
2568 - /* Do we need to rescan for URBs dequeued during a giveback? */
2569 - if (unlikely(qh->needs_rescan)) {
2570 - /* If the QH is already unlinked, do the rescan now. */
2571 - if (state == QH_STATE_IDLE)
2572 - goto rescan;
2573 -
2574 - /* Otherwise we have to wait until the QH is fully unlinked.
2575 - * Our caller will start an unlink if qh->needs_rescan is
2576 - * set. But if an unlink has already started, nothing needs
2577 - * to be done.
2578 - */
2579 - if (state != QH_STATE_LINKED)
2580 - qh->needs_rescan = 0;
2581 - }
2582 -
2583 - /* restore original state; caller must unlink or relink */
2584 - qh->qh_state = state;
2585 -
2586 - /* be sure the hardware's done with the qh before refreshing
2587 - * it after fault cleanup, or recovering from silicon wrongly
2588 - * overlaying the dummy qtd (which reduces DMA chatter).
2589 - */
2590 - if (stopped != 0 || hw->hw_qtd_next == FOTG210_LIST_END(fotg210)) {
2591 - switch (state) {
2592 - case QH_STATE_IDLE:
2593 - qh_refresh(fotg210, qh);
2594 - break;
2595 - case QH_STATE_LINKED:
2596 - /* We won't refresh a QH that's linked (after the HC
2597 - * stopped the queue). That avoids a race:
2598 - * - HC reads first part of QH;
2599 - * - CPU updates that first part and the token;
2600 - * - HC reads rest of that QH, including token
2601 - * Result: HC gets an inconsistent image, and then
2602 - * DMAs to/from the wrong memory (corrupting it).
2603 - *
2604 - * That should be rare for interrupt transfers,
2605 - * except maybe high bandwidth ...
2606 - */
2607 -
2608 - /* Tell the caller to start an unlink */
2609 - qh->needs_rescan = 1;
2610 - break;
2611 - /* otherwise, unlink already started */
2612 - }
2613 - }
2614 -
2615 - return count;
2616 -}
2617 -
2618 -/* reverse of qh_urb_transaction: free a list of TDs.
2619 - * used for cleanup after errors, before HC sees an URB's TDs.
2620 - */
2621 -static void qtd_list_free(struct fotg210_hcd *fotg210, struct urb *urb,
2622 - struct list_head *head)
2623 -{
2624 - struct fotg210_qtd *qtd, *temp;
2625 -
2626 - list_for_each_entry_safe(qtd, temp, head, qtd_list) {
2627 - list_del(&qtd->qtd_list);
2628 - fotg210_qtd_free(fotg210, qtd);
2629 - }
2630 -}
2631 -
2632 -/* create a list of filled qtds for this URB; won't link into qh.
2633 - */
2634 -static struct list_head *qh_urb_transaction(struct fotg210_hcd *fotg210,
2635 - struct urb *urb, struct list_head *head, gfp_t flags)
2636 -{
2637 - struct fotg210_qtd *qtd, *qtd_prev;
2638 - dma_addr_t buf;
2639 - int len, this_sg_len, maxpacket;
2640 - int is_input;
2641 - u32 token;
2642 - int i;
2643 - struct scatterlist *sg;
2644 -
2645 - /*
2646 - * URBs map to sequences of QTDs: one logical transaction
2647 - */
2648 - qtd = fotg210_qtd_alloc(fotg210, flags);
2649 - if (unlikely(!qtd))
2650 - return NULL;
2651 - list_add_tail(&qtd->qtd_list, head);
2652 - qtd->urb = urb;
2653 -
2654 - token = QTD_STS_ACTIVE;
2655 - token |= (FOTG210_TUNE_CERR << 10);
2656 - /* for split transactions, SplitXState initialized to zero */
2657 -
2658 - len = urb->transfer_buffer_length;
2659 - is_input = usb_pipein(urb->pipe);
2660 - if (usb_pipecontrol(urb->pipe)) {
2661 - /* SETUP pid */
2662 - qtd_fill(fotg210, qtd, urb->setup_dma,
2663 - sizeof(struct usb_ctrlrequest),
2664 - token | (2 /* "setup" */ << 8), 8);
2665 -
2666 - /* ... and always at least one more pid */
2667 - token ^= QTD_TOGGLE;
2668 - qtd_prev = qtd;
2669 - qtd = fotg210_qtd_alloc(fotg210, flags);
2670 - if (unlikely(!qtd))
2671 - goto cleanup;
2672 - qtd->urb = urb;
2673 - qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma);
2674 - list_add_tail(&qtd->qtd_list, head);
2675 -
2676 - /* for zero length DATA stages, STATUS is always IN */
2677 - if (len == 0)
2678 - token |= (1 /* "in" */ << 8);
2679 - }
2680 -
2681 - /*
2682 - * data transfer stage: buffer setup
2683 - */
2684 - i = urb->num_mapped_sgs;
2685 - if (len > 0 && i > 0) {
2686 - sg = urb->sg;
2687 - buf = sg_dma_address(sg);
2688 -
2689 - /* urb->transfer_buffer_length may be smaller than the
2690 - * size of the scatterlist (or vice versa)
2691 - */
2692 - this_sg_len = min_t(int, sg_dma_len(sg), len);
2693 - } else {
2694 - sg = NULL;
2695 - buf = urb->transfer_dma;
2696 - this_sg_len = len;
2697 - }
2698 -
2699 - if (is_input)
2700 - token |= (1 /* "in" */ << 8);
2701 - /* else it's already initted to "out" pid (0 << 8) */
2702 -
2703 - maxpacket = usb_maxpacket(urb->dev, urb->pipe);
2704 -
2705 - /*
2706 - * buffer gets wrapped in one or more qtds;
2707 - * last one may be "short" (including zero len)
2708 - * and may serve as a control status ack
2709 - */
2710 - for (;;) {
2711 - int this_qtd_len;
2712 -
2713 - this_qtd_len = qtd_fill(fotg210, qtd, buf, this_sg_len, token,
2714 - maxpacket);
2715 - this_sg_len -= this_qtd_len;
2716 - len -= this_qtd_len;
2717 - buf += this_qtd_len;
2718 -
2719 - /*
2720 - * short reads advance to a "magic" dummy instead of the next
2721 - * qtd ... that forces the queue to stop, for manual cleanup.
2722 - * (this will usually be overridden later.)
2723 - */
2724 - if (is_input)
2725 - qtd->hw_alt_next = fotg210->async->hw->hw_alt_next;
2726 -
2727 - /* qh makes control packets use qtd toggle; maybe switch it */
2728 - if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
2729 - token ^= QTD_TOGGLE;
2730 -
2731 - if (likely(this_sg_len <= 0)) {
2732 - if (--i <= 0 || len <= 0)
2733 - break;
2734 - sg = sg_next(sg);
2735 - buf = sg_dma_address(sg);
2736 - this_sg_len = min_t(int, sg_dma_len(sg), len);
2737 - }
2738 -
2739 - qtd_prev = qtd;
2740 - qtd = fotg210_qtd_alloc(fotg210, flags);
2741 - if (unlikely(!qtd))
2742 - goto cleanup;
2743 - qtd->urb = urb;
2744 - qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma);
2745 - list_add_tail(&qtd->qtd_list, head);
2746 - }
2747 -
2748 - /*
2749 - * unless the caller requires manual cleanup after short reads,
2750 - * have the alt_next mechanism keep the queue running after the
2751 - * last data qtd (the only one, for control and most other cases).
2752 - */
2753 - if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0 ||
2754 - usb_pipecontrol(urb->pipe)))
2755 - qtd->hw_alt_next = FOTG210_LIST_END(fotg210);
2756 -
2757 - /*
2758 - * control requests may need a terminating data "status" ack;
2759 - * other OUT ones may need a terminating short packet
2760 - * (zero length).
2761 - */
2762 - if (likely(urb->transfer_buffer_length != 0)) {
2763 - int one_more = 0;
2764 -
2765 - if (usb_pipecontrol(urb->pipe)) {
2766 - one_more = 1;
2767 - token ^= 0x0100; /* "in" <--> "out" */
2768 - token |= QTD_TOGGLE; /* force DATA1 */
2769 - } else if (usb_pipeout(urb->pipe)
2770 - && (urb->transfer_flags & URB_ZERO_PACKET)
2771 - && !(urb->transfer_buffer_length % maxpacket)) {
2772 - one_more = 1;
2773 - }
2774 - if (one_more) {
2775 - qtd_prev = qtd;
2776 - qtd = fotg210_qtd_alloc(fotg210, flags);
2777 - if (unlikely(!qtd))
2778 - goto cleanup;
2779 - qtd->urb = urb;
2780 - qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma);
2781 - list_add_tail(&qtd->qtd_list, head);
2782 -
2783 - /* never any data in such packets */
2784 - qtd_fill(fotg210, qtd, 0, 0, token, 0);
2785 - }
2786 - }
2787 -
2788 - /* by default, enable interrupt on urb completion */
2789 - if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT)))
2790 - qtd->hw_token |= cpu_to_hc32(fotg210, QTD_IOC);
2791 - return head;
2792 -
2793 -cleanup:
2794 - qtd_list_free(fotg210, urb, head);
2795 - return NULL;
2796 -}
2797 -
2798 -/* Would be best to create all qh's from config descriptors,
2799 - * when each interface/altsetting is established. Unlink
2800 - * any previous qh and cancel its urbs first; endpoints are
2801 - * implicitly reset then (data toggle too).
2802 - * That'd mean updating how usbcore talks to HCDs. (2.7?)
2803 - */
2804 -
2805 -
2806 -/* Each QH holds a qtd list; a QH is used for everything except iso.
2807 - *
2808 - * For interrupt urbs, the scheduler must set the microframe scheduling
2809 - * mask(s) each time the QH gets scheduled. For highspeed, that's
2810 - * just one microframe in the s-mask. For split interrupt transactions
2811 - * there are additional complications: c-mask, maybe FSTNs.
2812 - */
2813 -static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
2814 - gfp_t flags)
2815 -{
2816 - struct fotg210_qh *qh = fotg210_qh_alloc(fotg210, flags);
2817 - struct usb_host_endpoint *ep;
2818 - u32 info1 = 0, info2 = 0;
2819 - int is_input, type;
2820 - int maxp = 0;
2821 - int mult;
2822 - struct usb_tt *tt = urb->dev->tt;
2823 - struct fotg210_qh_hw *hw;
2824 -
2825 - if (!qh)
2826 - return qh;
2827 -
2828 - /*
2829 - * init endpoint/device data for this QH
2830 - */
2831 - info1 |= usb_pipeendpoint(urb->pipe) << 8;
2832 - info1 |= usb_pipedevice(urb->pipe) << 0;
2833 -
2834 - is_input = usb_pipein(urb->pipe);
2835 - type = usb_pipetype(urb->pipe);
2836 - ep = usb_pipe_endpoint(urb->dev, urb->pipe);
2837 - maxp = usb_endpoint_maxp(&ep->desc);
2838 - mult = usb_endpoint_maxp_mult(&ep->desc);
2839 -
2840 - /* 1024 byte maxpacket is a hardware ceiling. High bandwidth
2841 - * acts like up to 3KB, but is built from smaller packets.
2842 - */
2843 - if (maxp > 1024) {
2844 - fotg210_dbg(fotg210, "bogus qh maxpacket %d\n", maxp);
2845 - goto done;
2846 - }
2847 -
2848 - /* Compute interrupt scheduling parameters just once, and save.
2849 - * - allowing for high bandwidth, how many nsec/uframe are used?
2850 - * - split transactions need a second CSPLIT uframe; same question
2851 - * - splits also need a schedule gap (for full/low speed I/O)
2852 - * - qh has a polling interval
2853 - *
2854 - * For control/bulk requests, the HC or TT handles these.
2855 - */
2856 - if (type == PIPE_INTERRUPT) {
2857 - qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
2858 - is_input, 0, mult * maxp));
2859 - qh->start = NO_FRAME;
2860 -
2861 - if (urb->dev->speed == USB_SPEED_HIGH) {
2862 - qh->c_usecs = 0;
2863 - qh->gap_uf = 0;
2864 -
2865 - qh->period = urb->interval >> 3;
2866 - if (qh->period == 0 && urb->interval != 1) {
2867 - /* NOTE interval 2 or 4 uframes could work.
2868 - * But interval 1 scheduling is simpler, and
2869 - * includes high bandwidth.
2870 - */
2871 - urb->interval = 1;
2872 - } else if (qh->period > fotg210->periodic_size) {
2873 - qh->period = fotg210->periodic_size;
2874 - urb->interval = qh->period << 3;
2875 - }
2876 - } else {
2877 - int think_time;
2878 -
2879 - /* gap is f(FS/LS transfer times) */
2880 - qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed,
2881 - is_input, 0, maxp) / (125 * 1000);
2882 -
2883 - /* FIXME this just approximates SPLIT/CSPLIT times */
2884 - if (is_input) { /* SPLIT, gap, CSPLIT+DATA */
2885 - qh->c_usecs = qh->usecs + HS_USECS(0);
2886 - qh->usecs = HS_USECS(1);
2887 - } else { /* SPLIT+DATA, gap, CSPLIT */
2888 - qh->usecs += HS_USECS(1);
2889 - qh->c_usecs = HS_USECS(0);
2890 - }
2891 -
2892 - think_time = tt ? tt->think_time : 0;
2893 - qh->tt_usecs = NS_TO_US(think_time +
2894 - usb_calc_bus_time(urb->dev->speed,
2895 - is_input, 0, maxp));
2896 - qh->period = urb->interval;
2897 - if (qh->period > fotg210->periodic_size) {
2898 - qh->period = fotg210->periodic_size;
2899 - urb->interval = qh->period;
2900 - }
2901 - }
2902 - }
2903 -
2904 - /* support for tt scheduling, and access to toggles */
2905 - qh->dev = urb->dev;
2906 -
2907 - /* using TT? */
2908 - switch (urb->dev->speed) {
2909 - case USB_SPEED_LOW:
2910 - info1 |= QH_LOW_SPEED;
2911 - fallthrough;
2912 -
2913 - case USB_SPEED_FULL:
2914 - /* EPS 0 means "full" */
2915 - if (type != PIPE_INTERRUPT)
2916 - info1 |= (FOTG210_TUNE_RL_TT << 28);
2917 - if (type == PIPE_CONTROL) {
2918 - info1 |= QH_CONTROL_EP; /* for TT */
2919 - info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
2920 - }
2921 - info1 |= maxp << 16;
2922 -
2923 - info2 |= (FOTG210_TUNE_MULT_TT << 30);
2924 -
2925 - /* Some Freescale processors have an erratum in which the
2926 - * port number in the queue head was 0..N-1 instead of 1..N.
2927 - */
2928 - if (fotg210_has_fsl_portno_bug(fotg210))
2929 - info2 |= (urb->dev->ttport-1) << 23;
2930 - else
2931 - info2 |= urb->dev->ttport << 23;
2932 -
2933 - /* set the address of the TT; for TDI's integrated
2934 - * root hub tt, leave it zeroed.
2935 - */
2936 - if (tt && tt->hub != fotg210_to_hcd(fotg210)->self.root_hub)
2937 - info2 |= tt->hub->devnum << 16;
2938 -
2939 - /* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */
2940 -
2941 - break;
2942 -
2943 - case USB_SPEED_HIGH: /* no TT involved */
2944 - info1 |= QH_HIGH_SPEED;
2945 - if (type == PIPE_CONTROL) {
2946 - info1 |= (FOTG210_TUNE_RL_HS << 28);
2947 - info1 |= 64 << 16; /* usb2 fixed maxpacket */
2948 - info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
2949 - info2 |= (FOTG210_TUNE_MULT_HS << 30);
2950 - } else if (type == PIPE_BULK) {
2951 - info1 |= (FOTG210_TUNE_RL_HS << 28);
2952 - /* The USB spec says that high speed bulk endpoints
2953 - * always use 512 byte maxpacket. But some device
2954 - * vendors decided to ignore that, and MSFT is happy
2955 - * to help them do so. So now people expect to use
2956 - * such nonconformant devices with Linux too; sigh.
2957 - */
2958 - info1 |= maxp << 16;
2959 - info2 |= (FOTG210_TUNE_MULT_HS << 30);
2960 - } else { /* PIPE_INTERRUPT */
2961 - info1 |= maxp << 16;
2962 - info2 |= mult << 30;
2963 - }
2964 - break;
2965 - default:
2966 - fotg210_dbg(fotg210, "bogus dev %p speed %d\n", urb->dev,
2967 - urb->dev->speed);
2968 -done:
2969 - qh_destroy(fotg210, qh);
2970 - return NULL;
2971 - }
2972 -
2973 - /* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */
2974 -
2975 - /* init as live, toggle clear, advance to dummy */
2976 - qh->qh_state = QH_STATE_IDLE;
2977 - hw = qh->hw;
2978 - hw->hw_info1 = cpu_to_hc32(fotg210, info1);
2979 - hw->hw_info2 = cpu_to_hc32(fotg210, info2);
2980 - qh->is_out = !is_input;
2981 - usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input, 1);
2982 - qh_refresh(fotg210, qh);
2983 - return qh;
2984 -}
2985 -
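/*
 * Editor's note (not part of the patch): a small sketch mirroring the
 * high-speed branch of the interrupt-scheduling setup in qh_make() above.
 * The URB interval is expressed in 125 us microframes, the QH period in
 * 1 ms frames, clamped to the length of the periodic schedule; the driver
 * additionally forces sub-frame intervals down to one microframe (a period
 * of 0 means "present in every frame", with the microframe picked later via
 * the S-mask).  periodic_size stands in for fotg210->periodic_size.
 */
static unsigned int hs_period_frames(unsigned int interval_uframes,
				     unsigned int periodic_size)
{
	/* 8 microframes per 1 ms frame */
	unsigned int period = interval_uframes >> 3;

	if (period > periodic_size)
		period = periodic_size;
	return period;
}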
2986 -static void enable_async(struct fotg210_hcd *fotg210)
2987 -{
2988 - if (fotg210->async_count++)
2989 - return;
2990 -
2991 - /* Stop waiting to turn off the async schedule */
2992 - fotg210->enabled_hrtimer_events &= ~BIT(FOTG210_HRTIMER_DISABLE_ASYNC);
2993 -
2994 - /* Don't start the schedule until ASS is 0 */
2995 - fotg210_poll_ASS(fotg210);
2996 - turn_on_io_watchdog(fotg210);
2997 -}
2998 -
2999 -static void disable_async(struct fotg210_hcd *fotg210)
3000 -{
3001 - if (--fotg210->async_count)
3002 - return;
3003 -
3004 - /* The async schedule and async_unlink list are supposed to be empty */
3005 - WARN_ON(fotg210->async->qh_next.qh || fotg210->async_unlink);
3006 -
3007 - /* Don't turn off the schedule until ASS is 1 */
3008 - fotg210_poll_ASS(fotg210);
3009 -}
3010 -
3011 -/* move qh (and its qtds) onto async queue; maybe enable queue. */
3012 -
3013 -static void qh_link_async(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
3014 -{
3015 - __hc32 dma = QH_NEXT(fotg210, qh->qh_dma);
3016 - struct fotg210_qh *head;
3017 -
3018 - /* Don't link a QH if there's a Clear-TT-Buffer pending */
3019 - if (unlikely(qh->clearing_tt))
3020 - return;
3021 -
3022 - WARN_ON(qh->qh_state != QH_STATE_IDLE);
3023 -
3024 - /* clear halt and/or toggle; and maybe recover from silicon quirk */
3025 - qh_refresh(fotg210, qh);
3026 -
3027 - /* splice right after start */
3028 - head = fotg210->async;
3029 - qh->qh_next = head->qh_next;
3030 - qh->hw->hw_next = head->hw->hw_next;
3031 - wmb();
3032 -
3033 - head->qh_next.qh = qh;
3034 - head->hw->hw_next = dma;
3035 -
3036 - qh->xacterrs = 0;
3037 - qh->qh_state = QH_STATE_LINKED;
3038 - /* qtd completions reported later by interrupt */
3039 -
3040 - enable_async(fotg210);
3041 -}
3042 -
3043 -/* For control/bulk/interrupt, return QH with these TDs appended.
3044 - * Allocates and initializes the QH if necessary.
3045 - * Returns null if it can't allocate a QH it needs to.
3046 - * If the QH has TDs (urbs) already, that's great.
3047 - */
3048 -static struct fotg210_qh *qh_append_tds(struct fotg210_hcd *fotg210,
3049 - struct urb *urb, struct list_head *qtd_list,
3050 - int epnum, void **ptr)
3051 -{
3052 - struct fotg210_qh *qh = NULL;
3053 - __hc32 qh_addr_mask = cpu_to_hc32(fotg210, 0x7f);
3054 -
3055 - qh = (struct fotg210_qh *) *ptr;
3056 - if (unlikely(qh == NULL)) {
3057 - /* can't sleep here, we have fotg210->lock... */
3058 - qh = qh_make(fotg210, urb, GFP_ATOMIC);
3059 - *ptr = qh;
3060 - }
3061 - if (likely(qh != NULL)) {
3062 - struct fotg210_qtd *qtd;
3063 -
3064 - if (unlikely(list_empty(qtd_list)))
3065 - qtd = NULL;
3066 - else
3067 - qtd = list_entry(qtd_list->next, struct fotg210_qtd,
3068 - qtd_list);
3069 -
3070 - /* control qh may need patching ... */
3071 - if (unlikely(epnum == 0)) {
3072 - /* usb_reset_device() briefly reverts to address 0 */
3073 - if (usb_pipedevice(urb->pipe) == 0)
3074 - qh->hw->hw_info1 &= ~qh_addr_mask;
3075 - }
3076 -
3077 - /* just one way to queue requests: swap with the dummy qtd.
3078 - * only hc or qh_refresh() ever modify the overlay.
3079 - */
3080 - if (likely(qtd != NULL)) {
3081 - struct fotg210_qtd *dummy;
3082 - dma_addr_t dma;
3083 - __hc32 token;
3084 -
3085 - /* to avoid racing the HC, use the dummy td instead of
3086 - * the first td of our list (becomes new dummy). both
3087 - * tds stay deactivated until we're done, when the
3088 - * HC is allowed to fetch the old dummy (4.10.2).
3089 - */
3090 - token = qtd->hw_token;
3091 - qtd->hw_token = HALT_BIT(fotg210);
3092 -
3093 - dummy = qh->dummy;
3094 -
3095 - dma = dummy->qtd_dma;
3096 - *dummy = *qtd;
3097 - dummy->qtd_dma = dma;
3098 -
3099 - list_del(&qtd->qtd_list);
3100 - list_add(&dummy->qtd_list, qtd_list);
3101 - list_splice_tail(qtd_list, &qh->qtd_list);
3102 -
3103 - fotg210_qtd_init(fotg210, qtd, qtd->qtd_dma);
3104 - qh->dummy = qtd;
3105 -
3106 - /* hc must see the new dummy at list end */
3107 - dma = qtd->qtd_dma;
3108 - qtd = list_entry(qh->qtd_list.prev,
3109 - struct fotg210_qtd, qtd_list);
3110 - qtd->hw_next = QTD_NEXT(fotg210, dma);
3111 -
3112 - /* let the hc process these next qtds */
3113 - wmb();
3114 - dummy->hw_token = token;
3115 -
3116 - urb->hcpriv = qh;
3117 - }
3118 - }
3119 - return qh;
3120 -}
3121 -
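/*
 * Editor's note (not part of the patch): qh_append_tds() above queues new
 * work by swapping the first new qTD with the queue's inactive "dummy" tail,
 * so the controller never fetches a half-built entry; the old dummy is
 * activated only after everything else is in place.  A toy, single-item,
 * single-threaded sketch of the same publish-last idea (struct node and the
 * helper are hypothetical, not the driver's data structures):
 */
#include <stddef.h>

struct node {
	struct node *next;
	int payload;
	int active;		/* consumers only look at active nodes */
};

/* "dummy" is the current inactive tail; "fresh" becomes the next dummy */
static struct node *append_via_dummy(struct node *dummy, struct node *fresh)
{
	dummy->payload = fresh->payload;	/* copy work into the old dummy */
	fresh->payload = 0;
	fresh->active = 0;			/* fresh node is the new dummy */
	fresh->next = NULL;
	dummy->next = fresh;
	dummy->active = 1;			/* publish last: now consumable */
	return fresh;
}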
3122 -static int submit_async(struct fotg210_hcd *fotg210, struct urb *urb,
3123 - struct list_head *qtd_list, gfp_t mem_flags)
3124 -{
3125 - int epnum;
3126 - unsigned long flags;
3127 - struct fotg210_qh *qh = NULL;
3128 - int rc;
3129 -
3130 - epnum = urb->ep->desc.bEndpointAddress;
3131 -
3132 -#ifdef FOTG210_URB_TRACE
3133 - {
3134 - struct fotg210_qtd *qtd;
3135 -
3136 - qtd = list_entry(qtd_list->next, struct fotg210_qtd, qtd_list);
3137 - fotg210_dbg(fotg210,
3138 - "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
3139 - __func__, urb->dev->devpath, urb,
3140 - epnum & 0x0f, (epnum & USB_DIR_IN)
3141 - ? "in" : "out",
3142 - urb->transfer_buffer_length,
3143 - qtd, urb->ep->hcpriv);
3144 - }
3145 -#endif
3146 -
3147 - spin_lock_irqsave(&fotg210->lock, flags);
3148 - if (unlikely(!HCD_HW_ACCESSIBLE(fotg210_to_hcd(fotg210)))) {
3149 - rc = -ESHUTDOWN;
3150 - goto done;
3151 - }
3152 - rc = usb_hcd_link_urb_to_ep(fotg210_to_hcd(fotg210), urb);
3153 - if (unlikely(rc))
3154 - goto done;
3155 -
3156 - qh = qh_append_tds(fotg210, urb, qtd_list, epnum, &urb->ep->hcpriv);
3157 - if (unlikely(qh == NULL)) {
3158 - usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
3159 - rc = -ENOMEM;
3160 - goto done;
3161 - }
3162 -
3163 - /* Control/bulk operations through TTs don't need scheduling,
3164 - * the HC and TT handle it when the TT has a buffer ready.
3165 - */
3166 - if (likely(qh->qh_state == QH_STATE_IDLE))
3167 - qh_link_async(fotg210, qh);
3168 -done:
3169 - spin_unlock_irqrestore(&fotg210->lock, flags);
3170 - if (unlikely(qh == NULL))
3171 - qtd_list_free(fotg210, urb, qtd_list);
3172 - return rc;
3173 -}
3174 -
3175 -static void single_unlink_async(struct fotg210_hcd *fotg210,
3176 - struct fotg210_qh *qh)
3177 -{
3178 - struct fotg210_qh *prev;
3179 -
3180 - /* Add to the end of the list of QHs waiting for the next IAAD */
3181 - qh->qh_state = QH_STATE_UNLINK;
3182 - if (fotg210->async_unlink)
3183 - fotg210->async_unlink_last->unlink_next = qh;
3184 - else
3185 - fotg210->async_unlink = qh;
3186 - fotg210->async_unlink_last = qh;
3187 -
3188 - /* Unlink it from the schedule */
3189 - prev = fotg210->async;
3190 - while (prev->qh_next.qh != qh)
3191 - prev = prev->qh_next.qh;
3192 -
3193 - prev->hw->hw_next = qh->hw->hw_next;
3194 - prev->qh_next = qh->qh_next;
3195 - if (fotg210->qh_scan_next == qh)
3196 - fotg210->qh_scan_next = qh->qh_next.qh;
3197 -}
3198 -
3199 -static void start_iaa_cycle(struct fotg210_hcd *fotg210, bool nested)
3200 -{
3201 - /*
3202 - * Do nothing if an IAA cycle is already running or
3203 - * if one will be started shortly.
3204 - */
3205 - if (fotg210->async_iaa || fotg210->async_unlinking)
3206 - return;
3207 -
3208 - /* Do all the waiting QHs at once */
3209 - fotg210->async_iaa = fotg210->async_unlink;
3210 - fotg210->async_unlink = NULL;
3211 -
3212 - /* If the controller isn't running, we don't have to wait for it */
3213 - if (unlikely(fotg210->rh_state < FOTG210_RH_RUNNING)) {
3214 - if (!nested) /* Avoid recursion */
3215 - end_unlink_async(fotg210);
3216 -
3217 - /* Otherwise start a new IAA cycle */
3218 - } else if (likely(fotg210->rh_state == FOTG210_RH_RUNNING)) {
3219 - /* Make sure the unlinks are all visible to the hardware */
3220 - wmb();
3221 -
3222 - fotg210_writel(fotg210, fotg210->command | CMD_IAAD,
3223 - &fotg210->regs->command);
3224 - fotg210_readl(fotg210, &fotg210->regs->command);
3225 - fotg210_enable_event(fotg210, FOTG210_HRTIMER_IAA_WATCHDOG,
3226 - true);
3227 - }
3228 -}
3229 -
3230 -/* the async qh for the qtds being unlinked are now gone from the HC */
3231 -
3232 -static void end_unlink_async(struct fotg210_hcd *fotg210)
3233 -{
3234 - struct fotg210_qh *qh;
3235 -
3236 - /* Process the idle QHs */
3237 -restart:
3238 - fotg210->async_unlinking = true;
3239 - while (fotg210->async_iaa) {
3240 - qh = fotg210->async_iaa;
3241 - fotg210->async_iaa = qh->unlink_next;
3242 - qh->unlink_next = NULL;
3243 -
3244 - qh->qh_state = QH_STATE_IDLE;
3245 - qh->qh_next.qh = NULL;
3246 -
3247 - qh_completions(fotg210, qh);
3248 - if (!list_empty(&qh->qtd_list) &&
3249 - fotg210->rh_state == FOTG210_RH_RUNNING)
3250 - qh_link_async(fotg210, qh);
3251 - disable_async(fotg210);
3252 - }
3253 - fotg210->async_unlinking = false;
3254 -
3255 - /* Start a new IAA cycle if any QHs are waiting for it */
3256 - if (fotg210->async_unlink) {
3257 - start_iaa_cycle(fotg210, true);
3258 - if (unlikely(fotg210->rh_state < FOTG210_RH_RUNNING))
3259 - goto restart;
3260 - }
3261 -}
3262 -
3263 -static void unlink_empty_async(struct fotg210_hcd *fotg210)
3264 -{
3265 - struct fotg210_qh *qh, *next;
3266 - bool stopped = (fotg210->rh_state < FOTG210_RH_RUNNING);
3267 - bool check_unlinks_later = false;
3268 -
3269 - /* Unlink all the async QHs that have been empty for a timer cycle */
3270 - next = fotg210->async->qh_next.qh;
3271 - while (next) {
3272 - qh = next;
3273 - next = qh->qh_next.qh;
3274 -
3275 - if (list_empty(&qh->qtd_list) &&
3276 - qh->qh_state == QH_STATE_LINKED) {
3277 - if (!stopped && qh->unlink_cycle ==
3278 - fotg210->async_unlink_cycle)
3279 - check_unlinks_later = true;
3280 - else
3281 - single_unlink_async(fotg210, qh);
3282 - }
3283 - }
3284 -
3285 - /* Start a new IAA cycle if any QHs are waiting for it */
3286 - if (fotg210->async_unlink)
3287 - start_iaa_cycle(fotg210, false);
3288 -
3289 - /* QHs that haven't been empty for long enough will be handled later */
3290 - if (check_unlinks_later) {
3291 - fotg210_enable_event(fotg210, FOTG210_HRTIMER_ASYNC_UNLINKS,
3292 - true);
3293 - ++fotg210->async_unlink_cycle;
3294 - }
3295 -}
3296 -
3297 -/* makes sure the async qh will become idle */
3298 -/* caller must own fotg210->lock */
3299 -
3300 -static void start_unlink_async(struct fotg210_hcd *fotg210,
3301 - struct fotg210_qh *qh)
3302 -{
3303 - /*
3304 - * If the QH isn't linked then there's nothing we can do
3305 - * unless we were called during a giveback, in which case
3306 - * qh_completions() has to deal with it.
3307 - */
3308 - if (qh->qh_state != QH_STATE_LINKED) {
3309 - if (qh->qh_state == QH_STATE_COMPLETING)
3310 - qh->needs_rescan = 1;
3311 - return;
3312 - }
3313 -
3314 - single_unlink_async(fotg210, qh);
3315 - start_iaa_cycle(fotg210, false);
3316 -}
3317 -
3318 -static void scan_async(struct fotg210_hcd *fotg210)
3319 -{
3320 - struct fotg210_qh *qh;
3321 - bool check_unlinks_later = false;
3322 -
3323 - fotg210->qh_scan_next = fotg210->async->qh_next.qh;
3324 - while (fotg210->qh_scan_next) {
3325 - qh = fotg210->qh_scan_next;
3326 - fotg210->qh_scan_next = qh->qh_next.qh;
3327 -rescan:
3328 - /* clean any finished work for this qh */
3329 - if (!list_empty(&qh->qtd_list)) {
3330 - int temp;
3331 -
3332 - /*
3333 - * Unlinks could happen here; completion reporting
3334 - * drops the lock. That's why fotg210->qh_scan_next
3335 - * always holds the next qh to scan; if the next qh
3336 - * gets unlinked then fotg210->qh_scan_next is adjusted
3337 - * in single_unlink_async().
3338 - */
3339 - temp = qh_completions(fotg210, qh);
3340 - if (qh->needs_rescan) {
3341 - start_unlink_async(fotg210, qh);
3342 - } else if (list_empty(&qh->qtd_list)
3343 - && qh->qh_state == QH_STATE_LINKED) {
3344 - qh->unlink_cycle = fotg210->async_unlink_cycle;
3345 - check_unlinks_later = true;
3346 - } else if (temp != 0)
3347 - goto rescan;
3348 - }
3349 - }
3350 -
3351 - /*
3352 - * Unlink empty entries, reducing DMA usage as well
3353 - * as HCD schedule-scanning costs. Delay for any qh
3354 - * we just scanned, there's a not-unusual case that it
3355 - * doesn't stay idle for long.
3356 - */
3357 - if (check_unlinks_later && fotg210->rh_state == FOTG210_RH_RUNNING &&
3358 - !(fotg210->enabled_hrtimer_events &
3359 - BIT(FOTG210_HRTIMER_ASYNC_UNLINKS))) {
3360 - fotg210_enable_event(fotg210,
3361 - FOTG210_HRTIMER_ASYNC_UNLINKS, true);
3362 - ++fotg210->async_unlink_cycle;
3363 - }
3364 -}
3365 -/* EHCI scheduled transaction support: interrupt, iso, split iso
3366 - * These are called "periodic" transactions in the EHCI spec.
3367 - *
3368 - * Note that for interrupt transfers, the QH/QTD manipulation is shared
3369 - * with the "asynchronous" transaction support (control/bulk transfers).
3370 - * The only real difference is in how interrupt transfers are scheduled.
3371 - *
3372 - * For ISO, we make an "iso_stream" head to serve the same role as a QH.
3373 - * It keeps track of every ITD (or SITD) that's linked, and holds enough
3374 - * pre-calculated schedule data to make appending to the queue be quick.
3375 - */
3376 -static int fotg210_get_frame(struct usb_hcd *hcd);
3377 -
3378 -/* periodic_next_shadow - return "next" pointer on shadow list
3379 - * @periodic: host pointer to qh/itd
3380 - * @tag: hardware tag for type of this record
3381 - */
3382 -static union fotg210_shadow *periodic_next_shadow(struct fotg210_hcd *fotg210,
3383 - union fotg210_shadow *periodic, __hc32 tag)
3384 -{
3385 - switch (hc32_to_cpu(fotg210, tag)) {
3386 - case Q_TYPE_QH:
3387 - return &periodic->qh->qh_next;
3388 - case Q_TYPE_FSTN:
3389 - return &periodic->fstn->fstn_next;
3390 - default:
3391 - return &periodic->itd->itd_next;
3392 - }
3393 -}
3394 -
3395 -static __hc32 *shadow_next_periodic(struct fotg210_hcd *fotg210,
3396 - union fotg210_shadow *periodic, __hc32 tag)
3397 -{
3398 - switch (hc32_to_cpu(fotg210, tag)) {
3399 - /* our fotg210_shadow.qh is actually software part */
3400 - case Q_TYPE_QH:
3401 - return &periodic->qh->hw->hw_next;
3402 - /* others are hw parts */
3403 - default:
3404 - return periodic->hw_next;
3405 - }
3406 -}
3407 -
3408 -/* caller must hold fotg210->lock */
3409 -static void periodic_unlink(struct fotg210_hcd *fotg210, unsigned frame,
3410 - void *ptr)
3411 -{
3412 - union fotg210_shadow *prev_p = &fotg210->pshadow[frame];
3413 - __hc32 *hw_p = &fotg210->periodic[frame];
3414 - union fotg210_shadow here = *prev_p;
3415 -
3416 - /* find predecessor of "ptr"; hw and shadow lists are in sync */
3417 - while (here.ptr && here.ptr != ptr) {
3418 - prev_p = periodic_next_shadow(fotg210, prev_p,
3419 - Q_NEXT_TYPE(fotg210, *hw_p));
3420 - hw_p = shadow_next_periodic(fotg210, &here,
3421 - Q_NEXT_TYPE(fotg210, *hw_p));
3422 - here = *prev_p;
3423 - }
3424 - /* an interrupt entry (at list end) could have been shared */
3425 - if (!here.ptr)
3426 - return;
3427 -
3428 - /* update shadow and hardware lists ... the old "next" pointers
3429 - * from ptr may still be in use, the caller updates them.
3430 - */
3431 - *prev_p = *periodic_next_shadow(fotg210, &here,
3432 - Q_NEXT_TYPE(fotg210, *hw_p));
3433 -
3434 - *hw_p = *shadow_next_periodic(fotg210, &here,
3435 - Q_NEXT_TYPE(fotg210, *hw_p));
3436 -}
3437 -
3438 -/* how many of the uframe's 125 usecs are allocated? */
3439 -static unsigned short periodic_usecs(struct fotg210_hcd *fotg210,
3440 - unsigned frame, unsigned uframe)
3441 -{
3442 - __hc32 *hw_p = &fotg210->periodic[frame];
3443 - union fotg210_shadow *q = &fotg210->pshadow[frame];
3444 - unsigned usecs = 0;
3445 - struct fotg210_qh_hw *hw;
3446 -
3447 - while (q->ptr) {
3448 - switch (hc32_to_cpu(fotg210, Q_NEXT_TYPE(fotg210, *hw_p))) {
3449 - case Q_TYPE_QH:
3450 - hw = q->qh->hw;
3451 - /* is it in the S-mask? */
3452 - if (hw->hw_info2 & cpu_to_hc32(fotg210, 1 << uframe))
3453 - usecs += q->qh->usecs;
3454 - /* ... or C-mask? */
3455 - if (hw->hw_info2 & cpu_to_hc32(fotg210,
3456 - 1 << (8 + uframe)))
3457 - usecs += q->qh->c_usecs;
3458 - hw_p = &hw->hw_next;
3459 - q = &q->qh->qh_next;
3460 - break;
3461 - /* case Q_TYPE_FSTN: */
3462 - default:
3463 - /* for "save place" FSTNs, count the relevant INTR
3464 - * bandwidth from the previous frame
3465 - */
3466 - if (q->fstn->hw_prev != FOTG210_LIST_END(fotg210))
3467 - fotg210_dbg(fotg210, "ignoring FSTN cost ...\n");
3468 -
3469 - hw_p = &q->fstn->hw_next;
3470 - q = &q->fstn->fstn_next;
3471 - break;
3472 - case Q_TYPE_ITD:
3473 - if (q->itd->hw_transaction[uframe])
3474 - usecs += q->itd->stream->usecs;
3475 - hw_p = &q->itd->hw_next;
3476 - q = &q->itd->itd_next;
3477 - break;
3478 - }
3479 - }
3480 - if (usecs > fotg210->uframe_periodic_max)
3481 - fotg210_err(fotg210, "uframe %d sched overrun: %d usecs\n",
3482 - frame * 8 + uframe, usecs);
3483 - return usecs;
3484 -}
3485 -
3486 -static int same_tt(struct usb_device *dev1, struct usb_device *dev2)
3487 -{
3488 - if (!dev1->tt || !dev2->tt)
3489 - return 0;
3490 - if (dev1->tt != dev2->tt)
3491 - return 0;
3492 - if (dev1->tt->multi)
3493 - return dev1->ttport == dev2->ttport;
3494 - else
3495 - return 1;
3496 -}
3497 -
3498 -/* return true iff the device's transaction translator is available
3499 - * for a periodic transfer starting at the specified frame, using
3500 - * all the uframes in the mask.
3501 - */
3502 -static int tt_no_collision(struct fotg210_hcd *fotg210, unsigned period,
3503 - struct usb_device *dev, unsigned frame, u32 uf_mask)
3504 -{
3505 - if (period == 0) /* error */
3506 - return 0;
3507 -
3508 - /* note bandwidth wastage: split never follows csplit
3509 - * (different dev or endpoint) until the next uframe.
3510 - * calling convention doesn't make that distinction.
3511 - */
3512 - for (; frame < fotg210->periodic_size; frame += period) {
3513 - union fotg210_shadow here;
3514 - __hc32 type;
3515 - struct fotg210_qh_hw *hw;
3516 -
3517 - here = fotg210->pshadow[frame];
3518 - type = Q_NEXT_TYPE(fotg210, fotg210->periodic[frame]);
3519 - while (here.ptr) {
3520 - switch (hc32_to_cpu(fotg210, type)) {
3521 - case Q_TYPE_ITD:
3522 - type = Q_NEXT_TYPE(fotg210, here.itd->hw_next);
3523 - here = here.itd->itd_next;
3524 - continue;
3525 - case Q_TYPE_QH:
3526 - hw = here.qh->hw;
3527 - if (same_tt(dev, here.qh->dev)) {
3528 - u32 mask;
3529 -
3530 - mask = hc32_to_cpu(fotg210,
3531 - hw->hw_info2);
3532 - /* "knows" no gap is needed */
3533 - mask |= mask >> 8;
3534 - if (mask & uf_mask)
3535 - break;
3536 - }
3537 - type = Q_NEXT_TYPE(fotg210, hw->hw_next);
3538 - here = here.qh->qh_next;
3539 - continue;
3540 - /* case Q_TYPE_FSTN: */
3541 - default:
3542 - fotg210_dbg(fotg210,
3543 - "periodic frame %d bogus type %d\n",
3544 - frame, type);
3545 - }
3546 -
3547 - /* collision or error */
3548 - return 0;
3549 - }
3550 - }
3551 -
3552 - /* no collision */
3553 - return 1;
3554 -}
3555 -
3556 -static void enable_periodic(struct fotg210_hcd *fotg210)
3557 -{
3558 - if (fotg210->periodic_count++)
3559 - return;
3560 -
3561 - /* Stop waiting to turn off the periodic schedule */
3562 - fotg210->enabled_hrtimer_events &=
3563 - ~BIT(FOTG210_HRTIMER_DISABLE_PERIODIC);
3564 -
3565 - /* Don't start the schedule until PSS is 0 */
3566 - fotg210_poll_PSS(fotg210);
3567 - turn_on_io_watchdog(fotg210);
3568 -}
3569 -
3570 -static void disable_periodic(struct fotg210_hcd *fotg210)
3571 -{
3572 - if (--fotg210->periodic_count)
3573 - return;
3574 -
3575 - /* Don't turn off the schedule until PSS is 1 */
3576 - fotg210_poll_PSS(fotg210);
3577 -}
3578 -
3579 -/* periodic schedule slots have iso tds (normal or split) first, then a
3580 - * sparse tree for active interrupt transfers.
3581 - *
3582 - * this just links in a qh; caller guarantees uframe masks are set right.
3583 - * no FSTN support (yet; fotg210 0.96+)
3584 - */
3585 -static void qh_link_periodic(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
3586 -{
3587 - unsigned i;
3588 - unsigned period = qh->period;
3589 -
3590 - dev_dbg(&qh->dev->dev,
3591 - "link qh%d-%04x/%p start %d [%d/%d us]\n", period,
3592 - hc32_to_cpup(fotg210, &qh->hw->hw_info2) &
3593 - (QH_CMASK | QH_SMASK), qh, qh->start, qh->usecs,
3594 - qh->c_usecs);
3595 -
3596 - /* high bandwidth, or otherwise every microframe */
3597 - if (period == 0)
3598 - period = 1;
3599 -
3600 - for (i = qh->start; i < fotg210->periodic_size; i += period) {
3601 - union fotg210_shadow *prev = &fotg210->pshadow[i];
3602 - __hc32 *hw_p = &fotg210->periodic[i];
3603 - union fotg210_shadow here = *prev;
3604 - __hc32 type = 0;
3605 -
3606 - /* skip the iso nodes at list head */
3607 - while (here.ptr) {
3608 - type = Q_NEXT_TYPE(fotg210, *hw_p);
3609 - if (type == cpu_to_hc32(fotg210, Q_TYPE_QH))
3610 - break;
3611 - prev = periodic_next_shadow(fotg210, prev, type);
3612 - hw_p = shadow_next_periodic(fotg210, &here, type);
3613 - here = *prev;
3614 - }
3615 -
3616 - /* sorting each branch by period (slow-->fast)
3617 - * enables sharing interior tree nodes
3618 - */
3619 - while (here.ptr && qh != here.qh) {
3620 - if (qh->period > here.qh->period)
3621 - break;
3622 - prev = &here.qh->qh_next;
3623 - hw_p = &here.qh->hw->hw_next;
3624 - here = *prev;
3625 - }
3626 - /* link in this qh, unless some earlier pass did that */
3627 - if (qh != here.qh) {
3628 - qh->qh_next = here;
3629 - if (here.qh)
3630 - qh->hw->hw_next = *hw_p;
3631 - wmb();
3632 - prev->qh = qh;
3633 - *hw_p = QH_NEXT(fotg210, qh->qh_dma);
3634 - }
3635 - }
3636 - qh->qh_state = QH_STATE_LINKED;
3637 - qh->xacterrs = 0;
3638 -
3639 - /* update per-qh bandwidth for usbfs */
3640 - fotg210_to_hcd(fotg210)->self.bandwidth_allocated += qh->period
3641 - ? ((qh->usecs + qh->c_usecs) / qh->period)
3642 - : (qh->usecs * 8);
3643 -
3644 - list_add(&qh->intr_node, &fotg210->intr_qh_list);
3645 -
3646 - /* maybe enable periodic schedule processing */
3647 - ++fotg210->intr_count;
3648 - enable_periodic(fotg210);
3649 -}
3650 -
3651 -static void qh_unlink_periodic(struct fotg210_hcd *fotg210,
3652 - struct fotg210_qh *qh)
3653 -{
3654 - unsigned i;
3655 - unsigned period;
3656 -
3657 - /*
3658 - * If qh is for a low/full-speed device, simply unlinking it
3659 - * could interfere with an ongoing split transaction. To unlink
3660 - * it safely would require setting the QH_INACTIVATE bit and
3661 - * waiting at least one frame, as described in EHCI 4.12.2.5.
3662 - *
3663 - * We won't bother with any of this. Instead, we assume that the
3664 - * only reason for unlinking an interrupt QH while the current URB
3665 - * is still active is to dequeue all the URBs (flush the whole
3666 - * endpoint queue).
3667 - *
3668 - * If rebalancing the periodic schedule is ever implemented, this
3669 - * approach will no longer be valid.
3670 - */
3671 -
3672 - /* high bandwidth, or otherwise part of every microframe */
3673 - period = qh->period;
3674 - if (!period)
3675 - period = 1;
3676 -
3677 - for (i = qh->start; i < fotg210->periodic_size; i += period)
3678 - periodic_unlink(fotg210, i, qh);
3679 -
3680 - /* update per-qh bandwidth for usbfs */
3681 - fotg210_to_hcd(fotg210)->self.bandwidth_allocated -= qh->period
3682 - ? ((qh->usecs + qh->c_usecs) / qh->period)
3683 - : (qh->usecs * 8);
3684 -
3685 - dev_dbg(&qh->dev->dev,
3686 - "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
3687 - qh->period, hc32_to_cpup(fotg210, &qh->hw->hw_info2) &
3688 - (QH_CMASK | QH_SMASK), qh, qh->start, qh->usecs,
3689 - qh->c_usecs);
3690 -
3691 - /* qh->qh_next still "live" to HC */
3692 - qh->qh_state = QH_STATE_UNLINK;
3693 - qh->qh_next.ptr = NULL;
3694 -
3695 - if (fotg210->qh_scan_next == qh)
3696 - fotg210->qh_scan_next = list_entry(qh->intr_node.next,
3697 - struct fotg210_qh, intr_node);
3698 - list_del(&qh->intr_node);
3699 -}
3700 -
3701 -static void start_unlink_intr(struct fotg210_hcd *fotg210,
3702 - struct fotg210_qh *qh)
3703 -{
3704 - /* If the QH isn't linked then there's nothing we can do
3705 - * unless we were called during a giveback, in which case
3706 - * qh_completions() has to deal with it.
3707 - */
3708 - if (qh->qh_state != QH_STATE_LINKED) {
3709 - if (qh->qh_state == QH_STATE_COMPLETING)
3710 - qh->needs_rescan = 1;
3711 - return;
3712 - }
3713 -
3714 - qh_unlink_periodic(fotg210, qh);
3715 -
3716 - /* Make sure the unlinks are visible before starting the timer */
3717 - wmb();
3718 -
3719 - /*
3720 - * The EHCI spec doesn't say how long it takes the controller to
3721 - * stop accessing an unlinked interrupt QH. The timer delay is
3722 - * 9 uframes; presumably that will be long enough.
3723 - */
3724 - qh->unlink_cycle = fotg210->intr_unlink_cycle;
3725 -
3726 - /* New entries go at the end of the intr_unlink list */
3727 - if (fotg210->intr_unlink)
3728 - fotg210->intr_unlink_last->unlink_next = qh;
3729 - else
3730 - fotg210->intr_unlink = qh;
3731 - fotg210->intr_unlink_last = qh;
3732 -
3733 - if (fotg210->intr_unlinking)
3734 - ; /* Avoid recursive calls */
3735 - else if (fotg210->rh_state < FOTG210_RH_RUNNING)
3736 - fotg210_handle_intr_unlinks(fotg210);
3737 - else if (fotg210->intr_unlink == qh) {
3738 - fotg210_enable_event(fotg210, FOTG210_HRTIMER_UNLINK_INTR,
3739 - true);
3740 - ++fotg210->intr_unlink_cycle;
3741 - }
3742 -}
3743 -
3744 -static void end_unlink_intr(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
3745 -{
3746 - struct fotg210_qh_hw *hw = qh->hw;
3747 - int rc;
3748 -
3749 - qh->qh_state = QH_STATE_IDLE;
3750 - hw->hw_next = FOTG210_LIST_END(fotg210);
3751 -
3752 - qh_completions(fotg210, qh);
3753 -
3754 - /* reschedule QH iff another request is queued */
3755 - if (!list_empty(&qh->qtd_list) &&
3756 - fotg210->rh_state == FOTG210_RH_RUNNING) {
3757 - rc = qh_schedule(fotg210, qh);
3758 -
3759 - /* An error here likely indicates handshake failure
3760 - * or no space left in the schedule. Neither fault
3761 - * should happen often ...
3762 - *
3763 - * FIXME kill the now-dysfunctional queued urbs
3764 - */
3765 - if (rc != 0)
3766 - fotg210_err(fotg210, "can't reschedule qh %p, err %d\n",
3767 - qh, rc);
3768 - }
3769 -
3770 - /* maybe turn off periodic schedule */
3771 - --fotg210->intr_count;
3772 - disable_periodic(fotg210);
3773 -}
3774 -
3775 -static int check_period(struct fotg210_hcd *fotg210, unsigned frame,
3776 - unsigned uframe, unsigned period, unsigned usecs)
3777 -{
3778 - int claimed;
3779 -
3780 - /* complete split running into next frame?
3781 - * given FSTN support, we could sometimes check...
3782 - */
3783 - if (uframe >= 8)
3784 - return 0;
3785 -
3786 - /* convert "usecs we need" to "max already claimed" */
3787 - usecs = fotg210->uframe_periodic_max - usecs;
3788 -
3789 - /* we "know" 2 and 4 uframe intervals were rejected; so
3790 - * for period 0, check _every_ microframe in the schedule.
3791 - */
3792 - if (unlikely(period == 0)) {
3793 - do {
3794 - for (uframe = 0; uframe < 7; uframe++) {
3795 - claimed = periodic_usecs(fotg210, frame,
3796 - uframe);
3797 - if (claimed > usecs)
3798 - return 0;
3799 - }
3800 - } while ((frame += 1) < fotg210->periodic_size);
3801 -
3802 - /* just check the specified uframe, at that period */
3803 - } else {
3804 - do {
3805 - claimed = periodic_usecs(fotg210, frame, uframe);
3806 - if (claimed > usecs)
3807 - return 0;
3808 - } while ((frame += period) < fotg210->periodic_size);
3809 - }
3810 -
3811 - /* success! */
3812 - return 1;
3813 -}
3814 -
3815 -static int check_intr_schedule(struct fotg210_hcd *fotg210, unsigned frame,
3816 - unsigned uframe, const struct fotg210_qh *qh, __hc32 *c_maskp)
3817 -{
3818 - int retval = -ENOSPC;
3819 - u8 mask = 0;
3820 -
3821 - if (qh->c_usecs && uframe >= 6) /* FSTN territory? */
3822 - goto done;
3823 -
3824 - if (!check_period(fotg210, frame, uframe, qh->period, qh->usecs))
3825 - goto done;
3826 - if (!qh->c_usecs) {
3827 - retval = 0;
3828 - *c_maskp = 0;
3829 - goto done;
3830 - }
3831 -
3832 - /* Make sure this tt's buffer is also available for CSPLITs.
3833 - * We pessimize a bit; probably the typical full speed case
3834 - * doesn't need the second CSPLIT.
3835 - *
3836 - * NOTE: both SPLIT and CSPLIT could be checked in just
3837 - * one smart pass...
3838 - */
3839 - mask = 0x03 << (uframe + qh->gap_uf);
3840 - *c_maskp = cpu_to_hc32(fotg210, mask << 8);
3841 -
3842 - mask |= 1 << uframe;
3843 - if (tt_no_collision(fotg210, qh->period, qh->dev, frame, mask)) {
3844 - if (!check_period(fotg210, frame, uframe + qh->gap_uf + 1,
3845 - qh->period, qh->c_usecs))
3846 - goto done;
3847 - if (!check_period(fotg210, frame, uframe + qh->gap_uf,
3848 - qh->period, qh->c_usecs))
3849 - goto done;
3850 - retval = 0;
3851 - }
3852 -done:
3853 - return retval;
3854 -}
3855 -
3856 -/* "first fit" scheduling policy used the first time through,
3857 - * or when the previous schedule slot can't be re-used.
3858 - */
3859 -static int qh_schedule(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
3860 -{
3861 - int status;
3862 - unsigned uframe;
3863 - __hc32 c_mask;
3864 - unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
3865 - struct fotg210_qh_hw *hw = qh->hw;
3866 -
3867 - qh_refresh(fotg210, qh);
3868 - hw->hw_next = FOTG210_LIST_END(fotg210);
3869 - frame = qh->start;
3870 -
3871 - /* reuse the previous schedule slots, if we can */
3872 - if (frame < qh->period) {
3873 - uframe = ffs(hc32_to_cpup(fotg210, &hw->hw_info2) & QH_SMASK);
3874 - status = check_intr_schedule(fotg210, frame, --uframe,
3875 - qh, &c_mask);
3876 - } else {
3877 - uframe = 0;
3878 - c_mask = 0;
3879 - status = -ENOSPC;
3880 - }
3881 -
3882 - /* else scan the schedule to find a group of slots such that all
3883 - * uframes have enough periodic bandwidth available.
3884 - */
3885 - if (status) {
3886 - /* "normal" case, uframing flexible except with splits */
3887 - if (qh->period) {
3888 - int i;
3889 -
3890 - for (i = qh->period; status && i > 0; --i) {
3891 - frame = ++fotg210->random_frame % qh->period;
3892 - for (uframe = 0; uframe < 8; uframe++) {
3893 - status = check_intr_schedule(fotg210,
3894 - frame, uframe, qh,
3895 - &c_mask);
3896 - if (status == 0)
3897 - break;
3898 - }
3899 - }
3900 -
3901 - /* qh->period == 0 means every uframe */
3902 - } else {
3903 - frame = 0;
3904 - status = check_intr_schedule(fotg210, 0, 0, qh,
3905 - &c_mask);
3906 - }
3907 - if (status)
3908 - goto done;
3909 - qh->start = frame;
3910 -
3911 - /* reset S-frame and (maybe) C-frame masks */
3912 - hw->hw_info2 &= cpu_to_hc32(fotg210, ~(QH_CMASK | QH_SMASK));
3913 - hw->hw_info2 |= qh->period
3914 - ? cpu_to_hc32(fotg210, 1 << uframe)
3915 - : cpu_to_hc32(fotg210, QH_SMASK);
3916 - hw->hw_info2 |= c_mask;
3917 - } else
3918 - fotg210_dbg(fotg210, "reused qh %p schedule\n", qh);
3919 -
3920 - /* stuff into the periodic schedule */
3921 - qh_link_periodic(fotg210, qh);
3922 -done:
3923 - return status;
3924 -}
3925 -
3926 -static int intr_submit(struct fotg210_hcd *fotg210, struct urb *urb,
3927 - struct list_head *qtd_list, gfp_t mem_flags)
3928 -{
3929 - unsigned epnum;
3930 - unsigned long flags;
3931 - struct fotg210_qh *qh;
3932 - int status;
3933 - struct list_head empty;
3934 -
3935 - /* get endpoint and transfer/schedule data */
3936 - epnum = urb->ep->desc.bEndpointAddress;
3937 -
3938 - spin_lock_irqsave(&fotg210->lock, flags);
3939 -
3940 - if (unlikely(!HCD_HW_ACCESSIBLE(fotg210_to_hcd(fotg210)))) {
3941 - status = -ESHUTDOWN;
3942 - goto done_not_linked;
3943 - }
3944 - status = usb_hcd_link_urb_to_ep(fotg210_to_hcd(fotg210), urb);
3945 - if (unlikely(status))
3946 - goto done_not_linked;
3947 -
3948 - /* get qh and force any scheduling errors */
3949 - INIT_LIST_HEAD(&empty);
3950 - qh = qh_append_tds(fotg210, urb, &empty, epnum, &urb->ep->hcpriv);
3951 - if (qh == NULL) {
3952 - status = -ENOMEM;
3953 - goto done;
3954 - }
3955 - if (qh->qh_state == QH_STATE_IDLE) {
3956 - status = qh_schedule(fotg210, qh);
3957 - if (status)
3958 - goto done;
3959 - }
3960 -
3961 - /* then queue the urb's tds to the qh */
3962 - qh = qh_append_tds(fotg210, urb, qtd_list, epnum, &urb->ep->hcpriv);
3963 - BUG_ON(qh == NULL);
3964 -
3965 - /* ... update usbfs periodic stats */
3966 - fotg210_to_hcd(fotg210)->self.bandwidth_int_reqs++;
3967 -
3968 -done:
3969 - if (unlikely(status))
3970 - usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
3971 -done_not_linked:
3972 - spin_unlock_irqrestore(&fotg210->lock, flags);
3973 - if (status)
3974 - qtd_list_free(fotg210, urb, qtd_list);
3975 -
3976 - return status;
3977 -}
3978 -
3979 -static void scan_intr(struct fotg210_hcd *fotg210)
3980 -{
3981 - struct fotg210_qh *qh;
3982 -
3983 - list_for_each_entry_safe(qh, fotg210->qh_scan_next,
3984 - &fotg210->intr_qh_list, intr_node) {
3985 -rescan:
3986 - /* clean any finished work for this qh */
3987 - if (!list_empty(&qh->qtd_list)) {
3988 - int temp;
3989 -
3990 - /*
3991 - * Unlinks could happen here; completion reporting
3992 - * drops the lock. That's why fotg210->qh_scan_next
3993 - * always holds the next qh to scan; if the next qh
3994 - * gets unlinked then fotg210->qh_scan_next is adjusted
3995 - * in qh_unlink_periodic().
3996 - */
3997 - temp = qh_completions(fotg210, qh);
3998 - if (unlikely(qh->needs_rescan ||
3999 - (list_empty(&qh->qtd_list) &&
4000 - qh->qh_state == QH_STATE_LINKED)))
4001 - start_unlink_intr(fotg210, qh);
4002 - else if (temp != 0)
4003 - goto rescan;
4004 - }
4005 - }
4006 -}
4007 -
4008 -/* fotg210_iso_stream ops work with both ITD and SITD */
4009 -
4010 -static struct fotg210_iso_stream *iso_stream_alloc(gfp_t mem_flags)
4011 -{
4012 - struct fotg210_iso_stream *stream;
4013 -
4014 - stream = kzalloc(sizeof(*stream), mem_flags);
4015 - if (likely(stream != NULL)) {
4016 - INIT_LIST_HEAD(&stream->td_list);
4017 - INIT_LIST_HEAD(&stream->free_list);
4018 - stream->next_uframe = -1;
4019 - }
4020 - return stream;
4021 -}
4022 -
4023 -static void iso_stream_init(struct fotg210_hcd *fotg210,
4024 - struct fotg210_iso_stream *stream, struct usb_device *dev,
4025 - int pipe, unsigned interval)
4026 -{
4027 - u32 buf1;
4028 - unsigned epnum, maxp;
4029 - int is_input;
4030 - long bandwidth;
4031 - unsigned multi;
4032 - struct usb_host_endpoint *ep;
4033 -
4034 - /*
4035 - * this might be a "high bandwidth" highspeed endpoint,
4036 - * as encoded in the ep descriptor's wMaxPacket field
4037 - */
4038 - epnum = usb_pipeendpoint(pipe);
4039 - is_input = usb_pipein(pipe) ? USB_DIR_IN : 0;
4040 - ep = usb_pipe_endpoint(dev, pipe);
4041 - maxp = usb_endpoint_maxp(&ep->desc);
4042 - if (is_input)
4043 - buf1 = (1 << 11);
4044 - else
4045 - buf1 = 0;
4046 -
4047 - multi = usb_endpoint_maxp_mult(&ep->desc);
4048 - buf1 |= maxp;
4049 - maxp *= multi;
4050 -
4051 - stream->buf0 = cpu_to_hc32(fotg210, (epnum << 8) | dev->devnum);
4052 - stream->buf1 = cpu_to_hc32(fotg210, buf1);
4053 - stream->buf2 = cpu_to_hc32(fotg210, multi);
4054 -
4055 - /* usbfs wants to report the average usecs per frame tied up
4056 - * when transfers on this endpoint are scheduled ...
4057 - */
4058 - if (dev->speed == USB_SPEED_FULL) {
4059 - interval <<= 3;
4060 - stream->usecs = NS_TO_US(usb_calc_bus_time(dev->speed,
4061 - is_input, 1, maxp));
4062 - stream->usecs /= 8;
4063 - } else {
4064 - stream->highspeed = 1;
4065 - stream->usecs = HS_USECS_ISO(maxp);
4066 - }
4067 - bandwidth = stream->usecs * 8;
4068 - bandwidth /= interval;
4069 -
4070 - stream->bandwidth = bandwidth;
4071 - stream->udev = dev;
4072 - stream->bEndpointAddress = is_input | epnum;
4073 - stream->interval = interval;
4074 - stream->maxp = maxp;
4075 -}
4076 -
4077 -static struct fotg210_iso_stream *iso_stream_find(struct fotg210_hcd *fotg210,
4078 - struct urb *urb)
4079 -{
4080 - unsigned epnum;
4081 - struct fotg210_iso_stream *stream;
4082 - struct usb_host_endpoint *ep;
4083 - unsigned long flags;
4084 -
4085 - epnum = usb_pipeendpoint(urb->pipe);
4086 - if (usb_pipein(urb->pipe))
4087 - ep = urb->dev->ep_in[epnum];
4088 - else
4089 - ep = urb->dev->ep_out[epnum];
4090 -
4091 - spin_lock_irqsave(&fotg210->lock, flags);
4092 - stream = ep->hcpriv;
4093 -
4094 - if (unlikely(stream == NULL)) {
4095 - stream = iso_stream_alloc(GFP_ATOMIC);
4096 - if (likely(stream != NULL)) {
4097 - ep->hcpriv = stream;
4098 - stream->ep = ep;
4099 - iso_stream_init(fotg210, stream, urb->dev, urb->pipe,
4100 - urb->interval);
4101 - }
4102 -
4103 - /* if dev->ep[epnum] is a QH, hw is set */
4104 - } else if (unlikely(stream->hw != NULL)) {
4105 - fotg210_dbg(fotg210, "dev %s ep%d%s, not iso??\n",
4106 - urb->dev->devpath, epnum,
4107 - usb_pipein(urb->pipe) ? "in" : "out");
4108 - stream = NULL;
4109 - }
4110 -
4111 - spin_unlock_irqrestore(&fotg210->lock, flags);
4112 - return stream;
4113 -}
4114 -
4115 -/* fotg210_iso_sched ops can be ITD-only or SITD-only */
4116 -
4117 -static struct fotg210_iso_sched *iso_sched_alloc(unsigned packets,
4118 - gfp_t mem_flags)
4119 -{
4120 - struct fotg210_iso_sched *iso_sched;
4121 -
4122 - iso_sched = kzalloc(struct_size(iso_sched, packet, packets), mem_flags);
4123 - if (likely(iso_sched != NULL))
4124 - INIT_LIST_HEAD(&iso_sched->td_list);
4125 -
4126 - return iso_sched;
4127 -}
4128 -
4129 -static inline void itd_sched_init(struct fotg210_hcd *fotg210,
4130 - struct fotg210_iso_sched *iso_sched,
4131 - struct fotg210_iso_stream *stream, struct urb *urb)
4132 -{
4133 - unsigned i;
4134 - dma_addr_t dma = urb->transfer_dma;
4135 -
4136 - /* how many uframes are needed for these transfers */
4137 - iso_sched->span = urb->number_of_packets * stream->interval;
4138 -
4139 - /* figure out per-uframe itd fields that we'll need later
4140 - * when we fit new itds into the schedule.
4141 - */
4142 - for (i = 0; i < urb->number_of_packets; i++) {
4143 - struct fotg210_iso_packet *uframe = &iso_sched->packet[i];
4144 - unsigned length;
4145 - dma_addr_t buf;
4146 - u32 trans;
4147 -
4148 - length = urb->iso_frame_desc[i].length;
4149 - buf = dma + urb->iso_frame_desc[i].offset;
4150 -
4151 - trans = FOTG210_ISOC_ACTIVE;
4152 - trans |= buf & 0x0fff;
4153 - if (unlikely(((i + 1) == urb->number_of_packets))
4154 - && !(urb->transfer_flags & URB_NO_INTERRUPT))
4155 - trans |= FOTG210_ITD_IOC;
4156 - trans |= length << 16;
4157 - uframe->transaction = cpu_to_hc32(fotg210, trans);
4158 -
4159 - /* might need to cross a buffer page within a uframe */
4160 - uframe->bufp = (buf & ~(u64)0x0fff);
4161 - buf += length;
4162 - if (unlikely((uframe->bufp != (buf & ~(u64)0x0fff))))
4163 - uframe->cross = 1;
4164 - }
4165 -}
4166 -
4167 -static void iso_sched_free(struct fotg210_iso_stream *stream,
4168 - struct fotg210_iso_sched *iso_sched)
4169 -{
4170 - if (!iso_sched)
4171 - return;
4172 - /* caller must hold fotg210->lock!*/
4173 - list_splice(&iso_sched->td_list, &stream->free_list);
4174 - kfree(iso_sched);
4175 -}
4176 -
4177 -static int itd_urb_transaction(struct fotg210_iso_stream *stream,
4178 - struct fotg210_hcd *fotg210, struct urb *urb, gfp_t mem_flags)
4179 -{
4180 - struct fotg210_itd *itd;
4181 - dma_addr_t itd_dma;
4182 - int i;
4183 - unsigned num_itds;
4184 - struct fotg210_iso_sched *sched;
4185 - unsigned long flags;
4186 -
4187 - sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
4188 - if (unlikely(sched == NULL))
4189 - return -ENOMEM;
4190 -
4191 - itd_sched_init(fotg210, sched, stream, urb);
4192 -
4193 - if (urb->interval < 8)
4194 - num_itds = 1 + (sched->span + 7) / 8;
4195 - else
4196 - num_itds = urb->number_of_packets;
4197 -
4198 - /* allocate/init ITDs */
4199 - spin_lock_irqsave(&fotg210->lock, flags);
4200 - for (i = 0; i < num_itds; i++) {
4201 -
4202 - /*
4203 - * Use iTDs from the free list, but not iTDs that may
4204 - * still be in use by the hardware.
4205 - */
4206 - if (likely(!list_empty(&stream->free_list))) {
4207 - itd = list_first_entry(&stream->free_list,
4208 - struct fotg210_itd, itd_list);
4209 - if (itd->frame == fotg210->now_frame)
4210 - goto alloc_itd;
4211 - list_del(&itd->itd_list);
4212 - itd_dma = itd->itd_dma;
4213 - } else {
4214 -alloc_itd:
4215 - spin_unlock_irqrestore(&fotg210->lock, flags);
4216 - itd = dma_pool_alloc(fotg210->itd_pool, mem_flags,
4217 - &itd_dma);
4218 - spin_lock_irqsave(&fotg210->lock, flags);
4219 - if (!itd) {
4220 - iso_sched_free(stream, sched);
4221 - spin_unlock_irqrestore(&fotg210->lock, flags);
4222 - return -ENOMEM;
4223 - }
4224 - }
4225 -
4226 - memset(itd, 0, sizeof(*itd));
4227 - itd->itd_dma = itd_dma;
4228 - list_add(&itd->itd_list, &sched->td_list);
4229 - }
4230 - spin_unlock_irqrestore(&fotg210->lock, flags);
4231 -
4232 - /* temporarily store schedule info in hcpriv */
4233 - urb->hcpriv = sched;
4234 - urb->error_count = 0;
4235 - return 0;
4236 -}
4237 -
4238 -static inline int itd_slot_ok(struct fotg210_hcd *fotg210, u32 mod, u32 uframe,
4239 - u8 usecs, u32 period)
4240 -{
4241 - uframe %= period;
4242 - do {
4243 - /* can't commit more than uframe_periodic_max usec */
4244 - if (periodic_usecs(fotg210, uframe >> 3, uframe & 0x7)
4245 - > (fotg210->uframe_periodic_max - usecs))
4246 - return 0;
4247 -
4248 - /* we know urb->interval is 2^N uframes */
4249 - uframe += period;
4250 - } while (uframe < mod);
4251 - return 1;
4252 -}
4253 -
4254 -/* This scheduler plans almost as far into the future as it has actual
4255 - * periodic schedule slots. (Affected by TUNE_FLS, which defaults to
4256 - * "as small as possible" to be cache-friendlier.) That limits the size
4257 - * transfers you can stream reliably; avoid more than 64 msec per urb.
4258 - * Also avoid queue depths of less than fotg210's worst irq latency (affected
4259 - * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
4260 - * and other factors); or more than about 230 msec total (for portability,
4261 - * given FOTG210_TUNE_FLS and the slop). Or, write a smarter scheduler!
4262 - */
4263 -
4264 -#define SCHEDULE_SLOP 80 /* microframes */
4265 -
4266 -static int iso_stream_schedule(struct fotg210_hcd *fotg210, struct urb *urb,
4267 - struct fotg210_iso_stream *stream)
4268 -{
4269 - u32 now, next, start, period, span;
4270 - int status;
4271 - unsigned mod = fotg210->periodic_size << 3;
4272 - struct fotg210_iso_sched *sched = urb->hcpriv;
4273 -
4274 - period = urb->interval;
4275 - span = sched->span;
4276 -
4277 - if (span > mod - SCHEDULE_SLOP) {
4278 - fotg210_dbg(fotg210, "iso request %p too long\n", urb);
4279 - status = -EFBIG;
4280 - goto fail;
4281 - }
4282 -
4283 - now = fotg210_read_frame_index(fotg210) & (mod - 1);
4284 -
4285 - /* Typical case: reuse current schedule, stream is still active.
4286 - * Hopefully there are no gaps from the host falling behind
4287 - * (irq delays etc), but if there are we'll take the next
4288 - * slot in the schedule, implicitly assuming URB_ISO_ASAP.
4289 - */
4290 - if (likely(!list_empty(&stream->td_list))) {
4291 - u32 excess;
4292 -
4293 - /* For high speed devices, allow scheduling within the
4294 - * isochronous scheduling threshold. For full speed devices
4295 - * and Intel PCI-based controllers, don't (work around for
4296 - * Intel ICH9 bug).
4297 - */
4298 - if (!stream->highspeed && fotg210->fs_i_thresh)
4299 - next = now + fotg210->i_thresh;
4300 - else
4301 - next = now;
4302 -
4303 - /* Fell behind (by up to twice the slop amount)?
4304 - * We decide based on the time of the last currently-scheduled
4305 - * slot, not the time of the next available slot.
4306 - */
4307 - excess = (stream->next_uframe - period - next) & (mod - 1);
4308 - if (excess >= mod - 2 * SCHEDULE_SLOP)
4309 - start = next + excess - mod + period *
4310 - DIV_ROUND_UP(mod - excess, period);
4311 - else
4312 - start = next + excess + period;
4313 - if (start - now >= mod) {
4314 - fotg210_dbg(fotg210, "request %p would overflow (%d+%d >= %d)\n",
4315 - urb, start - now - period, period,
4316 - mod);
4317 - status = -EFBIG;
4318 - goto fail;
4319 - }
4320 - }
4321 -
4322 - /* need to schedule; when's the next (u)frame we could start?
4323 - * this is bigger than fotg210->i_thresh allows; scheduling itself
4324 - * isn't free, the slop should handle reasonably slow cpus. it
4325 - * can also help high bandwidth if the dma and irq loads don't
4326 - * jump until after the queue is primed.
4327 - */
4328 - else {
4329 - int done = 0;
4330 -
4331 - start = SCHEDULE_SLOP + (now & ~0x07);
4332 -
4333 - /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
4334 -
4335 - /* find a uframe slot with enough bandwidth.
4336 - * Early uframes are more precious because full-speed
4337 - * iso IN transfers can't use late uframes,
4338 - * and therefore they should be allocated last.
4339 - */
4340 - next = start;
4341 - start += period;
4342 - do {
4343 - start--;
4344 - /* check schedule: enough space? */
4345 - if (itd_slot_ok(fotg210, mod, start,
4346 - stream->usecs, period))
4347 - done = 1;
4348 - } while (start > next && !done);
4349 -
4350 - /* no room in the schedule */
4351 - if (!done) {
4352 - fotg210_dbg(fotg210, "iso resched full %p (now %d max %d)\n",
4353 - urb, now, now + mod);
4354 - status = -ENOSPC;
4355 - goto fail;
4356 - }
4357 - }
4358 -
4359 - /* Tried to schedule too far into the future? */
4360 - if (unlikely(start - now + span - period >=
4361 - mod - 2 * SCHEDULE_SLOP)) {
4362 - fotg210_dbg(fotg210, "request %p would overflow (%d+%d >= %d)\n",
4363 - urb, start - now, span - period,
4364 - mod - 2 * SCHEDULE_SLOP);
4365 - status = -EFBIG;
4366 - goto fail;
4367 - }
4368 -
4369 - stream->next_uframe = start & (mod - 1);
4370 -
4371 - /* report high speed start in uframes; full speed, in frames */
4372 - urb->start_frame = stream->next_uframe;
4373 - if (!stream->highspeed)
4374 - urb->start_frame >>= 3;
4375 -
4376 - /* Make sure scan_isoc() sees these */
4377 - if (fotg210->isoc_count == 0)
4378 - fotg210->next_frame = now >> 3;
4379 - return 0;
4380 -
4381 -fail:
4382 - iso_sched_free(stream, sched);
4383 - urb->hcpriv = NULL;
4384 - return status;
4385 -}
4386 -
4387 -static inline void itd_init(struct fotg210_hcd *fotg210,
4388 - struct fotg210_iso_stream *stream, struct fotg210_itd *itd)
4389 -{
4390 - int i;
4391 -
4392 - /* it's been recently zeroed */
4393 - itd->hw_next = FOTG210_LIST_END(fotg210);
4394 - itd->hw_bufp[0] = stream->buf0;
4395 - itd->hw_bufp[1] = stream->buf1;
4396 - itd->hw_bufp[2] = stream->buf2;
4397 -
4398 - for (i = 0; i < 8; i++)
4399 - itd->index[i] = -1;
4400 -
4401 - /* All other fields are filled when scheduling */
4402 -}
4403 -
4404 -static inline void itd_patch(struct fotg210_hcd *fotg210,
4405 - struct fotg210_itd *itd, struct fotg210_iso_sched *iso_sched,
4406 - unsigned index, u16 uframe)
4407 -{
4408 - struct fotg210_iso_packet *uf = &iso_sched->packet[index];
4409 - unsigned pg = itd->pg;
4410 -
4411 - uframe &= 0x07;
4412 - itd->index[uframe] = index;
4413 -
4414 - itd->hw_transaction[uframe] = uf->transaction;
4415 - itd->hw_transaction[uframe] |= cpu_to_hc32(fotg210, pg << 12);
4416 - itd->hw_bufp[pg] |= cpu_to_hc32(fotg210, uf->bufp & ~(u32)0);
4417 - itd->hw_bufp_hi[pg] |= cpu_to_hc32(fotg210, (u32)(uf->bufp >> 32));
4418 -
4419 - /* iso_frame_desc[].offset must be strictly increasing */
4420 - if (unlikely(uf->cross)) {
4421 - u64 bufp = uf->bufp + 4096;
4422 -
4423 - itd->pg = ++pg;
4424 - itd->hw_bufp[pg] |= cpu_to_hc32(fotg210, bufp & ~(u32)0);
4425 - itd->hw_bufp_hi[pg] |= cpu_to_hc32(fotg210, (u32)(bufp >> 32));
4426 - }
4427 -}
4428 -
4429 -static inline void itd_link(struct fotg210_hcd *fotg210, unsigned frame,
4430 - struct fotg210_itd *itd)
4431 -{
4432 - union fotg210_shadow *prev = &fotg210->pshadow[frame];
4433 - __hc32 *hw_p = &fotg210->periodic[frame];
4434 - union fotg210_shadow here = *prev;
4435 - __hc32 type = 0;
4436 -
4437 - /* skip any iso nodes which might belong to previous microframes */
4438 - while (here.ptr) {
4439 - type = Q_NEXT_TYPE(fotg210, *hw_p);
4440 - if (type == cpu_to_hc32(fotg210, Q_TYPE_QH))
4441 - break;
4442 - prev = periodic_next_shadow(fotg210, prev, type);
4443 - hw_p = shadow_next_periodic(fotg210, &here, type);
4444 - here = *prev;
4445 - }
4446 -
4447 - itd->itd_next = here;
4448 - itd->hw_next = *hw_p;
4449 - prev->itd = itd;
4450 - itd->frame = frame;
4451 - wmb();
4452 - *hw_p = cpu_to_hc32(fotg210, itd->itd_dma | Q_TYPE_ITD);
4453 -}
4454 -
4455 -/* fit urb's itds into the selected schedule slot; activate as needed */
4456 -static void itd_link_urb(struct fotg210_hcd *fotg210, struct urb *urb,
4457 - unsigned mod, struct fotg210_iso_stream *stream)
4458 -{
4459 - int packet;
4460 - unsigned next_uframe, uframe, frame;
4461 - struct fotg210_iso_sched *iso_sched = urb->hcpriv;
4462 - struct fotg210_itd *itd;
4463 -
4464 - next_uframe = stream->next_uframe & (mod - 1);
4465 -
4466 - if (unlikely(list_empty(&stream->td_list))) {
4467 - fotg210_to_hcd(fotg210)->self.bandwidth_allocated
4468 - += stream->bandwidth;
4469 - fotg210_dbg(fotg210,
4470 - "schedule devp %s ep%d%s-iso period %d start %d.%d\n",
4471 - urb->dev->devpath, stream->bEndpointAddress & 0x0f,
4472 - (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
4473 - urb->interval,
4474 - next_uframe >> 3, next_uframe & 0x7);
4475 - }
4476 -
4477 - /* fill iTDs uframe by uframe */
4478 - for (packet = 0, itd = NULL; packet < urb->number_of_packets;) {
4479 - if (itd == NULL) {
4480 - /* ASSERT: we have all necessary itds */
4481 -
4482 - /* ASSERT: no itds for this endpoint in this uframe */
4483 -
4484 - itd = list_entry(iso_sched->td_list.next,
4485 - struct fotg210_itd, itd_list);
4486 - list_move_tail(&itd->itd_list, &stream->td_list);
4487 - itd->stream = stream;
4488 - itd->urb = urb;
4489 - itd_init(fotg210, stream, itd);
4490 - }
4491 -
4492 - uframe = next_uframe & 0x07;
4493 - frame = next_uframe >> 3;
4494 -
4495 - itd_patch(fotg210, itd, iso_sched, packet, uframe);
4496 -
4497 - next_uframe += stream->interval;
4498 - next_uframe &= mod - 1;
4499 - packet++;
4500 -
4501 - /* link completed itds into the schedule */
4502 - if (((next_uframe >> 3) != frame)
4503 - || packet == urb->number_of_packets) {
4504 - itd_link(fotg210, frame & (fotg210->periodic_size - 1),
4505 - itd);
4506 - itd = NULL;
4507 - }
4508 - }
4509 - stream->next_uframe = next_uframe;
4510 -
4511 - /* don't need that schedule data any more */
4512 - iso_sched_free(stream, iso_sched);
4513 - urb->hcpriv = NULL;
4514 -
4515 - ++fotg210->isoc_count;
4516 - enable_periodic(fotg210);
4517 -}
4518 -
4519 -#define ISO_ERRS (FOTG210_ISOC_BUF_ERR | FOTG210_ISOC_BABBLE |\
4520 - FOTG210_ISOC_XACTERR)
4521 -
4522 -/* Process and recycle a completed ITD. Return true iff its urb completed,
4523 - * and hence its completion callback probably added things to the hardware
4524 - * schedule.
4525 - *
4526 - * Note that we carefully avoid recycling this descriptor until after any
4527 - * completion callback runs, so that it won't be reused quickly. That is,
4528 - * assuming (a) no more than two urbs per frame on this endpoint, and also
4529 - * (b) only this endpoint's completions submit URBs. It seems some silicon
4530 - * corrupts things if you reuse completed descriptors very quickly...
4531 - */
4532 -static bool itd_complete(struct fotg210_hcd *fotg210, struct fotg210_itd *itd)
4533 -{
4534 - struct urb *urb = itd->urb;
4535 - struct usb_iso_packet_descriptor *desc;
4536 - u32 t;
4537 - unsigned uframe;
4538 - int urb_index = -1;
4539 - struct fotg210_iso_stream *stream = itd->stream;
4540 - struct usb_device *dev;
4541 - bool retval = false;
4542 -
4543 - /* for each uframe with a packet */
4544 - for (uframe = 0; uframe < 8; uframe++) {
4545 - if (likely(itd->index[uframe] == -1))
4546 - continue;
4547 - urb_index = itd->index[uframe];
4548 - desc = &urb->iso_frame_desc[urb_index];
4549 -
4550 - t = hc32_to_cpup(fotg210, &itd->hw_transaction[uframe]);
4551 - itd->hw_transaction[uframe] = 0;
4552 -
4553 - /* report transfer status */
4554 - if (unlikely(t & ISO_ERRS)) {
4555 - urb->error_count++;
4556 - if (t & FOTG210_ISOC_BUF_ERR)
4557 - desc->status = usb_pipein(urb->pipe)
4558 - ? -ENOSR /* hc couldn't read */
4559 - : -ECOMM; /* hc couldn't write */
4560 - else if (t & FOTG210_ISOC_BABBLE)
4561 - desc->status = -EOVERFLOW;
4562 - else /* (t & FOTG210_ISOC_XACTERR) */
4563 - desc->status = -EPROTO;
4564 -
4565 - /* HC need not update length with this error */
4566 - if (!(t & FOTG210_ISOC_BABBLE)) {
4567 - desc->actual_length = FOTG210_ITD_LENGTH(t);
4568 - urb->actual_length += desc->actual_length;
4569 - }
4570 - } else if (likely((t & FOTG210_ISOC_ACTIVE) == 0)) {
4571 - desc->status = 0;
4572 - desc->actual_length = FOTG210_ITD_LENGTH(t);
4573 - urb->actual_length += desc->actual_length;
4574 - } else {
4575 - /* URB was too late */
4576 - desc->status = -EXDEV;
4577 - }
4578 - }
4579 -
4580 - /* handle completion now? */
4581 - if (likely((urb_index + 1) != urb->number_of_packets))
4582 - goto done;
4583 -
4584 - /* ASSERT: it's really the last itd for this urb
4585 - * list_for_each_entry (itd, &stream->td_list, itd_list)
4586 - * BUG_ON (itd->urb == urb);
4587 - */
4588 -
4589 - /* give urb back to the driver; completion often (re)submits */
4590 - dev = urb->dev;
4591 - fotg210_urb_done(fotg210, urb, 0);
4592 - retval = true;
4593 - urb = NULL;
4594 -
4595 - --fotg210->isoc_count;
4596 - disable_periodic(fotg210);
4597 -
4598 - if (unlikely(list_is_singular(&stream->td_list))) {
4599 - fotg210_to_hcd(fotg210)->self.bandwidth_allocated
4600 - -= stream->bandwidth;
4601 - fotg210_dbg(fotg210,
4602 - "deschedule devp %s ep%d%s-iso\n",
4603 - dev->devpath, stream->bEndpointAddress & 0x0f,
4604 - (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
4605 - }
4606 -
4607 -done:
4608 - itd->urb = NULL;
4609 -
4610 - /* Add to the end of the free list for later reuse */
4611 - list_move_tail(&itd->itd_list, &stream->free_list);
4612 -
4613 - /* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
4614 - if (list_empty(&stream->td_list)) {
4615 - list_splice_tail_init(&stream->free_list,
4616 - &fotg210->cached_itd_list);
4617 - start_free_itds(fotg210);
4618 - }
4619 -
4620 - return retval;
4621 -}
4622 -
4623 -static int itd_submit(struct fotg210_hcd *fotg210, struct urb *urb,
4624 - gfp_t mem_flags)
4625 -{
4626 - int status = -EINVAL;
4627 - unsigned long flags;
4628 - struct fotg210_iso_stream *stream;
4629 -
4630 - /* Get iso_stream head */
4631 - stream = iso_stream_find(fotg210, urb);
4632 - if (unlikely(stream == NULL)) {
4633 - fotg210_dbg(fotg210, "can't get iso stream\n");
4634 - return -ENOMEM;
4635 - }
4636 - if (unlikely(urb->interval != stream->interval &&
4637 - fotg210_port_speed(fotg210, 0) ==
4638 - USB_PORT_STAT_HIGH_SPEED)) {
4639 - fotg210_dbg(fotg210, "can't change iso interval %d --> %d\n",
4640 - stream->interval, urb->interval);
4641 - goto done;
4642 - }
4643 -
4644 -#ifdef FOTG210_URB_TRACE
4645 - fotg210_dbg(fotg210,
4646 - "%s %s urb %p ep%d%s len %d, %d pkts %d uframes[%p]\n",
4647 - __func__, urb->dev->devpath, urb,
4648 - usb_pipeendpoint(urb->pipe),
4649 - usb_pipein(urb->pipe) ? "in" : "out",
4650 - urb->transfer_buffer_length,
4651 - urb->number_of_packets, urb->interval,
4652 - stream);
4653 -#endif
4654 -
4655 - /* allocate ITDs w/o locking anything */
4656 - status = itd_urb_transaction(stream, fotg210, urb, mem_flags);
4657 - if (unlikely(status < 0)) {
4658 - fotg210_dbg(fotg210, "can't init itds\n");
4659 - goto done;
4660 - }
4661 -
4662 - /* schedule ... need to lock */
4663 - spin_lock_irqsave(&fotg210->lock, flags);
4664 - if (unlikely(!HCD_HW_ACCESSIBLE(fotg210_to_hcd(fotg210)))) {
4665 - status = -ESHUTDOWN;
4666 - goto done_not_linked;
4667 - }
4668 - status = usb_hcd_link_urb_to_ep(fotg210_to_hcd(fotg210), urb);
4669 - if (unlikely(status))
4670 - goto done_not_linked;
4671 - status = iso_stream_schedule(fotg210, urb, stream);
4672 - if (likely(status == 0))
4673 - itd_link_urb(fotg210, urb, fotg210->periodic_size << 3, stream);
4674 - else
4675 - usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
4676 -done_not_linked:
4677 - spin_unlock_irqrestore(&fotg210->lock, flags);
4678 -done:
4679 - return status;
4680 -}
4681 -
4682 -static inline int scan_frame_queue(struct fotg210_hcd *fotg210, unsigned frame,
4683 - unsigned now_frame, bool live)
4684 -{
4685 - unsigned uf;
4686 - bool modified;
4687 - union fotg210_shadow q, *q_p;
4688 - __hc32 type, *hw_p;
4689 -
4690 - /* scan each element in frame's queue for completions */
4691 - q_p = &fotg210->pshadow[frame];
4692 - hw_p = &fotg210->periodic[frame];
4693 - q.ptr = q_p->ptr;
4694 - type = Q_NEXT_TYPE(fotg210, *hw_p);
4695 - modified = false;
4696 -
4697 - while (q.ptr) {
4698 - switch (hc32_to_cpu(fotg210, type)) {
4699 - case Q_TYPE_ITD:
4700 - /* If this ITD is still active, leave it for
4701 - * later processing ... check the next entry.
4702 - * No need to check for activity unless the
4703 - * frame is current.
4704 - */
4705 - if (frame == now_frame && live) {
4706 - rmb();
4707 - for (uf = 0; uf < 8; uf++) {
4708 - if (q.itd->hw_transaction[uf] &
4709 - ITD_ACTIVE(fotg210))
4710 - break;
4711 - }
4712 - if (uf < 8) {
4713 - q_p = &q.itd->itd_next;
4714 - hw_p = &q.itd->hw_next;
4715 - type = Q_NEXT_TYPE(fotg210,
4716 - q.itd->hw_next);
4717 - q = *q_p;
4718 - break;
4719 - }
4720 - }
4721 -
4722 - /* Take finished ITDs out of the schedule
4723 - * and process them: recycle, maybe report
4724 - * URB completion. HC won't cache the
4725 - * pointer for much longer, if at all.
4726 - */
4727 - *q_p = q.itd->itd_next;
4728 - *hw_p = q.itd->hw_next;
4729 - type = Q_NEXT_TYPE(fotg210, q.itd->hw_next);
4730 - wmb();
4731 - modified = itd_complete(fotg210, q.itd);
4732 - q = *q_p;
4733 - break;
4734 - default:
4735 - fotg210_dbg(fotg210, "corrupt type %d frame %d shadow %p\n",
4736 - type, frame, q.ptr);
4737 - fallthrough;
4738 - case Q_TYPE_QH:
4739 - case Q_TYPE_FSTN:
4740 - /* End of the iTDs and siTDs */
4741 - q.ptr = NULL;
4742 - break;
4743 - }
4744 -
4745 - /* assume completion callbacks modify the queue */
4746 - if (unlikely(modified && fotg210->isoc_count > 0))
4747 - return -EINVAL;
4748 - }
4749 - return 0;
4750 -}
4751 -
4752 -static void scan_isoc(struct fotg210_hcd *fotg210)
4753 -{
4754 - unsigned uf, now_frame, frame, ret;
4755 - unsigned fmask = fotg210->periodic_size - 1;
4756 - bool live;
4757 -
4758 - /*
4759 - * When running, scan from last scan point up to "now"
4760 - * else clean up by scanning everything that's left.
4761 - * Touches as few pages as possible: cache-friendly.
4762 - */
4763 - if (fotg210->rh_state >= FOTG210_RH_RUNNING) {
4764 - uf = fotg210_read_frame_index(fotg210);
4765 - now_frame = (uf >> 3) & fmask;
4766 - live = true;
4767 - } else {
4768 - now_frame = (fotg210->next_frame - 1) & fmask;
4769 - live = false;
4770 - }
4771 - fotg210->now_frame = now_frame;
4772 -
4773 - frame = fotg210->next_frame;
4774 - for (;;) {
4775 - ret = 1;
4776 - while (ret != 0)
4777 - ret = scan_frame_queue(fotg210, frame,
4778 - now_frame, live);
4779 -
4780 - /* Stop when we have reached the current frame */
4781 - if (frame == now_frame)
4782 - break;
4783 - frame = (frame + 1) & fmask;
4784 - }
4785 - fotg210->next_frame = now_frame;
4786 -}
4787 -
4788 -/* Display / Set uframe_periodic_max
4789 - */
4790 -static ssize_t uframe_periodic_max_show(struct device *dev,
4791 - struct device_attribute *attr, char *buf)
4792 -{
4793 - struct fotg210_hcd *fotg210;
4794 - int n;
4795 -
4796 - fotg210 = hcd_to_fotg210(bus_to_hcd(dev_get_drvdata(dev)));
4797 - n = scnprintf(buf, PAGE_SIZE, "%d\n", fotg210->uframe_periodic_max);
4798 - return n;
4799 -}
4800 -
4801 -
4802 -static ssize_t uframe_periodic_max_store(struct device *dev,
4803 - struct device_attribute *attr, const char *buf, size_t count)
4804 -{
4805 - struct fotg210_hcd *fotg210;
4806 - unsigned uframe_periodic_max;
4807 - unsigned frame, uframe;
4808 - unsigned short allocated_max;
4809 - unsigned long flags;
4810 - ssize_t ret;
4811 -
4812 - fotg210 = hcd_to_fotg210(bus_to_hcd(dev_get_drvdata(dev)));
4813 - if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
4814 - return -EINVAL;
4815 -
4816 - if (uframe_periodic_max < 100 || uframe_periodic_max >= 125) {
4817 - fotg210_info(fotg210, "rejecting invalid request for uframe_periodic_max=%u\n",
4818 - uframe_periodic_max);
4819 - return -EINVAL;
4820 - }
4821 -
4822 - ret = -EINVAL;
4823 -
4824 - /*
4825 - * lock, so that our checking does not race with possible periodic
4826 - * bandwidth allocation through submitting new urbs.
4827 - */
4828 - spin_lock_irqsave(&fotg210->lock, flags);
4829 -
4830 - /*
4831 - * for request to decrease max periodic bandwidth, we have to check
4832 - * every microframe in the schedule to see whether the decrease is
4833 - * possible.
4834 - */
4835 - if (uframe_periodic_max < fotg210->uframe_periodic_max) {
4836 - allocated_max = 0;
4837 -
4838 - for (frame = 0; frame < fotg210->periodic_size; ++frame)
4839 - for (uframe = 0; uframe < 7; ++uframe)
4840 - allocated_max = max(allocated_max,
4841 - periodic_usecs(fotg210, frame,
4842 - uframe));
4843 -
4844 - if (allocated_max > uframe_periodic_max) {
4845 - fotg210_info(fotg210,
4846 - "cannot decrease uframe_periodic_max because periodic bandwidth is already allocated (%u > %u)\n",
4847 - allocated_max, uframe_periodic_max);
4848 - goto out_unlock;
4849 - }
4850 - }
4851 -
4852 - /* increasing is always ok */
4853 -
4854 - fotg210_info(fotg210,
4855 - "setting max periodic bandwidth to %u%% (== %u usec/uframe)\n",
4856 - 100 * uframe_periodic_max/125, uframe_periodic_max);
4857 -
4858 - if (uframe_periodic_max != 100)
4859 - fotg210_warn(fotg210, "max periodic bandwidth set is non-standard\n");
4860 -
4861 - fotg210->uframe_periodic_max = uframe_periodic_max;
4862 - ret = count;
4863 -
4864 -out_unlock:
4865 - spin_unlock_irqrestore(&fotg210->lock, flags);
4866 - return ret;
4867 -}
4868 -
4869 -static DEVICE_ATTR_RW(uframe_periodic_max);
4870 -
4871 -static inline int create_sysfs_files(struct fotg210_hcd *fotg210)
4872 -{
4873 - struct device *controller = fotg210_to_hcd(fotg210)->self.controller;
4874 -
4875 - return device_create_file(controller, &dev_attr_uframe_periodic_max);
4876 -}
4877 -
4878 -static inline void remove_sysfs_files(struct fotg210_hcd *fotg210)
4879 -{
4880 - struct device *controller = fotg210_to_hcd(fotg210)->self.controller;
4881 -
4882 - device_remove_file(controller, &dev_attr_uframe_periodic_max);
4883 -}
4884 -/* On some systems, leaving remote wakeup enabled prevents system shutdown.
4885 - * The firmware seems to think that powering off is a wakeup event!
4886 - * This routine turns off remote wakeup and everything else, on all ports.
4887 - */
4888 -static void fotg210_turn_off_all_ports(struct fotg210_hcd *fotg210)
4889 -{
4890 - u32 __iomem *status_reg = &fotg210->regs->port_status;
4891 -
4892 - fotg210_writel(fotg210, PORT_RWC_BITS, status_reg);
4893 -}
4894 -
4895 -/* Halt HC, turn off all ports, and let the BIOS use the companion controllers.
4896 - * Must be called with interrupts enabled and the lock not held.
4897 - */
4898 -static void fotg210_silence_controller(struct fotg210_hcd *fotg210)
4899 -{
4900 - fotg210_halt(fotg210);
4901 -
4902 - spin_lock_irq(&fotg210->lock);
4903 - fotg210->rh_state = FOTG210_RH_HALTED;
4904 - fotg210_turn_off_all_ports(fotg210);
4905 - spin_unlock_irq(&fotg210->lock);
4906 -}
4907 -
4908 -/* fotg210_shutdown kick in for silicon on any bus (not just pci, etc).
4909 - * This forcibly disables dma and IRQs, helping kexec and other cases
4910 - * where the next system software may expect clean state.
4911 - */
4912 -static void fotg210_shutdown(struct usb_hcd *hcd)
4913 -{
4914 - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
4915 -
4916 - spin_lock_irq(&fotg210->lock);
4917 - fotg210->shutdown = true;
4918 - fotg210->rh_state = FOTG210_RH_STOPPING;
4919 - fotg210->enabled_hrtimer_events = 0;
4920 - spin_unlock_irq(&fotg210->lock);
4921 -
4922 - fotg210_silence_controller(fotg210);
4923 -
4924 - hrtimer_cancel(&fotg210->hrtimer);
4925 -}
4926 -
4927 -/* fotg210_work is called from some interrupts, timers, and so on.
4928 - * it calls driver completion functions, after dropping fotg210->lock.
4929 - */
4930 -static void fotg210_work(struct fotg210_hcd *fotg210)
4931 -{
4932 - /* another CPU may drop fotg210->lock during a schedule scan while
4933 - * it reports urb completions. this flag guards against bogus
4934 - * attempts at re-entrant schedule scanning.
4935 - */
4936 - if (fotg210->scanning) {
4937 - fotg210->need_rescan = true;
4938 - return;
4939 - }
4940 - fotg210->scanning = true;
4941 -
4942 -rescan:
4943 - fotg210->need_rescan = false;
4944 - if (fotg210->async_count)
4945 - scan_async(fotg210);
4946 - if (fotg210->intr_count > 0)
4947 - scan_intr(fotg210);
4948 - if (fotg210->isoc_count > 0)
4949 - scan_isoc(fotg210);
4950 - if (fotg210->need_rescan)
4951 - goto rescan;
4952 - fotg210->scanning = false;
4953 -
4954 - /* the IO watchdog guards against hardware or driver bugs that
4955 - * misplace IRQs, and should let us run completely without IRQs.
4956 - * such lossage has been observed on both VT6202 and VT8235.
4957 - */
4958 - turn_on_io_watchdog(fotg210);
4959 -}
4960 -
4961 -/* Called when the fotg210_hcd module is removed.
4962 - */
4963 -static void fotg210_stop(struct usb_hcd *hcd)
4964 -{
4965 - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
4966 -
4967 - fotg210_dbg(fotg210, "stop\n");
4968 -
4969 - /* no more interrupts ... */
4970 -
4971 - spin_lock_irq(&fotg210->lock);
4972 - fotg210->enabled_hrtimer_events = 0;
4973 - spin_unlock_irq(&fotg210->lock);
4974 -
4975 - fotg210_quiesce(fotg210);
4976 - fotg210_silence_controller(fotg210);
4977 - fotg210_reset(fotg210);
4978 -
4979 - hrtimer_cancel(&fotg210->hrtimer);
4980 - remove_sysfs_files(fotg210);
4981 - remove_debug_files(fotg210);
4982 -
4983 - /* root hub is shut down separately (first, when possible) */
4984 - spin_lock_irq(&fotg210->lock);
4985 - end_free_itds(fotg210);
4986 - spin_unlock_irq(&fotg210->lock);
4987 - fotg210_mem_cleanup(fotg210);
4988 -
4989 -#ifdef FOTG210_STATS
4990 - fotg210_dbg(fotg210, "irq normal %ld err %ld iaa %ld (lost %ld)\n",
4991 - fotg210->stats.normal, fotg210->stats.error,
4992 - fotg210->stats.iaa, fotg210->stats.lost_iaa);
4993 - fotg210_dbg(fotg210, "complete %ld unlink %ld\n",
4994 - fotg210->stats.complete, fotg210->stats.unlink);
4995 -#endif
4996 -
4997 - dbg_status(fotg210, "fotg210_stop completed",
4998 - fotg210_readl(fotg210, &fotg210->regs->status));
4999 -}
5000 -
5001 -/* one-time init, only for memory state */
5002 -static int hcd_fotg210_init(struct usb_hcd *hcd)
5003 -{
5004 - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
5005 - u32 temp;
5006 - int retval;
5007 - u32 hcc_params;
5008 - struct fotg210_qh_hw *hw;
5009 -
5010 - spin_lock_init(&fotg210->lock);
5011 -
5012 - /*
5013 - * keep io watchdog by default, those good HCDs could turn off it later
5014 - */
5015 - fotg210->need_io_watchdog = 1;
5016 -
5017 - hrtimer_init(&fotg210->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
5018 - fotg210->hrtimer.function = fotg210_hrtimer_func;
5019 - fotg210->next_hrtimer_event = FOTG210_HRTIMER_NO_EVENT;
5020 -
5021 - hcc_params = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
5022 -
5023 - /*
5024 - * by default set standard 80% (== 100 usec/uframe) max periodic
5025 - * bandwidth as required by USB 2.0
5026 - */
5027 - fotg210->uframe_periodic_max = 100;
5028 -
5029 - /*
5030 - * hw default: 1K periodic list heads, one per frame.
5031 - * periodic_size can shrink by USBCMD update if hcc_params allows.
5032 - */
5033 - fotg210->periodic_size = DEFAULT_I_TDPS;
5034 - INIT_LIST_HEAD(&fotg210->intr_qh_list);
5035 - INIT_LIST_HEAD(&fotg210->cached_itd_list);
5036 -
5037 - if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
5038 - /* periodic schedule size can be smaller than default */
5039 - switch (FOTG210_TUNE_FLS) {
5040 - case 0:
5041 - fotg210->periodic_size = 1024;
5042 - break;
5043 - case 1:
5044 - fotg210->periodic_size = 512;
5045 - break;
5046 - case 2:
5047 - fotg210->periodic_size = 256;
5048 - break;
5049 - default:
5050 - BUG();
5051 - }
5052 - }
5053 - retval = fotg210_mem_init(fotg210, GFP_KERNEL);
5054 - if (retval < 0)
5055 - return retval;
5056 -
5057 - /* controllers may cache some of the periodic schedule ... */
5058 - fotg210->i_thresh = 2;
5059 -
5060 - /*
5061 - * dedicate a qh for the async ring head, since we couldn't unlink
5062 - * a 'real' qh without stopping the async schedule [4.8]. use it
5063 - * as the 'reclamation list head' too.
5064 - * its dummy is used in hw_alt_next of many tds, to prevent the qh
5065 - * from automatically advancing to the next td after short reads.
5066 - */
5067 - fotg210->async->qh_next.qh = NULL;
5068 - hw = fotg210->async->hw;
5069 - hw->hw_next = QH_NEXT(fotg210, fotg210->async->qh_dma);
5070 - hw->hw_info1 = cpu_to_hc32(fotg210, QH_HEAD);
5071 - hw->hw_token = cpu_to_hc32(fotg210, QTD_STS_HALT);
5072 - hw->hw_qtd_next = FOTG210_LIST_END(fotg210);
5073 - fotg210->async->qh_state = QH_STATE_LINKED;
5074 - hw->hw_alt_next = QTD_NEXT(fotg210, fotg210->async->dummy->qtd_dma);
5075 -
5076 - /* clear interrupt enables, set irq latency */
5077 - if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
5078 - log2_irq_thresh = 0;
5079 - temp = 1 << (16 + log2_irq_thresh);
5080 - if (HCC_CANPARK(hcc_params)) {
5081 - /* HW default park == 3, on hardware that supports it (like
5082 - * NVidia and ALI silicon), maximizes throughput on the async
5083 - * schedule by avoiding QH fetches between transfers.
5084 - *
5085 - * With fast usb storage devices and NForce2, "park" seems to
5086 - * make problems: throughput reduction (!), data errors...
5087 - */
5088 - if (park) {
5089 - park = min_t(unsigned, park, 3);
5090 - temp |= CMD_PARK;
5091 - temp |= park << 8;
5092 - }
5093 - fotg210_dbg(fotg210, "park %d\n", park);
5094 - }
5095 - if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
5096 - /* periodic schedule size can be smaller than default */
5097 - temp &= ~(3 << 2);
5098 - temp |= (FOTG210_TUNE_FLS << 2);
5099 - }
5100 - fotg210->command = temp;
5101 -
5102 - /* Accept arbitrarily long scatter-gather lists */
5103 - if (!hcd->localmem_pool)
5104 - hcd->self.sg_tablesize = ~0;
5105 - return 0;
5106 -}
5107 -
5108 -/* start HC running; it's halted, hcd_fotg210_init() has been run (once) */
5109 -static int fotg210_run(struct usb_hcd *hcd)
5110 -{
5111 - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
5112 - u32 temp;
5113 -
5114 - hcd->uses_new_polling = 1;
5115 -
5116 - /* EHCI spec section 4.1 */
5117 -
5118 - fotg210_writel(fotg210, fotg210->periodic_dma,
5119 - &fotg210->regs->frame_list);
5120 - fotg210_writel(fotg210, (u32)fotg210->async->qh_dma,
5121 - &fotg210->regs->async_next);
5122 -
5123 - /*
5124 - * hcc_params controls whether fotg210->regs->segment must (!!!)
5125 - * be used; it constrains QH/ITD/SITD and QTD locations.
5126 - * dma_pool consistent memory always uses segment zero.
5127 - * streaming mappings for I/O buffers, like dma_map_single(),
5128 - * can return segments above 4GB, if the device allows.
5129 - *
5130 - * NOTE: the dma mask is visible through dev->dma_mask, so
5131 - * drivers can pass this info along ... like NETIF_F_HIGHDMA,
5132 - * Scsi_Host.highmem_io, and so forth. It's readonly to all
5133 - * host side drivers though.
5134 - */
5135 - fotg210_readl(fotg210, &fotg210->caps->hcc_params);
5136 -
5137 - /*
5138 - * Philips, Intel, and maybe others need CMD_RUN before the
5139 - * root hub will detect new devices (why?); NEC doesn't
5140 - */
5141 - fotg210->command &= ~(CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
5142 - fotg210->command |= CMD_RUN;
5143 - fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
5144 - dbg_cmd(fotg210, "init", fotg210->command);
5145 -
5146 - /*
5147 - * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
5148 - * are explicitly handed to companion controller(s), so no TT is
5149 - * involved with the root hub. (Except where one is integrated,
5150 - * and there's no companion controller unless maybe for USB OTG.)
5151 - *
5152 - * Turning on the CF flag will transfer ownership of all ports
5153 - * from the companions to the EHCI controller. If any of the
5154 - * companions are in the middle of a port reset at the time, it
5155 - * could cause trouble. Write-locking ehci_cf_port_reset_rwsem
5156 - * guarantees that no resets are in progress. After we set CF,
5157 - * a short delay lets the hardware catch up; new resets shouldn't
5158 - * be started before the port switching actions could complete.
5159 - */
5160 - down_write(&ehci_cf_port_reset_rwsem);
5161 - fotg210->rh_state = FOTG210_RH_RUNNING;
5162 - /* unblock posted writes */
5163 - fotg210_readl(fotg210, &fotg210->regs->command);
5164 - usleep_range(5000, 10000);
5165 - up_write(&ehci_cf_port_reset_rwsem);
5166 - fotg210->last_periodic_enable = ktime_get_real();
5167 -
5168 - temp = HC_VERSION(fotg210,
5169 - fotg210_readl(fotg210, &fotg210->caps->hc_capbase));
5170 - fotg210_info(fotg210,
5171 - "USB %x.%x started, EHCI %x.%02x\n",
5172 - ((fotg210->sbrn & 0xf0) >> 4), (fotg210->sbrn & 0x0f),
5173 - temp >> 8, temp & 0xff);
5174 -
5175 - fotg210_writel(fotg210, INTR_MASK,
5176 - &fotg210->regs->intr_enable); /* Turn On Interrupts */
5177 -
5178 - /* GRR this is run-once init(), being done every time the HC starts.
5179 - * So long as they're part of class devices, we can't do it init()
5180 - * since the class device isn't created that early.
5181 - */
5182 - create_debug_files(fotg210);
5183 - create_sysfs_files(fotg210);
5184 -
5185 - return 0;
5186 -}
5187 -
5188 -static int fotg210_setup(struct usb_hcd *hcd)
5189 -{
5190 - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
5191 - int retval;
5192 -
5193 - fotg210->regs = (void __iomem *)fotg210->caps +
5194 - HC_LENGTH(fotg210,
5195 - fotg210_readl(fotg210, &fotg210->caps->hc_capbase));
5196 - dbg_hcs_params(fotg210, "reset");
5197 - dbg_hcc_params(fotg210, "reset");
5198 -
5199 - /* cache this readonly data; minimize chip reads */
5200 - fotg210->hcs_params = fotg210_readl(fotg210,
5201 - &fotg210->caps->hcs_params);
5202 -
5203 - fotg210->sbrn = HCD_USB2;
5204 -
5205 - /* data structure init */
5206 - retval = hcd_fotg210_init(hcd);
5207 - if (retval)
5208 - return retval;
5209 -
5210 - retval = fotg210_halt(fotg210);
5211 - if (retval)
5212 - return retval;
5213 -
5214 - fotg210_reset(fotg210);
5215 -
5216 - return 0;
5217 -}
5218 -
5219 -static irqreturn_t fotg210_irq(struct usb_hcd *hcd)
5220 -{
5221 - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
5222 - u32 status, masked_status, pcd_status = 0, cmd;
5223 - int bh;
5224 -
5225 - spin_lock(&fotg210->lock);
5226 -
5227 - status = fotg210_readl(fotg210, &fotg210->regs->status);
5228 -
5229 - /* e.g. cardbus physical eject */
5230 - if (status == ~(u32) 0) {
5231 - fotg210_dbg(fotg210, "device removed\n");
5232 - goto dead;
5233 - }
5234 -
5235 - /*
5236 - * We don't use STS_FLR, but some controllers don't like it to
5237 - * remain on, so mask it out along with the other status bits.
5238 - */
5239 - masked_status = status & (INTR_MASK | STS_FLR);
5240 -
5241 - /* Shared IRQ? */
5242 - if (!masked_status ||
5243 - unlikely(fotg210->rh_state == FOTG210_RH_HALTED)) {
5244 - spin_unlock(&fotg210->lock);
5245 - return IRQ_NONE;
5246 - }
5247 -
5248 - /* clear (just) interrupts */
5249 - fotg210_writel(fotg210, masked_status, &fotg210->regs->status);
5250 - cmd = fotg210_readl(fotg210, &fotg210->regs->command);
5251 - bh = 0;
5252 -
5253 - /* unrequested/ignored: Frame List Rollover */
5254 - dbg_status(fotg210, "irq", status);
5255 -
5256 - /* INT, ERR, and IAA interrupt rates can be throttled */
5257 -
5258 - /* normal [4.15.1.2] or error [4.15.1.1] completion */
5259 - if (likely((status & (STS_INT|STS_ERR)) != 0)) {
5260 - if (likely((status & STS_ERR) == 0))
5261 - INCR(fotg210->stats.normal);
5262 - else
5263 - INCR(fotg210->stats.error);
5264 - bh = 1;
5265 - }
5266 -
5267 - /* complete the unlinking of some qh [4.15.2.3] */
5268 - if (status & STS_IAA) {
5269 -
5270 - /* Turn off the IAA watchdog */
5271 - fotg210->enabled_hrtimer_events &=
5272 - ~BIT(FOTG210_HRTIMER_IAA_WATCHDOG);
5273 -
5274 - /*
5275 - * Mild optimization: Allow another IAAD to reset the
5276 - * hrtimer, if one occurs before the next expiration.
5277 - * In theory we could always cancel the hrtimer, but
5278 - * tests show that about half the time it will be reset
5279 - * for some other event anyway.
5280 - */
5281 - if (fotg210->next_hrtimer_event == FOTG210_HRTIMER_IAA_WATCHDOG)
5282 - ++fotg210->next_hrtimer_event;
5283 -
5284 - /* guard against (alleged) silicon errata */
5285 - if (cmd & CMD_IAAD)
5286 - fotg210_dbg(fotg210, "IAA with IAAD still set?\n");
5287 - if (fotg210->async_iaa) {
5288 - INCR(fotg210->stats.iaa);
5289 - end_unlink_async(fotg210);
5290 - } else
5291 - fotg210_dbg(fotg210, "IAA with nothing unlinked?\n");
5292 - }
5293 -
5294 - /* remote wakeup [4.3.1] */
5295 - if (status & STS_PCD) {
5296 - int pstatus;
5297 - u32 __iomem *status_reg = &fotg210->regs->port_status;
5298 -
5299 - /* kick root hub later */
5300 - pcd_status = status;
5301 -
5302 - /* resume root hub? */
5303 - if (fotg210->rh_state == FOTG210_RH_SUSPENDED)
5304 - usb_hcd_resume_root_hub(hcd);
5305 -
5306 - pstatus = fotg210_readl(fotg210, status_reg);
5307 -
5308 - if (test_bit(0, &fotg210->suspended_ports) &&
5309 - ((pstatus & PORT_RESUME) ||
5310 - !(pstatus & PORT_SUSPEND)) &&
5311 - (pstatus & PORT_PE) &&
5312 - fotg210->reset_done[0] == 0) {
5313 -
5314 - /* start 20 msec resume signaling from this port,
5315 - * and make hub_wq collect PORT_STAT_C_SUSPEND to
5316 - * stop that signaling. Use 5 ms extra for safety,
5317 - * like usb_port_resume() does.
5318 - */
5319 - fotg210->reset_done[0] = jiffies + msecs_to_jiffies(25);
5320 - set_bit(0, &fotg210->resuming_ports);
5321 - fotg210_dbg(fotg210, "port 1 remote wakeup\n");
5322 - mod_timer(&hcd->rh_timer, fotg210->reset_done[0]);
5323 - }
5324 - }
5325 -
5326 - /* PCI errors [4.15.2.4] */
5327 - if (unlikely((status & STS_FATAL) != 0)) {
5328 - fotg210_err(fotg210, "fatal error\n");
5329 - dbg_cmd(fotg210, "fatal", cmd);
5330 - dbg_status(fotg210, "fatal", status);
5331 -dead:
5332 - usb_hc_died(hcd);
5333 -
5334 - /* Don't let the controller do anything more */
5335 - fotg210->shutdown = true;
5336 - fotg210->rh_state = FOTG210_RH_STOPPING;
5337 - fotg210->command &= ~(CMD_RUN | CMD_ASE | CMD_PSE);
5338 - fotg210_writel(fotg210, fotg210->command,
5339 - &fotg210->regs->command);
5340 - fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable);
5341 - fotg210_handle_controller_death(fotg210);
5342 -
5343 - /* Handle completions when the controller stops */
5344 - bh = 0;
5345 - }
5346 -
5347 - if (bh)
5348 - fotg210_work(fotg210);
5349 - spin_unlock(&fotg210->lock);
5350 - if (pcd_status)
5351 - usb_hcd_poll_rh_status(hcd);
5352 - return IRQ_HANDLED;
5353 -}
5354 -
5355 -/* non-error returns are a promise to giveback() the urb later
5356 - * we drop ownership so next owner (or urb unlink) can get it
5357 - *
5358 - * urb + dev is in hcd.self.controller.urb_list
5359 - * we're queueing TDs onto software and hardware lists
5360 - *
5361 - * hcd-specific init for hcpriv hasn't been done yet
5362 - *
5363 - * NOTE: control, bulk, and interrupt share the same code to append TDs
5364 - * to a (possibly active) QH, and the same QH scanning code.
5365 - */
5366 -static int fotg210_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
5367 - gfp_t mem_flags)
5368 -{
5369 - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
5370 - struct list_head qtd_list;
5371 -
5372 - INIT_LIST_HEAD(&qtd_list);
5373 -
5374 - switch (usb_pipetype(urb->pipe)) {
5375 - case PIPE_CONTROL:
5376 - /* qh_completions() code doesn't handle all the fault cases
5377 - * in multi-TD control transfers. Even 1KB is rare anyway.
5378 - */
5379 - if (urb->transfer_buffer_length > (16 * 1024))
5380 - return -EMSGSIZE;
5381 - fallthrough;
5382 - /* case PIPE_BULK: */
5383 - default:
5384 - if (!qh_urb_transaction(fotg210, urb, &qtd_list, mem_flags))
5385 - return -ENOMEM;
5386 - return submit_async(fotg210, urb, &qtd_list, mem_flags);
5387 -
5388 - case PIPE_INTERRUPT:
5389 - if (!qh_urb_transaction(fotg210, urb, &qtd_list, mem_flags))
5390 - return -ENOMEM;
5391 - return intr_submit(fotg210, urb, &qtd_list, mem_flags);
5392 -
5393 - case PIPE_ISOCHRONOUS:
5394 - return itd_submit(fotg210, urb, mem_flags);
5395 - }
5396 -}
5397 -
5398 -/* remove from hardware lists
5399 - * completions normally happen asynchronously
5400 - */
5401 -
5402 -static int fotg210_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
5403 -{
5404 - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
5405 - struct fotg210_qh *qh;
5406 - unsigned long flags;
5407 - int rc;
5408 -
5409 - spin_lock_irqsave(&fotg210->lock, flags);
5410 - rc = usb_hcd_check_unlink_urb(hcd, urb, status);
5411 - if (rc)
5412 - goto done;
5413 -
5414 - switch (usb_pipetype(urb->pipe)) {
5415 - /* case PIPE_CONTROL: */
5416 - /* case PIPE_BULK:*/
5417 - default:
5418 - qh = (struct fotg210_qh *) urb->hcpriv;
5419 - if (!qh)
5420 - break;
5421 - switch (qh->qh_state) {
5422 - case QH_STATE_LINKED:
5423 - case QH_STATE_COMPLETING:
5424 - start_unlink_async(fotg210, qh);
5425 - break;
5426 - case QH_STATE_UNLINK:
5427 - case QH_STATE_UNLINK_WAIT:
5428 - /* already started */
5429 - break;
5430 - case QH_STATE_IDLE:
5431 - /* QH might be waiting for a Clear-TT-Buffer */
5432 - qh_completions(fotg210, qh);
5433 - break;
5434 - }
5435 - break;
5436 -
5437 - case PIPE_INTERRUPT:
5438 - qh = (struct fotg210_qh *) urb->hcpriv;
5439 - if (!qh)
5440 - break;
5441 - switch (qh->qh_state) {
5442 - case QH_STATE_LINKED:
5443 - case QH_STATE_COMPLETING:
5444 - start_unlink_intr(fotg210, qh);
5445 - break;
5446 - case QH_STATE_IDLE:
5447 - qh_completions(fotg210, qh);
5448 - break;
5449 - default:
5450 - fotg210_dbg(fotg210, "bogus qh %p state %d\n",
5451 - qh, qh->qh_state);
5452 - goto done;
5453 - }
5454 - break;
5455 -
5456 - case PIPE_ISOCHRONOUS:
5457 - /* itd... */
5458 -
5459 - /* wait till next completion, do it then. */
5460 - /* completion irqs can wait up to 1024 msec, */
5461 - break;
5462 - }
5463 -done:
5464 - spin_unlock_irqrestore(&fotg210->lock, flags);
5465 - return rc;
5466 -}
5467 -
5468 -/* bulk qh holds the data toggle */
5469 -
5470 -static void fotg210_endpoint_disable(struct usb_hcd *hcd,
5471 - struct usb_host_endpoint *ep)
5472 -{
5473 - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
5474 - unsigned long flags;
5475 - struct fotg210_qh *qh, *tmp;
5476 -
5477 - /* ASSERT: any requests/urbs are being unlinked */
5478 - /* ASSERT: nobody can be submitting urbs for this any more */
5479 -
5480 -rescan:
5481 - spin_lock_irqsave(&fotg210->lock, flags);
5482 - qh = ep->hcpriv;
5483 - if (!qh)
5484 - goto done;
5485 -
5486 - /* endpoints can be iso streams. for now, we don't
5487 - * accelerate iso completions ... so spin a while.
5488 - */
5489 - if (qh->hw == NULL) {
5490 - struct fotg210_iso_stream *stream = ep->hcpriv;
5491 -
5492 - if (!list_empty(&stream->td_list))
5493 - goto idle_timeout;
5494 -
5495 - /* BUG_ON(!list_empty(&stream->free_list)); */
5496 - kfree(stream);
5497 - goto done;
5498 - }
5499 -
5500 - if (fotg210->rh_state < FOTG210_RH_RUNNING)
5501 - qh->qh_state = QH_STATE_IDLE;
5502 - switch (qh->qh_state) {
5503 - case QH_STATE_LINKED:
5504 - case QH_STATE_COMPLETING:
5505 - for (tmp = fotg210->async->qh_next.qh;
5506 - tmp && tmp != qh;
5507 - tmp = tmp->qh_next.qh)
5508 - continue;
5509 - /* periodic qh self-unlinks on empty, and a COMPLETING qh
5510 - * may already be unlinked.
5511 - */
5512 - if (tmp)
5513 - start_unlink_async(fotg210, qh);
5514 - fallthrough;
5515 - case QH_STATE_UNLINK: /* wait for hw to finish? */
5516 - case QH_STATE_UNLINK_WAIT:
5517 -idle_timeout:
5518 - spin_unlock_irqrestore(&fotg210->lock, flags);
5519 - schedule_timeout_uninterruptible(1);
5520 - goto rescan;
5521 - case QH_STATE_IDLE: /* fully unlinked */
5522 - if (qh->clearing_tt)
5523 - goto idle_timeout;
5524 - if (list_empty(&qh->qtd_list)) {
5525 - qh_destroy(fotg210, qh);
5526 - break;
5527 - }
5528 - fallthrough;
5529 - default:
5530 - /* caller was supposed to have unlinked any requests;
5531 - * that's not our job. just leak this memory.
5532 - */
5533 - fotg210_err(fotg210, "qh %p (#%02x) state %d%s\n",
5534 - qh, ep->desc.bEndpointAddress, qh->qh_state,
5535 - list_empty(&qh->qtd_list) ? "" : "(has tds)");
5536 - break;
5537 - }
5538 -done:
5539 - ep->hcpriv = NULL;
5540 - spin_unlock_irqrestore(&fotg210->lock, flags);
5541 -}
5542 -
5543 -static void fotg210_endpoint_reset(struct usb_hcd *hcd,
5544 - struct usb_host_endpoint *ep)
5545 -{
5546 - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
5547 - struct fotg210_qh *qh;
5548 - int eptype = usb_endpoint_type(&ep->desc);
5549 - int epnum = usb_endpoint_num(&ep->desc);
5550 - int is_out = usb_endpoint_dir_out(&ep->desc);
5551 - unsigned long flags;
5552 -
5553 - if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
5554 - return;
5555 -
5556 - spin_lock_irqsave(&fotg210->lock, flags);
5557 - qh = ep->hcpriv;
5558 -
5559 - /* For Bulk and Interrupt endpoints we maintain the toggle state
5560 - * in the hardware; the toggle bits in udev aren't used at all.
5561 - * When an endpoint is reset by usb_clear_halt() we must reset
5562 - * the toggle bit in the QH.
5563 - */
5564 - if (qh) {
5565 - usb_settoggle(qh->dev, epnum, is_out, 0);
5566 - if (!list_empty(&qh->qtd_list)) {
5567 - WARN_ONCE(1, "clear_halt for a busy endpoint\n");
5568 - } else if (qh->qh_state == QH_STATE_LINKED ||
5569 - qh->qh_state == QH_STATE_COMPLETING) {
5570 -
5571 - /* The toggle value in the QH can't be updated
5572 - * while the QH is active. Unlink it now;
5573 - * re-linking will call qh_refresh().
5574 - */
5575 - if (eptype == USB_ENDPOINT_XFER_BULK)
5576 - start_unlink_async(fotg210, qh);
5577 - else
5578 - start_unlink_intr(fotg210, qh);
5579 - }
5580 - }
5581 - spin_unlock_irqrestore(&fotg210->lock, flags);
5582 -}
5583 -
5584 -static int fotg210_get_frame(struct usb_hcd *hcd)
5585 -{
5586 - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
5587 -
5588 - return (fotg210_read_frame_index(fotg210) >> 3) %
5589 - fotg210->periodic_size;
5590 -}
5591 -
5592 -/* The EHCI in ChipIdea HDRC cannot be a separate module or device,
5593 - * because its registers (and irq) are shared between host/gadget/otg
5594 - * functions and in order to facilitate role switching we cannot
5595 - * give the fotg210 driver exclusive access to those.
5596 - */
5597 -MODULE_DESCRIPTION(DRIVER_DESC);
5598 -MODULE_AUTHOR(DRIVER_AUTHOR);
5599 -MODULE_LICENSE("GPL");
5600 -
5601 -static const struct hc_driver fotg210_fotg210_hc_driver = {
5602 - .description = hcd_name,
5603 - .product_desc = "Faraday USB2.0 Host Controller",
5604 - .hcd_priv_size = sizeof(struct fotg210_hcd),
5605 -
5606 - /*
5607 - * generic hardware linkage
5608 - */
5609 - .irq = fotg210_irq,
5610 - .flags = HCD_MEMORY | HCD_DMA | HCD_USB2,
5611 -
5612 - /*
5613 - * basic lifecycle operations
5614 - */
5615 - .reset = hcd_fotg210_init,
5616 - .start = fotg210_run,
5617 - .stop = fotg210_stop,
5618 - .shutdown = fotg210_shutdown,
5619 -
5620 - /*
5621 - * managing i/o requests and associated device resources
5622 - */
5623 - .urb_enqueue = fotg210_urb_enqueue,
5624 - .urb_dequeue = fotg210_urb_dequeue,
5625 - .endpoint_disable = fotg210_endpoint_disable,
5626 - .endpoint_reset = fotg210_endpoint_reset,
5627 -
5628 - /*
5629 - * scheduling support
5630 - */
5631 - .get_frame_number = fotg210_get_frame,
5632 -
5633 - /*
5634 - * root hub support
5635 - */
5636 - .hub_status_data = fotg210_hub_status_data,
5637 - .hub_control = fotg210_hub_control,
5638 - .bus_suspend = fotg210_bus_suspend,
5639 - .bus_resume = fotg210_bus_resume,
5640 -
5641 - .relinquish_port = fotg210_relinquish_port,
5642 - .port_handed_over = fotg210_port_handed_over,
5643 -
5644 - .clear_tt_buffer_complete = fotg210_clear_tt_buffer_complete,
5645 -};
5646 -
5647 -static void fotg210_init(struct fotg210_hcd *fotg210)
5648 -{
5649 - u32 value;
5650 -
5651 - iowrite32(GMIR_MDEV_INT | GMIR_MOTG_INT | GMIR_INT_POLARITY,
5652 - &fotg210->regs->gmir);
5653 -
5654 - value = ioread32(&fotg210->regs->otgcsr);
5655 - value &= ~OTGCSR_A_BUS_DROP;
5656 - value |= OTGCSR_A_BUS_REQ;
5657 - iowrite32(value, &fotg210->regs->otgcsr);
5658 -}
5659 -
5660 -/*
5661 - * fotg210_hcd_probe - initialize faraday FOTG210 HCDs
5662 - *
5663 - * Allocates basic resources for this USB host controller, and
5664 - * then invokes the start() method for the HCD associated with it
5665 - * through the hotplug entry's driver_data.
5666 - */
5667 -static int fotg210_hcd_probe(struct platform_device *pdev)
5668 -{
5669 - struct device *dev = &pdev->dev;
5670 - struct usb_hcd *hcd;
5671 - struct resource *res;
5672 - int irq;
5673 - int retval;
5674 - struct fotg210_hcd *fotg210;
5675 -
5676 - if (usb_disabled())
5677 - return -ENODEV;
5678 -
5679 - pdev->dev.power.power_state = PMSG_ON;
5680 -
5681 - irq = platform_get_irq(pdev, 0);
5682 - if (irq < 0)
5683 - return irq;
5684 -
5685 - hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev,
5686 - dev_name(dev));
5687 - if (!hcd) {
5688 - dev_err(dev, "failed to create hcd\n");
5689 - retval = -ENOMEM;
5690 - goto fail_create_hcd;
5691 - }
5692 -
5693 - hcd->has_tt = 1;
5694 -
5695 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
5696 - hcd->regs = devm_ioremap_resource(&pdev->dev, res);
5697 - if (IS_ERR(hcd->regs)) {
5698 - retval = PTR_ERR(hcd->regs);
5699 - goto failed_put_hcd;
5700 - }
5701 -
5702 - hcd->rsrc_start = res->start;
5703 - hcd->rsrc_len = resource_size(res);
5704 -
5705 - fotg210 = hcd_to_fotg210(hcd);
5706 -
5707 - fotg210->caps = hcd->regs;
5708 -
5709 - /* It's OK not to supply this clock */
5710 - fotg210->pclk = clk_get(dev, "PCLK");
5711 - if (!IS_ERR(fotg210->pclk)) {
5712 - retval = clk_prepare_enable(fotg210->pclk);
5713 - if (retval) {
5714 - dev_err(dev, "failed to enable PCLK\n");
5715 - goto failed_put_hcd;
5716 - }
5717 - } else if (PTR_ERR(fotg210->pclk) == -EPROBE_DEFER) {
5718 - /*
5719 - * Percolate deferrals, for anything else,
5720 - * just live without the clocking.
5721 - */
5722 - retval = PTR_ERR(fotg210->pclk);
5723 - goto failed_dis_clk;
5724 - }
5725 -
5726 - retval = fotg210_setup(hcd);
5727 - if (retval)
5728 - goto failed_dis_clk;
5729 -
5730 - fotg210_init(fotg210);
5731 -
5732 - retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
5733 - if (retval) {
5734 - dev_err(dev, "failed to add hcd with err %d\n", retval);
5735 - goto failed_dis_clk;
5736 - }
5737 - device_wakeup_enable(hcd->self.controller);
5738 - platform_set_drvdata(pdev, hcd);
5739 -
5740 - return retval;
5741 -
5742 -failed_dis_clk:
5743 - if (!IS_ERR(fotg210->pclk)) {
5744 - clk_disable_unprepare(fotg210->pclk);
5745 - clk_put(fotg210->pclk);
5746 - }
5747 -failed_put_hcd:
5748 - usb_put_hcd(hcd);
5749 -fail_create_hcd:
5750 - dev_err(dev, "init %s fail, %d\n", dev_name(dev), retval);
5751 - return retval;
5752 -}
5753 -
5754 -/*
5755 - * fotg210_hcd_remove - shutdown processing for EHCI HCDs
5756 - * @dev: USB Host Controller being removed
5757 - *
5758 - */
5759 -static int fotg210_hcd_remove(struct platform_device *pdev)
5760 -{
5761 - struct usb_hcd *hcd = platform_get_drvdata(pdev);
5762 - struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
5763 -
5764 - if (!IS_ERR(fotg210->pclk)) {
5765 - clk_disable_unprepare(fotg210->pclk);
5766 - clk_put(fotg210->pclk);
5767 - }
5768 -
5769 - usb_remove_hcd(hcd);
5770 - usb_put_hcd(hcd);
5771 -
5772 - return 0;
5773 -}
5774 -
5775 -#ifdef CONFIG_OF
5776 -static const struct of_device_id fotg210_of_match[] = {
5777 - { .compatible = "faraday,fotg210" },
5778 - {},
5779 -};
5780 -MODULE_DEVICE_TABLE(of, fotg210_of_match);
5781 -#endif
5782 -
5783 -static struct platform_driver fotg210_hcd_driver = {
5784 - .driver = {
5785 - .name = "fotg210-hcd",
5786 - .of_match_table = of_match_ptr(fotg210_of_match),
5787 - },
5788 - .probe = fotg210_hcd_probe,
5789 - .remove = fotg210_hcd_remove,
5790 -};
5791 -
5792 -static int __init fotg210_hcd_init(void)
5793 -{
5794 - int retval = 0;
5795 -
5796 - if (usb_disabled())
5797 - return -ENODEV;
5798 -
5799 - set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
5800 - if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
5801 - test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
5802 - pr_warn("Warning! fotg210_hcd should always be loaded before uhci_hcd and ohci_hcd, not after\n");
5803 -
5804 - pr_debug("%s: block sizes: qh %zd qtd %zd itd %zd\n",
5805 - hcd_name, sizeof(struct fotg210_qh),
5806 - sizeof(struct fotg210_qtd),
5807 - sizeof(struct fotg210_itd));
5808 -
5809 - fotg210_debug_root = debugfs_create_dir("fotg210", usb_debug_root);
5810 -
5811 - retval = platform_driver_register(&fotg210_hcd_driver);
5812 - if (retval < 0)
5813 - goto clean;
5814 - return retval;
5815 -
5816 -clean:
5817 - debugfs_remove(fotg210_debug_root);
5818 - fotg210_debug_root = NULL;
5819 -
5820 - clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
5821 - return retval;
5822 -}
5823 -module_init(fotg210_hcd_init);
5824 -
5825 -static void __exit fotg210_hcd_cleanup(void)
5826 -{
5827 - platform_driver_unregister(&fotg210_hcd_driver);
5828 - debugfs_remove(fotg210_debug_root);
5829 - clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
5830 -}
5831 -module_exit(fotg210_hcd_cleanup);
5832 --- /dev/null
5833 +++ b/drivers/usb/fotg210/fotg210-hcd.c
5834 @@ -0,0 +1,5727 @@
5835 +// SPDX-License-Identifier: GPL-2.0+
5836 +/* Faraday FOTG210 EHCI-like driver
5837 + *
5838 + * Copyright (c) 2013 Faraday Technology Corporation
5839 + *
5840 + * Author: Yuan-Hsin Chen <yhchen@faraday-tech.com>
5841 + * Feng-Hsin Chiang <john453@faraday-tech.com>
5842 + * Po-Yu Chuang <ratbert.chuang@gmail.com>
5843 + *
5844 + * Most of the code is borrowed from the Linux-3.7 EHCI driver
5845 + */
5846 +#include <linux/module.h>
5847 +#include <linux/of.h>
5848 +#include <linux/device.h>
5849 +#include <linux/dmapool.h>
5850 +#include <linux/kernel.h>
5851 +#include <linux/delay.h>
5852 +#include <linux/ioport.h>
5853 +#include <linux/sched.h>
5854 +#include <linux/vmalloc.h>
5855 +#include <linux/errno.h>
5856 +#include <linux/init.h>
5857 +#include <linux/hrtimer.h>
5858 +#include <linux/list.h>
5859 +#include <linux/interrupt.h>
5860 +#include <linux/usb.h>
5861 +#include <linux/usb/hcd.h>
5862 +#include <linux/moduleparam.h>
5863 +#include <linux/dma-mapping.h>
5864 +#include <linux/debugfs.h>
5865 +#include <linux/slab.h>
5866 +#include <linux/uaccess.h>
5867 +#include <linux/platform_device.h>
5868 +#include <linux/io.h>
5869 +#include <linux/iopoll.h>
5870 +#include <linux/clk.h>
5871 +
5872 +#include <asm/byteorder.h>
5873 +#include <asm/irq.h>
5874 +#include <asm/unaligned.h>
5875 +
5876 +#define DRIVER_AUTHOR "Yuan-Hsin Chen"
5877 +#define DRIVER_DESC "FOTG210 Host Controller (EHCI) Driver"
5878 +static const char hcd_name[] = "fotg210_hcd";
5879 +
5880 +#undef FOTG210_URB_TRACE
5881 +#define FOTG210_STATS
5882 +
5883 +/* magic numbers that can affect system performance */
5884 +#define FOTG210_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
5885 +#define FOTG210_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
5886 +#define FOTG210_TUNE_RL_TT 0
5887 +#define FOTG210_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
5888 +#define FOTG210_TUNE_MULT_TT 1
5889 +
5890 +/* Some drivers think it's safe to schedule isochronous transfers more than 256
5891 + * ms into the future (partly as a result of an old bug in the scheduling
5892 + * code). In an attempt to avoid trouble, we will use a minimum scheduling
5893 + * length of 512 frames instead of 256.
5894 + */
5895 +#define FOTG210_TUNE_FLS 1 /* (medium) 512-frame schedule */
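The same FOTG210_TUNE_FLS value later sizes the driver's periodic schedule and is programmed into the frame-list-size field of USBCMD; a minimal sketch of that mapping (illustrative locals only, assuming the EHCI-style field at USBCMD bits 3:2 that the init code in this file targets):

        /* FOTG210_TUNE_FLS: 0 -> 1024 frames, 1 -> 512, 2 -> 256 */
        unsigned int periodic_size = 1024 >> FOTG210_TUNE_FLS; /* 512 here */
        u32 fls_field = FOTG210_TUNE_FLS << 2;                  /* USBCMD bits 3:2 */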
5896 +
5897 +/* Initial IRQ latency: faster than hw default */
5898 +static int log2_irq_thresh; /* 0 to 6 */
5899 +module_param(log2_irq_thresh, int, S_IRUGO);
5900 +MODULE_PARM_DESC(log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
5901 +
5902 +/* initial park setting: slower than hw default */
5903 +static unsigned park;
5904 +module_param(park, uint, S_IRUGO);
5905 +MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets");
5906 +
5907 +/* for link power management (LPM) feature */
5908 +static unsigned int hird;
5909 +module_param(hird, int, S_IRUGO);
5910 +MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us");
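For orientation, log2_irq_thresh selects a power-of-two interrupt latency measured in 125 us micro-frames. The worked arithmetic below is only a sketch with illustrative locals, assuming the EHCI-style Interrupt Threshold Control field that the init code later shifts into USBCMD at bit 16:

        /* e.g. log2_irq_thresh == 3 -> 1 << 3 = 8 micro-frames of coalescing */
        unsigned int itc_uframes = 1 << log2_irq_thresh;
        unsigned int itc_us = itc_uframes * 125;  /* 8 * 125 us = 1 ms */
        u32 itc_bits = itc_uframes << 16;         /* value placed into USBCMD */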
5911 +
5912 +#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
5913 +
5914 +#include "fotg210-hcd.h"
5915 +
5916 +#define fotg210_dbg(fotg210, fmt, args...) \
5917 + dev_dbg(fotg210_to_hcd(fotg210)->self.controller, fmt, ## args)
5918 +#define fotg210_err(fotg210, fmt, args...) \
5919 + dev_err(fotg210_to_hcd(fotg210)->self.controller, fmt, ## args)
5920 +#define fotg210_info(fotg210, fmt, args...) \
5921 + dev_info(fotg210_to_hcd(fotg210)->self.controller, fmt, ## args)
5922 +#define fotg210_warn(fotg210, fmt, args...) \
5923 + dev_warn(fotg210_to_hcd(fotg210)->self.controller, fmt, ## args)
5924 +
5925 +/* check the values in the HCSPARAMS register (host controller _Structural_
5926 + * parameters) see EHCI spec, Table 2-4 for each value
5927 + */
5928 +static void dbg_hcs_params(struct fotg210_hcd *fotg210, char *label)
5929 +{
5930 + u32 params = fotg210_readl(fotg210, &fotg210->caps->hcs_params);
5931 +
5932 + fotg210_dbg(fotg210, "%s hcs_params 0x%x ports=%d\n", label, params,
5933 + HCS_N_PORTS(params));
5934 +}
5935 +
5936 +/* check the values in the HCCPARAMS register (host controller _Capability_
5937 + * parameters) see EHCI Spec, Table 2-5 for each value
5938 + */
5939 +static void dbg_hcc_params(struct fotg210_hcd *fotg210, char *label)
5940 +{
5941 + u32 params = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
5942 +
5943 + fotg210_dbg(fotg210, "%s hcc_params %04x uframes %s%s\n", label,
5944 + params,
5945 + HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
5946 + HCC_CANPARK(params) ? " park" : "");
5947 +}
5948 +
5949 +static void __maybe_unused
5950 +dbg_qtd(const char *label, struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd)
5951 +{
5952 + fotg210_dbg(fotg210, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd,
5953 + hc32_to_cpup(fotg210, &qtd->hw_next),
5954 + hc32_to_cpup(fotg210, &qtd->hw_alt_next),
5955 + hc32_to_cpup(fotg210, &qtd->hw_token),
5956 + hc32_to_cpup(fotg210, &qtd->hw_buf[0]));
5957 + if (qtd->hw_buf[1])
5958 + fotg210_dbg(fotg210, " p1=%08x p2=%08x p3=%08x p4=%08x\n",
5959 + hc32_to_cpup(fotg210, &qtd->hw_buf[1]),
5960 + hc32_to_cpup(fotg210, &qtd->hw_buf[2]),
5961 + hc32_to_cpup(fotg210, &qtd->hw_buf[3]),
5962 + hc32_to_cpup(fotg210, &qtd->hw_buf[4]));
5963 +}
5964 +
5965 +static void __maybe_unused
5966 +dbg_qh(const char *label, struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
5967 +{
5968 + struct fotg210_qh_hw *hw = qh->hw;
5969 +
5970 + fotg210_dbg(fotg210, "%s qh %p n%08x info %x %x qtd %x\n", label, qh,
5971 + hw->hw_next, hw->hw_info1, hw->hw_info2,
5972 + hw->hw_current);
5973 +
5974 + dbg_qtd("overlay", fotg210, (struct fotg210_qtd *) &hw->hw_qtd_next);
5975 +}
5976 +
5977 +static void __maybe_unused
5978 +dbg_itd(const char *label, struct fotg210_hcd *fotg210, struct fotg210_itd *itd)
5979 +{
5980 + fotg210_dbg(fotg210, "%s[%d] itd %p, next %08x, urb %p\n", label,
5981 + itd->frame, itd, hc32_to_cpu(fotg210, itd->hw_next),
5982 + itd->urb);
5983 +
5984 + fotg210_dbg(fotg210,
5985 + " trans: %08x %08x %08x %08x %08x %08x %08x %08x\n",
5986 + hc32_to_cpu(fotg210, itd->hw_transaction[0]),
5987 + hc32_to_cpu(fotg210, itd->hw_transaction[1]),
5988 + hc32_to_cpu(fotg210, itd->hw_transaction[2]),
5989 + hc32_to_cpu(fotg210, itd->hw_transaction[3]),
5990 + hc32_to_cpu(fotg210, itd->hw_transaction[4]),
5991 + hc32_to_cpu(fotg210, itd->hw_transaction[5]),
5992 + hc32_to_cpu(fotg210, itd->hw_transaction[6]),
5993 + hc32_to_cpu(fotg210, itd->hw_transaction[7]));
5994 +
5995 + fotg210_dbg(fotg210,
5996 + " buf: %08x %08x %08x %08x %08x %08x %08x\n",
5997 + hc32_to_cpu(fotg210, itd->hw_bufp[0]),
5998 + hc32_to_cpu(fotg210, itd->hw_bufp[1]),
5999 + hc32_to_cpu(fotg210, itd->hw_bufp[2]),
6000 + hc32_to_cpu(fotg210, itd->hw_bufp[3]),
6001 + hc32_to_cpu(fotg210, itd->hw_bufp[4]),
6002 + hc32_to_cpu(fotg210, itd->hw_bufp[5]),
6003 + hc32_to_cpu(fotg210, itd->hw_bufp[6]));
6004 +
6005 + fotg210_dbg(fotg210, " index: %d %d %d %d %d %d %d %d\n",
6006 + itd->index[0], itd->index[1], itd->index[2],
6007 + itd->index[3], itd->index[4], itd->index[5],
6008 + itd->index[6], itd->index[7]);
6009 +}
6010 +
6011 +static int __maybe_unused
6012 +dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
6013 +{
6014 + return scnprintf(buf, len, "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
6015 + label, label[0] ? " " : "", status,
6016 + (status & STS_ASS) ? " Async" : "",
6017 + (status & STS_PSS) ? " Periodic" : "",
6018 + (status & STS_RECL) ? " Recl" : "",
6019 + (status & STS_HALT) ? " Halt" : "",
6020 + (status & STS_IAA) ? " IAA" : "",
6021 + (status & STS_FATAL) ? " FATAL" : "",
6022 + (status & STS_FLR) ? " FLR" : "",
6023 + (status & STS_PCD) ? " PCD" : "",
6024 + (status & STS_ERR) ? " ERR" : "",
6025 + (status & STS_INT) ? " INT" : "");
6026 +}
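As a worked example (assuming the usual EHCI USBSTS bit assignments mirrored by this driver, with STS_INT at bit 0 and STS_PCD at bit 2), a combined port-change and transfer-completion interrupt decodes as follows:

        char buf[80];

        /* STS_PCD | STS_INT == 0x0005 -> "irq status 0005 PCD INT" */
        dbg_status_buf(buf, sizeof(buf), "irq", STS_PCD | STS_INT);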
6027 +
6028 +static int __maybe_unused
6029 +dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
6030 +{
6031 + return scnprintf(buf, len, "%s%sintrenable %02x%s%s%s%s%s%s",
6032 + label, label[0] ? " " : "", enable,
6033 + (enable & STS_IAA) ? " IAA" : "",
6034 + (enable & STS_FATAL) ? " FATAL" : "",
6035 + (enable & STS_FLR) ? " FLR" : "",
6036 + (enable & STS_PCD) ? " PCD" : "",
6037 + (enable & STS_ERR) ? " ERR" : "",
6038 + (enable & STS_INT) ? " INT" : "");
6039 +}
6040 +
6041 +static const char *const fls_strings[] = { "1024", "512", "256", "??" };
6042 +
6043 +static int dbg_command_buf(char *buf, unsigned len, const char *label,
6044 + u32 command)
6045 +{
6046 + return scnprintf(buf, len,
6047 + "%s%scommand %07x %s=%d ithresh=%d%s%s%s period=%s%s %s",
6048 + label, label[0] ? " " : "", command,
6049 + (command & CMD_PARK) ? " park" : "(park)",
6050 + CMD_PARK_CNT(command),
6051 + (command >> 16) & 0x3f,
6052 + (command & CMD_IAAD) ? " IAAD" : "",
6053 + (command & CMD_ASE) ? " Async" : "",
6054 + (command & CMD_PSE) ? " Periodic" : "",
6055 + fls_strings[(command >> 2) & 0x3],
6056 + (command & CMD_RESET) ? " Reset" : "",
6057 + (command & CMD_RUN) ? "RUN" : "HALT");
6058 +}
6059 +
6060 +static char *dbg_port_buf(char *buf, unsigned len, const char *label, int port,
6061 + u32 status)
6062 +{
6063 + char *sig;
6064 +
6065 + /* signaling state */
6066 + switch (status & (3 << 10)) {
6067 + case 0 << 10:
6068 + sig = "se0";
6069 + break;
6070 + case 1 << 10:
6071 + sig = "k";
6072 + break; /* low speed */
6073 + case 2 << 10:
6074 + sig = "j";
6075 + break;
6076 + default:
6077 + sig = "?";
6078 + break;
6079 + }
6080 +
6081 + scnprintf(buf, len, "%s%sport:%d status %06x %d sig=%s%s%s%s%s%s%s%s",
6082 + label, label[0] ? " " : "", port, status,
6083 + status >> 25, /*device address */
6084 + sig,
6085 + (status & PORT_RESET) ? " RESET" : "",
6086 + (status & PORT_SUSPEND) ? " SUSPEND" : "",
6087 + (status & PORT_RESUME) ? " RESUME" : "",
6088 + (status & PORT_PEC) ? " PEC" : "",
6089 + (status & PORT_PE) ? " PE" : "",
6090 + (status & PORT_CSC) ? " CSC" : "",
6091 + (status & PORT_CONNECT) ? " CONNECT" : "");
6092 +
6093 + return buf;
6094 +}
6095 +
6096 +/* functions have the "wrong" filename when they're output... */
6097 +#define dbg_status(fotg210, label, status) { \
6098 + char _buf[80]; \
6099 + dbg_status_buf(_buf, sizeof(_buf), label, status); \
6100 + fotg210_dbg(fotg210, "%s\n", _buf); \
6101 +}
6102 +
6103 +#define dbg_cmd(fotg210, label, command) { \
6104 + char _buf[80]; \
6105 + dbg_command_buf(_buf, sizeof(_buf), label, command); \
6106 + fotg210_dbg(fotg210, "%s\n", _buf); \
6107 +}
6108 +
6109 +#define dbg_port(fotg210, label, port, status) { \
6110 + char _buf[80]; \
6111 + fotg210_dbg(fotg210, "%s\n", \
6112 + dbg_port_buf(_buf, sizeof(_buf), label, port, status));\
6113 +}
6114 +
6115 +/* troubleshooting help: expose state in debugfs */
6116 +static int debug_async_open(struct inode *, struct file *);
6117 +static int debug_periodic_open(struct inode *, struct file *);
6118 +static int debug_registers_open(struct inode *, struct file *);
6120 +
6121 +static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*);
6122 +static int debug_close(struct inode *, struct file *);
6123 +
6124 +static const struct file_operations debug_async_fops = {
6125 + .owner = THIS_MODULE,
6126 + .open = debug_async_open,
6127 + .read = debug_output,
6128 + .release = debug_close,
6129 + .llseek = default_llseek,
6130 +};
6131 +static const struct file_operations debug_periodic_fops = {
6132 + .owner = THIS_MODULE,
6133 + .open = debug_periodic_open,
6134 + .read = debug_output,
6135 + .release = debug_close,
6136 + .llseek = default_llseek,
6137 +};
6138 +static const struct file_operations debug_registers_fops = {
6139 + .owner = THIS_MODULE,
6140 + .open = debug_registers_open,
6141 + .read = debug_output,
6142 + .release = debug_close,
6143 + .llseek = default_llseek,
6144 +};
6145 +
6146 +static struct dentry *fotg210_debug_root;
6147 +
6148 +struct debug_buffer {
6149 + ssize_t (*fill_func)(struct debug_buffer *); /* fill method */
6150 + struct usb_bus *bus;
6151 + struct mutex mutex; /* protect filling of buffer */
6152 + size_t count; /* number of characters filled into buffer */
6153 + char *output_buf;
6154 + size_t alloc_size;
6155 +};
6156 +
6157 +static inline char speed_char(u32 scratch)
6158 +{
6159 + switch (scratch & (3 << 12)) {
6160 + case QH_FULL_SPEED:
6161 + return 'f';
6162 +
6163 + case QH_LOW_SPEED:
6164 + return 'l';
6165 +
6166 + case QH_HIGH_SPEED:
6167 + return 'h';
6168 +
6169 + default:
6170 + return '?';
6171 + }
6172 +}
6173 +
6174 +static inline char token_mark(struct fotg210_hcd *fotg210, __hc32 token)
6175 +{
6176 + __u32 v = hc32_to_cpu(fotg210, token);
6177 +
6178 + if (v & QTD_STS_ACTIVE)
6179 + return '*';
6180 + if (v & QTD_STS_HALT)
6181 + return '-';
6182 + if (!IS_SHORT_READ(v))
6183 + return ' ';
6184 + /* tries to advance through hw_alt_next */
6185 + return '/';
6186 +}
6187 +
6188 +static void qh_lines(struct fotg210_hcd *fotg210, struct fotg210_qh *qh,
6189 + char **nextp, unsigned *sizep)
6190 +{
6191 + u32 scratch;
6192 + u32 hw_curr;
6193 + struct fotg210_qtd *td;
6194 + unsigned temp;
6195 + unsigned size = *sizep;
6196 + char *next = *nextp;
6197 + char mark;
6198 + __le32 list_end = FOTG210_LIST_END(fotg210);
6199 + struct fotg210_qh_hw *hw = qh->hw;
6200 +
6201 + if (hw->hw_qtd_next == list_end) /* NEC does this */
6202 + mark = '@';
6203 + else
6204 + mark = token_mark(fotg210, hw->hw_token);
6205 + if (mark == '/') { /* qh_alt_next controls qh advance? */
6206 + if ((hw->hw_alt_next & QTD_MASK(fotg210)) ==
6207 + fotg210->async->hw->hw_alt_next)
6208 + mark = '#'; /* blocked */
6209 + else if (hw->hw_alt_next == list_end)
6210 + mark = '.'; /* use hw_qtd_next */
6211 + /* else alt_next points to some other qtd */
6212 + }
6213 + scratch = hc32_to_cpup(fotg210, &hw->hw_info1);
6214 + hw_curr = (mark == '*') ? hc32_to_cpup(fotg210, &hw->hw_current) : 0;
6215 + temp = scnprintf(next, size,
6216 + "qh/%p dev%d %cs ep%d %08x %08x(%08x%c %s nak%d)",
6217 + qh, scratch & 0x007f,
6218 + speed_char(scratch),
6219 + (scratch >> 8) & 0x000f,
6220 + scratch, hc32_to_cpup(fotg210, &hw->hw_info2),
6221 + hc32_to_cpup(fotg210, &hw->hw_token), mark,
6222 + (cpu_to_hc32(fotg210, QTD_TOGGLE) & hw->hw_token)
6223 + ? "data1" : "data0",
6224 + (hc32_to_cpup(fotg210, &hw->hw_alt_next) >> 1) & 0x0f);
6225 + size -= temp;
6226 + next += temp;
6227 +
6228 + /* hc may be modifying the list as we read it ... */
6229 + list_for_each_entry(td, &qh->qtd_list, qtd_list) {
6230 + scratch = hc32_to_cpup(fotg210, &td->hw_token);
6231 + mark = ' ';
6232 + if (hw_curr == td->qtd_dma)
6233 + mark = '*';
6234 + else if (hw->hw_qtd_next == cpu_to_hc32(fotg210, td->qtd_dma))
6235 + mark = '+';
6236 + else if (QTD_LENGTH(scratch)) {
6237 + if (td->hw_alt_next == fotg210->async->hw->hw_alt_next)
6238 + mark = '#';
6239 + else if (td->hw_alt_next != list_end)
6240 + mark = '/';
6241 + }
6242 + temp = snprintf(next, size,
6243 + "\n\t%p%c%s len=%d %08x urb %p",
6244 + td, mark, ({ char *tmp;
6245 + switch ((scratch>>8)&0x03) {
6246 + case 0:
6247 + tmp = "out";
6248 + break;
6249 + case 1:
6250 + tmp = "in";
6251 + break;
6252 + case 2:
6253 + tmp = "setup";
6254 + break;
6255 + default:
6256 + tmp = "?";
6257 + break;
6258 + } tmp; }),
6259 + (scratch >> 16) & 0x7fff,
6260 + scratch,
6261 + td->urb);
6262 + if (size < temp)
6263 + temp = size;
6264 + size -= temp;
6265 + next += temp;
6266 + if (temp == size)
6267 + goto done;
6268 + }
6269 +
6270 + temp = snprintf(next, size, "\n");
6271 + if (size < temp)
6272 + temp = size;
6273 +
6274 + size -= temp;
6275 + next += temp;
6276 +
6277 +done:
6278 + *sizep = size;
6279 + *nextp = next;
6280 +}
6281 +
6282 +static ssize_t fill_async_buffer(struct debug_buffer *buf)
6283 +{
6284 + struct usb_hcd *hcd;
6285 + struct fotg210_hcd *fotg210;
6286 + unsigned long flags;
6287 + unsigned temp, size;
6288 + char *next;
6289 + struct fotg210_qh *qh;
6290 +
6291 + hcd = bus_to_hcd(buf->bus);
6292 + fotg210 = hcd_to_fotg210(hcd);
6293 + next = buf->output_buf;
6294 + size = buf->alloc_size;
6295 +
6296 + *next = 0;
6297 +
6298 + /* dumps a snapshot of the async schedule.
6299 + * usually empty except for long-term bulk reads, or head.
6300 + * one QH per line, and TDs we know about
6301 + */
6302 + spin_lock_irqsave(&fotg210->lock, flags);
6303 + for (qh = fotg210->async->qh_next.qh; size > 0 && qh;
6304 + qh = qh->qh_next.qh)
6305 + qh_lines(fotg210, qh, &next, &size);
6306 + if (fotg210->async_unlink && size > 0) {
6307 + temp = scnprintf(next, size, "\nunlink =\n");
6308 + size -= temp;
6309 + next += temp;
6310 +
6311 + for (qh = fotg210->async_unlink; size > 0 && qh;
6312 + qh = qh->unlink_next)
6313 + qh_lines(fotg210, qh, &next, &size);
6314 + }
6315 + spin_unlock_irqrestore(&fotg210->lock, flags);
6316 +
6317 + return strlen(buf->output_buf);
6318 +}
6319 +
6320 +/* count tds, get ep direction */
6321 +static unsigned output_buf_tds_dir(char *buf, struct fotg210_hcd *fotg210,
6322 + struct fotg210_qh_hw *hw, struct fotg210_qh *qh, unsigned size)
6323 +{
6324 + u32 scratch = hc32_to_cpup(fotg210, &hw->hw_info1);
6325 + struct fotg210_qtd *qtd;
6326 + char *type = "";
6327 + unsigned temp = 0;
6328 +
6329 + /* count tds, get ep direction */
6330 + list_for_each_entry(qtd, &qh->qtd_list, qtd_list) {
6331 + temp++;
6332 + switch ((hc32_to_cpu(fotg210, qtd->hw_token) >> 8) & 0x03) {
6333 + case 0:
6334 + type = "out";
6335 + continue;
6336 + case 1:
6337 + type = "in";
6338 + continue;
6339 + }
6340 + }
6341 +
6342 + return scnprintf(buf, size, "(%c%d ep%d%s [%d/%d] q%d p%d)",
6343 + speed_char(scratch), scratch & 0x007f,
6344 + (scratch >> 8) & 0x000f, type, qh->usecs,
6345 + qh->c_usecs, temp, (scratch >> 16) & 0x7ff);
6346 +}
6347 +
6348 +#define DBG_SCHED_LIMIT 64
6349 +static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
6350 +{
6351 + struct usb_hcd *hcd;
6352 + struct fotg210_hcd *fotg210;
6353 + unsigned long flags;
6354 + union fotg210_shadow p, *seen;
6355 + unsigned temp, size, seen_count;
6356 + char *next;
6357 + unsigned i;
6358 + __hc32 tag;
6359 +
6360 + seen = kmalloc_array(DBG_SCHED_LIMIT, sizeof(*seen), GFP_ATOMIC);
6361 + if (!seen)
6362 + return 0;
6363 +
6364 + seen_count = 0;
6365 +
6366 + hcd = bus_to_hcd(buf->bus);
6367 + fotg210 = hcd_to_fotg210(hcd);
6368 + next = buf->output_buf;
6369 + size = buf->alloc_size;
6370 +
6371 + temp = scnprintf(next, size, "size = %d\n", fotg210->periodic_size);
6372 + size -= temp;
6373 + next += temp;
6374 +
6375 + /* dump a snapshot of the periodic schedule.
6376 + * iso changes, interrupt usually doesn't.
6377 + */
6378 + spin_lock_irqsave(&fotg210->lock, flags);
6379 + for (i = 0; i < fotg210->periodic_size; i++) {
6380 + p = fotg210->pshadow[i];
6381 + if (likely(!p.ptr))
6382 + continue;
6383 +
6384 + tag = Q_NEXT_TYPE(fotg210, fotg210->periodic[i]);
6385 +
6386 + temp = scnprintf(next, size, "%4d: ", i);
6387 + size -= temp;
6388 + next += temp;
6389 +
6390 + do {
6391 + struct fotg210_qh_hw *hw;
6392 +
6393 + switch (hc32_to_cpu(fotg210, tag)) {
6394 + case Q_TYPE_QH:
6395 + hw = p.qh->hw;
6396 + temp = scnprintf(next, size, " qh%d-%04x/%p",
6397 + p.qh->period,
6398 + hc32_to_cpup(fotg210,
6399 + &hw->hw_info2)
6400 + /* uframe masks */
6401 + & (QH_CMASK | QH_SMASK),
6402 + p.qh);
6403 + size -= temp;
6404 + next += temp;
6405 + /* don't repeat what follows this qh */
6406 + for (temp = 0; temp < seen_count; temp++) {
6407 + if (seen[temp].ptr != p.ptr)
6408 + continue;
6409 + if (p.qh->qh_next.ptr) {
6410 + temp = scnprintf(next, size,
6411 + " ...");
6412 + size -= temp;
6413 + next += temp;
6414 + }
6415 + break;
6416 + }
6417 + /* show more info the first time around */
6418 + if (temp == seen_count) {
6419 + temp = output_buf_tds_dir(next,
6420 + fotg210, hw,
6421 + p.qh, size);
6422 +
6423 + if (seen_count < DBG_SCHED_LIMIT)
6424 + seen[seen_count++].qh = p.qh;
6425 + } else
6426 + temp = 0;
6427 + tag = Q_NEXT_TYPE(fotg210, hw->hw_next);
6428 + p = p.qh->qh_next;
6429 + break;
6430 + case Q_TYPE_FSTN:
6431 + temp = scnprintf(next, size,
6432 + " fstn-%8x/%p",
6433 + p.fstn->hw_prev, p.fstn);
6434 + tag = Q_NEXT_TYPE(fotg210, p.fstn->hw_next);
6435 + p = p.fstn->fstn_next;
6436 + break;
6437 + case Q_TYPE_ITD:
6438 + temp = scnprintf(next, size,
6439 + " itd/%p", p.itd);
6440 + tag = Q_NEXT_TYPE(fotg210, p.itd->hw_next);
6441 + p = p.itd->itd_next;
6442 + break;
6443 + }
6444 + size -= temp;
6445 + next += temp;
6446 + } while (p.ptr);
6447 +
6448 + temp = scnprintf(next, size, "\n");
6449 + size -= temp;
6450 + next += temp;
6451 + }
6452 + spin_unlock_irqrestore(&fotg210->lock, flags);
6453 + kfree(seen);
6454 +
6455 + return buf->alloc_size - size;
6456 +}
6457 +#undef DBG_SCHED_LIMIT
6458 +
6459 +static const char *rh_state_string(struct fotg210_hcd *fotg210)
6460 +{
6461 + switch (fotg210->rh_state) {
6462 + case FOTG210_RH_HALTED:
6463 + return "halted";
6464 + case FOTG210_RH_SUSPENDED:
6465 + return "suspended";
6466 + case FOTG210_RH_RUNNING:
6467 + return "running";
6468 + case FOTG210_RH_STOPPING:
6469 + return "stopping";
6470 + }
6471 + return "?";
6472 +}
6473 +
6474 +static ssize_t fill_registers_buffer(struct debug_buffer *buf)
6475 +{
6476 + struct usb_hcd *hcd;
6477 + struct fotg210_hcd *fotg210;
6478 + unsigned long flags;
6479 + unsigned temp, size, i;
6480 + char *next, scratch[80];
6481 + static const char fmt[] = "%*s\n";
6482 + static const char label[] = "";
6483 +
6484 + hcd = bus_to_hcd(buf->bus);
6485 + fotg210 = hcd_to_fotg210(hcd);
6486 + next = buf->output_buf;
6487 + size = buf->alloc_size;
6488 +
6489 + spin_lock_irqsave(&fotg210->lock, flags);
6490 +
6491 + if (!HCD_HW_ACCESSIBLE(hcd)) {
6492 + size = scnprintf(next, size,
6493 + "bus %s, device %s\n"
6494 + "%s\n"
6495 + "SUSPENDED(no register access)\n",
6496 + hcd->self.controller->bus->name,
6497 + dev_name(hcd->self.controller),
6498 + hcd->product_desc);
6499 + goto done;
6500 + }
6501 +
6502 + /* Capability Registers */
6503 + i = HC_VERSION(fotg210, fotg210_readl(fotg210,
6504 + &fotg210->caps->hc_capbase));
6505 + temp = scnprintf(next, size,
6506 + "bus %s, device %s\n"
6507 + "%s\n"
6508 + "EHCI %x.%02x, rh state %s\n",
6509 + hcd->self.controller->bus->name,
6510 + dev_name(hcd->self.controller),
6511 + hcd->product_desc,
6512 + i >> 8, i & 0x0ff, rh_state_string(fotg210));
6513 + size -= temp;
6514 + next += temp;
6515 +
6516 + /* FIXME interpret both types of params */
6517 + i = fotg210_readl(fotg210, &fotg210->caps->hcs_params);
6518 + temp = scnprintf(next, size, "structural params 0x%08x\n", i);
6519 + size -= temp;
6520 + next += temp;
6521 +
6522 + i = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
6523 + temp = scnprintf(next, size, "capability params 0x%08x\n", i);
6524 + size -= temp;
6525 + next += temp;
6526 +
6527 + /* Operational Registers */
6528 + temp = dbg_status_buf(scratch, sizeof(scratch), label,
6529 + fotg210_readl(fotg210, &fotg210->regs->status));
6530 + temp = scnprintf(next, size, fmt, temp, scratch);
6531 + size -= temp;
6532 + next += temp;
6533 +
6534 + temp = dbg_command_buf(scratch, sizeof(scratch), label,
6535 + fotg210_readl(fotg210, &fotg210->regs->command));
6536 + temp = scnprintf(next, size, fmt, temp, scratch);
6537 + size -= temp;
6538 + next += temp;
6539 +
6540 + temp = dbg_intr_buf(scratch, sizeof(scratch), label,
6541 + fotg210_readl(fotg210, &fotg210->regs->intr_enable));
6542 + temp = scnprintf(next, size, fmt, temp, scratch);
6543 + size -= temp;
6544 + next += temp;
6545 +
6546 + temp = scnprintf(next, size, "uframe %04x\n",
6547 + fotg210_read_frame_index(fotg210));
6548 + size -= temp;
6549 + next += temp;
6550 +
6551 + if (fotg210->async_unlink) {
6552 + temp = scnprintf(next, size, "async unlink qh %p\n",
6553 + fotg210->async_unlink);
6554 + size -= temp;
6555 + next += temp;
6556 + }
6557 +
6558 +#ifdef FOTG210_STATS
6559 + temp = scnprintf(next, size,
6560 + "irq normal %ld err %ld iaa %ld(lost %ld)\n",
6561 + fotg210->stats.normal, fotg210->stats.error,
6562 + fotg210->stats.iaa, fotg210->stats.lost_iaa);
6563 + size -= temp;
6564 + next += temp;
6565 +
6566 + temp = scnprintf(next, size, "complete %ld unlink %ld\n",
6567 + fotg210->stats.complete, fotg210->stats.unlink);
6568 + size -= temp;
6569 + next += temp;
6570 +#endif
6571 +
6572 +done:
6573 + spin_unlock_irqrestore(&fotg210->lock, flags);
6574 +
6575 + return buf->alloc_size - size;
6576 +}
6577 +
6578 +static struct debug_buffer
6579 +*alloc_buffer(struct usb_bus *bus, ssize_t (*fill_func)(struct debug_buffer *))
6580 +{
6581 + struct debug_buffer *buf;
6582 +
6583 + buf = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
6584 +
6585 + if (buf) {
6586 + buf->bus = bus;
6587 + buf->fill_func = fill_func;
6588 + mutex_init(&buf->mutex);
6589 + buf->alloc_size = PAGE_SIZE;
6590 + }
6591 +
6592 + return buf;
6593 +}
6594 +
6595 +static int fill_buffer(struct debug_buffer *buf)
6596 +{
6597 + int ret = 0;
6598 +
6599 + if (!buf->output_buf)
6600 + buf->output_buf = vmalloc(buf->alloc_size);
6601 +
6602 + if (!buf->output_buf) {
6603 + ret = -ENOMEM;
6604 + goto out;
6605 + }
6606 +
6607 + ret = buf->fill_func(buf);
6608 +
6609 + if (ret >= 0) {
6610 + buf->count = ret;
6611 + ret = 0;
6612 + }
6613 +
6614 +out:
6615 + return ret;
6616 +}
6617 +
6618 +static ssize_t debug_output(struct file *file, char __user *user_buf,
6619 + size_t len, loff_t *offset)
6620 +{
6621 + struct debug_buffer *buf = file->private_data;
6622 + int ret = 0;
6623 +
6624 + mutex_lock(&buf->mutex);
6625 + if (buf->count == 0) {
6626 + ret = fill_buffer(buf);
6627 + if (ret != 0) {
6628 + mutex_unlock(&buf->mutex);
6629 + goto out;
6630 + }
6631 + }
6632 + mutex_unlock(&buf->mutex);
6633 +
6634 + ret = simple_read_from_buffer(user_buf, len, offset,
6635 + buf->output_buf, buf->count);
6636 +
6637 +out:
6638 + return ret;
6639 +
6640 +}
6641 +
6642 +static int debug_close(struct inode *inode, struct file *file)
6643 +{
6644 + struct debug_buffer *buf = file->private_data;
6645 +
6646 + if (buf) {
6647 + vfree(buf->output_buf);
6648 + kfree(buf);
6649 + }
6650 +
6651 + return 0;
6652 +}
6653 +static int debug_async_open(struct inode *inode, struct file *file)
6654 +{
6655 + file->private_data = alloc_buffer(inode->i_private, fill_async_buffer);
6656 +
6657 + return file->private_data ? 0 : -ENOMEM;
6658 +}
6659 +
6660 +static int debug_periodic_open(struct inode *inode, struct file *file)
6661 +{
6662 + struct debug_buffer *buf;
6663 +
6664 + buf = alloc_buffer(inode->i_private, fill_periodic_buffer);
6665 + if (!buf)
6666 + return -ENOMEM;
6667 +
6668 + buf->alloc_size = (sizeof(void *) == 4 ? 6 : 8)*PAGE_SIZE;
6669 + file->private_data = buf;
6670 + return 0;
6671 +}
6672 +
6673 +static int debug_registers_open(struct inode *inode, struct file *file)
6674 +{
6675 + file->private_data = alloc_buffer(inode->i_private,
6676 + fill_registers_buffer);
6677 +
6678 + return file->private_data ? 0 : -ENOMEM;
6679 +}
6680 +
6681 +static inline void create_debug_files(struct fotg210_hcd *fotg210)
6682 +{
6683 + struct usb_bus *bus = &fotg210_to_hcd(fotg210)->self;
6684 + struct dentry *root;
6685 +
6686 + root = debugfs_create_dir(bus->bus_name, fotg210_debug_root);
6687 +
6688 + debugfs_create_file("async", S_IRUGO, root, bus, &debug_async_fops);
6689 + debugfs_create_file("periodic", S_IRUGO, root, bus,
6690 + &debug_periodic_fops);
6691 + debugfs_create_file("registers", S_IRUGO, root, bus,
6692 + &debug_registers_fops);
6693 +}
6694 +
6695 +static inline void remove_debug_files(struct fotg210_hcd *fotg210)
6696 +{
6697 + struct usb_bus *bus = &fotg210_to_hcd(fotg210)->self;
6698 +
6699 + debugfs_lookup_and_remove(bus->bus_name, fotg210_debug_root);
6700 +}
6701 +
6702 +/* handshake - spin reading hc until handshake completes or fails
6703 + * @ptr: address of hc register to be read
6704 + * @mask: bits to look at in result of read
6705 + * @done: value of those bits when handshake succeeds
6706 + * @usec: timeout in microseconds
6707 + *
6708 + * Returns negative errno, or zero on success
6709 + *
6710 + * Success happens when the "mask" bits have the specified value (hardware
6711 + * handshake done). There are two failure modes: the "usec" timeout has passed (major
6712 + * hardware flakeout), or the register reads as all-ones (hardware removed).
6713 + *
6714 + * That last failure should only happen in cases like physical cardbus eject
6715 + * before driver shutdown. But it also seems to be caused by bugs in cardbus
6716 + * bridge shutdown: shutting down the bridge before the devices using it.
6717 + */
6718 +static int handshake(struct fotg210_hcd *fotg210, void __iomem *ptr,
6719 + u32 mask, u32 done, int usec)
6720 +{
6721 + u32 result;
6722 + int ret;
6723 +
6724 + ret = readl_poll_timeout_atomic(ptr, result,
6725 + ((result & mask) == done ||
6726 + result == U32_MAX), 1, usec);
6727 + if (result == U32_MAX) /* card removed */
6728 + return -ENODEV;
6729 +
6730 + return ret;
6731 +}
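A minimal usage sketch, mirroring the calls made further down in this file: wait up to 16 micro-frames (16 * 125 us, i.e. 2 ms) for the controller to report the halted state once CMD_RUN has been cleared.

        if (handshake(fotg210, &fotg210->regs->status,
                        STS_HALT, STS_HALT, 16 * 125) < 0)
                fotg210_err(fotg210, "controller failed to halt\n");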
6732 +
6733 +/* Force HC to halt state from unknown (EHCI spec section 2.3).
6734 + * Must be called with interrupts enabled and the lock not held.
6735 + */
6736 +static int fotg210_halt(struct fotg210_hcd *fotg210)
6737 +{
6738 + u32 temp;
6739 +
6740 + spin_lock_irq(&fotg210->lock);
6741 +
6742 + /* disable any irqs left enabled by previous code */
6743 + fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable);
6744 +
6745 + /*
6746 + * This routine gets called during probe before fotg210->command
6747 + * has been initialized, so we can't rely on its value.
6748 + */
6749 + fotg210->command &= ~CMD_RUN;
6750 + temp = fotg210_readl(fotg210, &fotg210->regs->command);
6751 + temp &= ~(CMD_RUN | CMD_IAAD);
6752 + fotg210_writel(fotg210, temp, &fotg210->regs->command);
6753 +
6754 + spin_unlock_irq(&fotg210->lock);
6755 + synchronize_irq(fotg210_to_hcd(fotg210)->irq);
6756 +
6757 + return handshake(fotg210, &fotg210->regs->status,
6758 + STS_HALT, STS_HALT, 16 * 125);
6759 +}
6760 +
6761 +/* Reset a non-running (STS_HALT == 1) controller.
6762 + * Must be called with interrupts enabled and the lock not held.
6763 + */
6764 +static int fotg210_reset(struct fotg210_hcd *fotg210)
6765 +{
6766 + int retval;
6767 + u32 command = fotg210_readl(fotg210, &fotg210->regs->command);
6768 +
6769 + /* If the EHCI debug controller is active, special care must be
6770 + * taken before and after a host controller reset
6771 + */
6772 + if (fotg210->debug && !dbgp_reset_prep(fotg210_to_hcd(fotg210)))
6773 + fotg210->debug = NULL;
6774 +
6775 + command |= CMD_RESET;
6776 + dbg_cmd(fotg210, "reset", command);
6777 + fotg210_writel(fotg210, command, &fotg210->regs->command);
6778 + fotg210->rh_state = FOTG210_RH_HALTED;
6779 + fotg210->next_statechange = jiffies;
6780 + retval = handshake(fotg210, &fotg210->regs->command,
6781 + CMD_RESET, 0, 250 * 1000);
6782 +
6783 + if (retval)
6784 + return retval;
6785 +
6786 + if (fotg210->debug)
6787 + dbgp_external_startup(fotg210_to_hcd(fotg210));
6788 +
6789 + fotg210->port_c_suspend = fotg210->suspended_ports =
6790 + fotg210->resuming_ports = 0;
6791 + return retval;
6792 +}
6793 +
6794 +/* Idle the controller (turn off the schedules).
6795 + * Must be called with interrupts enabled and the lock not held.
6796 + */
6797 +static void fotg210_quiesce(struct fotg210_hcd *fotg210)
6798 +{
6799 + u32 temp;
6800 +
6801 + if (fotg210->rh_state != FOTG210_RH_RUNNING)
6802 + return;
6803 +
6804 + /* wait for any schedule enables/disables to take effect */
6805 + temp = (fotg210->command << 10) & (STS_ASS | STS_PSS);
6806 + handshake(fotg210, &fotg210->regs->status, STS_ASS | STS_PSS, temp,
6807 + 16 * 125);
6808 +
6809 + /* then disable anything that's still active */
6810 + spin_lock_irq(&fotg210->lock);
6811 + fotg210->command &= ~(CMD_ASE | CMD_PSE);
6812 + fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
6813 + spin_unlock_irq(&fotg210->lock);
6814 +
6815 + /* hardware can take 16 microframes to turn off ... */
6816 + handshake(fotg210, &fotg210->regs->status, STS_ASS | STS_PSS, 0,
6817 + 16 * 125);
6818 +}
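The "command << 10" expression above relies on the EHCI-style register layout this controller mirrors: the schedule-enable bits sit at USBCMD bits 4 (CMD_PSE) and 5 (CMD_ASE), while the matching status bits are USBSTS bits 14 (STS_PSS) and 15 (STS_ASS), so shifting left by ten lines each enable bit up with its status bit. A small sketch of the expected-status computation under that assumption:

        u32 cmd = CMD_ASE | CMD_PSE;                       /* bits 5 and 4 */
        u32 expected = (cmd << 10) & (STS_ASS | STS_PSS);  /* bits 15 and 14 */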
6819 +
6820 +static void end_unlink_async(struct fotg210_hcd *fotg210);
6821 +static void unlink_empty_async(struct fotg210_hcd *fotg210);
6822 +static void fotg210_work(struct fotg210_hcd *fotg210);
6823 +static void start_unlink_intr(struct fotg210_hcd *fotg210,
6824 + struct fotg210_qh *qh);
6825 +static void end_unlink_intr(struct fotg210_hcd *fotg210, struct fotg210_qh *qh);
6826 +
6827 +/* Set a bit in the USBCMD register */
6828 +static void fotg210_set_command_bit(struct fotg210_hcd *fotg210, u32 bit)
6829 +{
6830 + fotg210->command |= bit;
6831 + fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
6832 +
6833 + /* unblock posted write */
6834 + fotg210_readl(fotg210, &fotg210->regs->command);
6835 +}
6836 +
6837 +/* Clear a bit in the USBCMD register */
6838 +static void fotg210_clear_command_bit(struct fotg210_hcd *fotg210, u32 bit)
6839 +{
6840 + fotg210->command &= ~bit;
6841 + fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
6842 +
6843 + /* unblock posted write */
6844 + fotg210_readl(fotg210, &fotg210->regs->command);
6845 +}
6846 +
6847 +/* EHCI timer support... Now using hrtimers.
6848 + *
6849 + * Lots of different events are triggered from fotg210->hrtimer. Whenever
6850 + * the timer routine runs, it checks each possible event; events that are
6851 + * currently enabled and whose expiration time has passed get handled.
6852 + * The set of enabled events is stored as a collection of bitflags in
6853 + * fotg210->enabled_hrtimer_events, and they are numbered in order of
6854 + * increasing delay values (ranging between 1 ms and 100 ms).
6855 + *
6856 + * Rather than implementing a sorted list or tree of all pending events,
6857 + * we keep track only of the lowest-numbered pending event, in
6858 + * fotg210->next_hrtimer_event. Whenever fotg210->hrtimer gets restarted, its
6859 + * expiration time is set to the timeout value for this event.
6860 + *
6861 + * As a result, events might not get handled right away; the actual delay
6862 + * could be anywhere up to twice the requested delay. This doesn't
6863 + * matter, because none of the events are especially time-critical. The
6864 + * ones that matter most all have a delay of 1 ms, so they will be
6865 + * handled after 2 ms at most, which is okay. In addition to this, we
6866 + * allow for an expiration range of 1 ms.
6867 + */
6868 +
6869 +/* Delay lengths for the hrtimer event types.
6870 + * Keep this list sorted by delay length, in the same order as
6871 + * the event types indexed by enum fotg210_hrtimer_event in fotg210.h.
6872 + */
6873 +static unsigned event_delays_ns[] = {
6874 + 1 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_POLL_ASS */
6875 + 1 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_POLL_PSS */
6876 + 1 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_POLL_DEAD */
6877 + 1125 * NSEC_PER_USEC, /* FOTG210_HRTIMER_UNLINK_INTR */
6878 + 2 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_FREE_ITDS */
6879 + 6 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_ASYNC_UNLINKS */
6880 + 10 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_IAA_WATCHDOG */
6881 + 10 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_DISABLE_PERIODIC */
6882 + 15 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_DISABLE_ASYNC */
6883 + 100 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_IO_WATCHDOG */
6884 +};
6885 +
6886 +/* Enable a pending hrtimer event */
6887 +static void fotg210_enable_event(struct fotg210_hcd *fotg210, unsigned event,
6888 + bool resched)
6889 +{
6890 + ktime_t *timeout = &fotg210->hr_timeouts[event];
6891 +
6892 + if (resched)
6893 + *timeout = ktime_add(ktime_get(), event_delays_ns[event]);
6894 + fotg210->enabled_hrtimer_events |= (1 << event);
6895 +
6896 + /* Track only the lowest-numbered pending event */
6897 + if (event < fotg210->next_hrtimer_event) {
6898 + fotg210->next_hrtimer_event = event;
6899 + hrtimer_start_range_ns(&fotg210->hrtimer, *timeout,
6900 + NSEC_PER_MSEC, HRTIMER_MODE_ABS);
6901 + }
6902 +}
6903 +
6904 +
6905 +/* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */
6906 +static void fotg210_poll_ASS(struct fotg210_hcd *fotg210)
6907 +{
6908 + unsigned actual, want;
6909 +
6910 + /* Don't enable anything if the controller isn't running (e.g., died) */
6911 + if (fotg210->rh_state != FOTG210_RH_RUNNING)
6912 + return;
6913 +
6914 + want = (fotg210->command & CMD_ASE) ? STS_ASS : 0;
6915 + actual = fotg210_readl(fotg210, &fotg210->regs->status) & STS_ASS;
6916 +
6917 + if (want != actual) {
6918 +
6919 + /* Poll again later, but give up after about 20 ms */
6920 + if (fotg210->ASS_poll_count++ < 20) {
6921 + fotg210_enable_event(fotg210, FOTG210_HRTIMER_POLL_ASS,
6922 + true);
6923 + return;
6924 + }
6925 + fotg210_dbg(fotg210, "Waited too long for the async schedule status (%x/%x), giving up\n",
6926 + want, actual);
6927 + }
6928 + fotg210->ASS_poll_count = 0;
6929 +
6930 + /* The status is up-to-date; restart or stop the schedule as needed */
6931 + if (want == 0) { /* Stopped */
6932 + if (fotg210->async_count > 0)
6933 + fotg210_set_command_bit(fotg210, CMD_ASE);
6934 +
6935 + } else { /* Running */
6936 + if (fotg210->async_count == 0) {
6937 +
6938 + /* Turn off the schedule after a while */
6939 + fotg210_enable_event(fotg210,
6940 + FOTG210_HRTIMER_DISABLE_ASYNC,
6941 + true);
6942 + }
6943 + }
6944 +}
6945 +
6946 +/* Turn off the async schedule after a brief delay */
6947 +static void fotg210_disable_ASE(struct fotg210_hcd *fotg210)
6948 +{
6949 + fotg210_clear_command_bit(fotg210, CMD_ASE);
6950 +}
6951 +
6952 +
6953 +/* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */
6954 +static void fotg210_poll_PSS(struct fotg210_hcd *fotg210)
6955 +{
6956 + unsigned actual, want;
6957 +
6958 + /* Don't do anything if the controller isn't running (e.g., died) */
6959 + if (fotg210->rh_state != FOTG210_RH_RUNNING)
6960 + return;
6961 +
6962 + want = (fotg210->command & CMD_PSE) ? STS_PSS : 0;
6963 + actual = fotg210_readl(fotg210, &fotg210->regs->status) & STS_PSS;
6964 +
6965 + if (want != actual) {
6966 +
6967 + /* Poll again later, but give up after about 20 ms */
6968 + if (fotg210->PSS_poll_count++ < 20) {
6969 + fotg210_enable_event(fotg210, FOTG210_HRTIMER_POLL_PSS,
6970 + true);
6971 + return;
6972 + }
6973 + fotg210_dbg(fotg210, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
6974 + want, actual);
6975 + }
6976 + fotg210->PSS_poll_count = 0;
6977 +
6978 + /* The status is up-to-date; restart or stop the schedule as needed */
6979 + if (want == 0) { /* Stopped */
6980 + if (fotg210->periodic_count > 0)
6981 + fotg210_set_command_bit(fotg210, CMD_PSE);
6982 +
6983 + } else { /* Running */
6984 + if (fotg210->periodic_count == 0) {
6985 +
6986 + /* Turn off the schedule after a while */
6987 + fotg210_enable_event(fotg210,
6988 + FOTG210_HRTIMER_DISABLE_PERIODIC,
6989 + true);
6990 + }
6991 + }
6992 +}
6993 +
6994 +/* Turn off the periodic schedule after a brief delay */
6995 +static void fotg210_disable_PSE(struct fotg210_hcd *fotg210)
6996 +{
6997 + fotg210_clear_command_bit(fotg210, CMD_PSE);
6998 +}
6999 +
7000 +
7001 +/* Poll the STS_HALT status bit; see when a dead controller stops */
7002 +static void fotg210_handle_controller_death(struct fotg210_hcd *fotg210)
7003 +{
7004 + if (!(fotg210_readl(fotg210, &fotg210->regs->status) & STS_HALT)) {
7005 +
7006 + /* Give up after a few milliseconds */
7007 + if (fotg210->died_poll_count++ < 5) {
7008 + /* Try again later */
7009 + fotg210_enable_event(fotg210,
7010 + FOTG210_HRTIMER_POLL_DEAD, true);
7011 + return;
7012 + }
7013 + fotg210_warn(fotg210, "Waited too long for the controller to stop, giving up\n");
7014 + }
7015 +
7016 + /* Clean up the mess */
7017 + fotg210->rh_state = FOTG210_RH_HALTED;
7018 + fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable);
7019 + fotg210_work(fotg210);
7020 + end_unlink_async(fotg210);
7021 +
7022 + /* Not in process context, so don't try to reset the controller */
7023 +}
7024 +
7025 +
7026 +/* Handle unlinked interrupt QHs once they are gone from the hardware */
7027 +static void fotg210_handle_intr_unlinks(struct fotg210_hcd *fotg210)
7028 +{
7029 + bool stopped = (fotg210->rh_state < FOTG210_RH_RUNNING);
7030 +
7031 + /*
7032 + * Process all the QHs on the intr_unlink list that were added
7033 + * before the current unlink cycle began. The list is in
7034 + * temporal order, so stop when we reach the first entry in the
7035 + * current cycle. But if the root hub isn't running then
7036 + * process all the QHs on the list.
7037 + */
7038 + fotg210->intr_unlinking = true;
7039 + while (fotg210->intr_unlink) {
7040 + struct fotg210_qh *qh = fotg210->intr_unlink;
7041 +
7042 + if (!stopped && qh->unlink_cycle == fotg210->intr_unlink_cycle)
7043 + break;
7044 + fotg210->intr_unlink = qh->unlink_next;
7045 + qh->unlink_next = NULL;
7046 + end_unlink_intr(fotg210, qh);
7047 + }
7048 +
7049 + /* Handle remaining entries later */
7050 + if (fotg210->intr_unlink) {
7051 + fotg210_enable_event(fotg210, FOTG210_HRTIMER_UNLINK_INTR,
7052 + true);
7053 + ++fotg210->intr_unlink_cycle;
7054 + }
7055 + fotg210->intr_unlinking = false;
7056 +}
7057 +
7058 +
7059 +/* Start another free-iTDs/siTDs cycle */
7060 +static void start_free_itds(struct fotg210_hcd *fotg210)
7061 +{
7062 + if (!(fotg210->enabled_hrtimer_events &
7063 + BIT(FOTG210_HRTIMER_FREE_ITDS))) {
7064 + fotg210->last_itd_to_free = list_entry(
7065 + fotg210->cached_itd_list.prev,
7066 + struct fotg210_itd, itd_list);
7067 + fotg210_enable_event(fotg210, FOTG210_HRTIMER_FREE_ITDS, true);
7068 + }
7069 +}
7070 +
7071 +/* Wait for controller to stop using old iTDs and siTDs */
7072 +static void end_free_itds(struct fotg210_hcd *fotg210)
7073 +{
7074 + struct fotg210_itd *itd, *n;
7075 +
7076 + if (fotg210->rh_state < FOTG210_RH_RUNNING)
7077 + fotg210->last_itd_to_free = NULL;
7078 +
7079 + list_for_each_entry_safe(itd, n, &fotg210->cached_itd_list, itd_list) {
7080 + list_del(&itd->itd_list);
7081 + dma_pool_free(fotg210->itd_pool, itd, itd->itd_dma);
7082 + if (itd == fotg210->last_itd_to_free)
7083 + break;
7084 + }
7085 +
7086 + if (!list_empty(&fotg210->cached_itd_list))
7087 + start_free_itds(fotg210);
7088 +}
7089 +
7090 +
7091 +/* Handle lost (or very late) IAA interrupts */
7092 +static void fotg210_iaa_watchdog(struct fotg210_hcd *fotg210)
7093 +{
7094 + if (fotg210->rh_state != FOTG210_RH_RUNNING)
7095 + return;
7096 +
7097 + /*
7098 + * Lost IAA irqs wedge things badly; seen first with a vt8235.
7099 + * So we need this watchdog, but must protect it against both
7100 + * (a) SMP races against real IAA firing and retriggering, and
7101 + * (b) clean HC shutdown, when IAA watchdog was pending.
7102 + */
7103 + if (fotg210->async_iaa) {
7104 + u32 cmd, status;
7105 +
7106 + /* If we get here, IAA is *REALLY* late. It's barely
7107 + * conceivable that the system is so busy that CMD_IAAD
7108 + * is still legitimately set, so let's be sure it's
7109 + * clear before we read STS_IAA. (The HC should clear
7110 + * CMD_IAAD when it sets STS_IAA.)
7111 + */
7112 + cmd = fotg210_readl(fotg210, &fotg210->regs->command);
7113 +
7114 + /*
7115 + * If IAA is set here it either legitimately triggered
7116 + * after the watchdog timer expired (_way_ late, so we'll
7117 + * still count it as lost) ... or a silicon erratum:
7118 + * - VIA seems to set IAA without triggering the IRQ;
7119 + * - IAAD potentially cleared without setting IAA.
7120 + */
7121 + status = fotg210_readl(fotg210, &fotg210->regs->status);
7122 + if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
7123 + INCR(fotg210->stats.lost_iaa);
7124 + fotg210_writel(fotg210, STS_IAA,
7125 + &fotg210->regs->status);
7126 + }
7127 +
7128 + fotg210_dbg(fotg210, "IAA watchdog: status %x cmd %x\n",
7129 + status, cmd);
7130 + end_unlink_async(fotg210);
7131 + }
7132 +}
7133 +
7134 +
7135 +/* Enable the I/O watchdog, if appropriate */
7136 +static void turn_on_io_watchdog(struct fotg210_hcd *fotg210)
7137 +{
7138 + /* Not needed if the controller isn't running or it's already enabled */
7139 + if (fotg210->rh_state != FOTG210_RH_RUNNING ||
7140 + (fotg210->enabled_hrtimer_events &
7141 + BIT(FOTG210_HRTIMER_IO_WATCHDOG)))
7142 + return;
7143 +
7144 + /*
7145 + * Isochronous transfers always need the watchdog.
7146 + * For other sorts we use it only if the flag is set.
7147 + */
7148 + if (fotg210->isoc_count > 0 || (fotg210->need_io_watchdog &&
7149 + fotg210->async_count + fotg210->intr_count > 0))
7150 + fotg210_enable_event(fotg210, FOTG210_HRTIMER_IO_WATCHDOG,
7151 + true);
7152 +}
7153 +
7154 +
7155 +/* Handler functions for the hrtimer event types.
7156 + * Keep this array in the same order as the event types indexed by
7157 + * enum fotg210_hrtimer_event in fotg210.h.
7158 + */
7159 +static void (*event_handlers[])(struct fotg210_hcd *) = {
7160 + fotg210_poll_ASS, /* FOTG210_HRTIMER_POLL_ASS */
7161 + fotg210_poll_PSS, /* FOTG210_HRTIMER_POLL_PSS */
7162 + fotg210_handle_controller_death, /* FOTG210_HRTIMER_POLL_DEAD */
7163 + fotg210_handle_intr_unlinks, /* FOTG210_HRTIMER_UNLINK_INTR */
7164 + end_free_itds, /* FOTG210_HRTIMER_FREE_ITDS */
7165 + unlink_empty_async, /* FOTG210_HRTIMER_ASYNC_UNLINKS */
7166 + fotg210_iaa_watchdog, /* FOTG210_HRTIMER_IAA_WATCHDOG */
7167 + fotg210_disable_PSE, /* FOTG210_HRTIMER_DISABLE_PERIODIC */
7168 + fotg210_disable_ASE, /* FOTG210_HRTIMER_DISABLE_ASYNC */
7169 + fotg210_work, /* FOTG210_HRTIMER_IO_WATCHDOG */
7170 +};
7171 +
7172 +static enum hrtimer_restart fotg210_hrtimer_func(struct hrtimer *t)
7173 +{
7174 + struct fotg210_hcd *fotg210 =
7175 + container_of(t, struct fotg210_hcd, hrtimer);
7176 + ktime_t now;
7177 + unsigned long events;
7178 + unsigned long flags;
7179 + unsigned e;
7180 +
7181 + spin_lock_irqsave(&fotg210->lock, flags);
7182 +
7183 + events = fotg210->enabled_hrtimer_events;
7184 + fotg210->enabled_hrtimer_events = 0;
7185 + fotg210->next_hrtimer_event = FOTG210_HRTIMER_NO_EVENT;
7186 +
7187 + /*
7188 + * Check each pending event. If its time has expired, handle
7189 + * the event; otherwise re-enable it.
7190 + */
7191 + now = ktime_get();
7192 + for_each_set_bit(e, &events, FOTG210_HRTIMER_NUM_EVENTS) {
7193 + if (ktime_compare(now, fotg210->hr_timeouts[e]) >= 0)
7194 + event_handlers[e](fotg210);
7195 + else
7196 + fotg210_enable_event(fotg210, e, false);
7197 + }
7198 +
7199 + spin_unlock_irqrestore(&fotg210->lock, flags);
7200 + return HRTIMER_NORESTART;
7201 +}
7202 +
7203 +#define fotg210_bus_suspend NULL
7204 +#define fotg210_bus_resume NULL
7205 +
7206 +static int check_reset_complete(struct fotg210_hcd *fotg210, int index,
7207 + u32 __iomem *status_reg, int port_status)
7208 +{
7209 + if (!(port_status & PORT_CONNECT))
7210 + return port_status;
7211 +
7212 + /* if reset finished and it's still not enabled -- handoff */
7213 + if (!(port_status & PORT_PE))
7214 + /* with integrated TT, there's nobody to hand it to! */
7215 + fotg210_dbg(fotg210, "Failed to enable port %d on root hub TT\n",
7216 + index + 1);
7217 + else
7218 + fotg210_dbg(fotg210, "port %d reset complete, port enabled\n",
7219 + index + 1);
7220 +
7221 + return port_status;
7222 +}
7223 +
7224 +
7225 +/* build "status change" packet (one or two bytes) from HC registers */
7226 +
7227 +static int fotg210_hub_status_data(struct usb_hcd *hcd, char *buf)
7228 +{
7229 + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
7230 + u32 temp, status;
7231 + u32 mask;
7232 + int retval = 1;
7233 + unsigned long flags;
7234 +
7235 + /* init status to no-changes */
7236 + buf[0] = 0;
7237 +
7238 + /* Inform the core about resumes-in-progress by returning
7239 + * a non-zero value even if there are no status changes.
7240 + */
7241 + status = fotg210->resuming_ports;
7242 +
7243 + mask = PORT_CSC | PORT_PEC;
7244 + /* PORT_RESUME from hardware ~= PORT_STAT_C_SUSPEND */
7245 +
7246 + /* no hub change reports (bit 0) for now (power, ...) */
7247 +
7248 + /* port N changes (bit N)? */
7249 + spin_lock_irqsave(&fotg210->lock, flags);
7250 +
7251 + temp = fotg210_readl(fotg210, &fotg210->regs->port_status);
7252 +
7253 + /*
7254 + * Return status information even for ports with OWNER set.
7255 + * Otherwise hub_wq wouldn't see the disconnect event when a
7256 + * high-speed device is switched over to the companion
7257 + * controller by the user.
7258 + */
7259 +
7260 + if ((temp & mask) != 0 || test_bit(0, &fotg210->port_c_suspend) ||
7261 + (fotg210->reset_done[0] &&
7262 + time_after_eq(jiffies, fotg210->reset_done[0]))) {
7263 + buf[0] |= 1 << 1;
7264 + status = STS_PCD;
7265 + }
7266 + /* FIXME autosuspend idle root hubs */
7267 + spin_unlock_irqrestore(&fotg210->lock, flags);
7268 + return status ? retval : 0;
7269 +}
7270 +
7271 +static void fotg210_hub_descriptor(struct fotg210_hcd *fotg210,
7272 + struct usb_hub_descriptor *desc)
7273 +{
7274 + int ports = HCS_N_PORTS(fotg210->hcs_params);
7275 + u16 temp;
7276 +
7277 + desc->bDescriptorType = USB_DT_HUB;
7278 + desc->bPwrOn2PwrGood = 10; /* fotg210 1.0, 2.3.9 says 20ms max */
7279 + desc->bHubContrCurrent = 0;
7280 +
7281 + desc->bNbrPorts = ports;
7282 + temp = 1 + (ports / 8);
7283 + desc->bDescLength = 7 + 2 * temp;
7284 +
7285 + /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
7286 + memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
7287 + memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
7288 +
7289 + temp = HUB_CHAR_INDV_PORT_OCPM; /* per-port overcurrent reporting */
7290 + temp |= HUB_CHAR_NO_LPSM; /* no power switching */
7291 + desc->wHubCharacteristics = cpu_to_le16(temp);
7292 +}
7293 +
7294 +static int fotg210_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
7295 + u16 wIndex, char *buf, u16 wLength)
7296 +{
7297 + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
7298 + int ports = HCS_N_PORTS(fotg210->hcs_params);
7299 + u32 __iomem *status_reg = &fotg210->regs->port_status;
7300 + u32 temp, temp1, status;
7301 + unsigned long flags;
7302 + int retval = 0;
7303 + unsigned selector;
7304 +
7305 + /*
7306 + * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
7307 + * HCS_INDICATOR may say we can change LEDs to off/amber/green.
7308 + * (track current state ourselves) ... blink for diagnostics,
7309 + * power, "this is the one", etc. EHCI spec supports this.
7310 + */
7311 +
7312 + spin_lock_irqsave(&fotg210->lock, flags);
7313 + switch (typeReq) {
7314 + case ClearHubFeature:
7315 + switch (wValue) {
7316 + case C_HUB_LOCAL_POWER:
7317 + case C_HUB_OVER_CURRENT:
7318 + /* no hub-wide feature/status flags */
7319 + break;
7320 + default:
7321 + goto error;
7322 + }
7323 + break;
7324 + case ClearPortFeature:
7325 + if (!wIndex || wIndex > ports)
7326 + goto error;
7327 + wIndex--;
7328 + temp = fotg210_readl(fotg210, status_reg);
7329 + temp &= ~PORT_RWC_BITS;
7330 +
7331 + /*
7332 + * Even if OWNER is set, so the port is owned by the
7333 + * companion controller, hub_wq needs to be able to clear
7334 + * the port-change status bits (especially
7335 + * USB_PORT_STAT_C_CONNECTION).
7336 + */
7337 +
7338 + switch (wValue) {
7339 + case USB_PORT_FEAT_ENABLE:
7340 + fotg210_writel(fotg210, temp & ~PORT_PE, status_reg);
7341 + break;
7342 + case USB_PORT_FEAT_C_ENABLE:
7343 + fotg210_writel(fotg210, temp | PORT_PEC, status_reg);
7344 + break;
7345 + case USB_PORT_FEAT_SUSPEND:
7346 + if (temp & PORT_RESET)
7347 + goto error;
7348 + if (!(temp & PORT_SUSPEND))
7349 + break;
7350 + if ((temp & PORT_PE) == 0)
7351 + goto error;
7352 +
7353 + /* resume signaling for 20 msec */
7354 + fotg210_writel(fotg210, temp | PORT_RESUME, status_reg);
7355 + fotg210->reset_done[wIndex] = jiffies
7356 + + msecs_to_jiffies(USB_RESUME_TIMEOUT);
7357 + break;
7358 + case USB_PORT_FEAT_C_SUSPEND:
7359 + clear_bit(wIndex, &fotg210->port_c_suspend);
7360 + break;
7361 + case USB_PORT_FEAT_C_CONNECTION:
7362 + fotg210_writel(fotg210, temp | PORT_CSC, status_reg);
7363 + break;
7364 + case USB_PORT_FEAT_C_OVER_CURRENT:
7365 + fotg210_writel(fotg210, temp | OTGISR_OVC,
7366 + &fotg210->regs->otgisr);
7367 + break;
7368 + case USB_PORT_FEAT_C_RESET:
7369 + /* GetPortStatus clears reset */
7370 + break;
7371 + default:
7372 + goto error;
7373 + }
7374 + fotg210_readl(fotg210, &fotg210->regs->command);
7375 + break;
7376 + case GetHubDescriptor:
7377 + fotg210_hub_descriptor(fotg210, (struct usb_hub_descriptor *)
7378 + buf);
7379 + break;
7380 + case GetHubStatus:
7381 + /* no hub-wide feature/status flags */
7382 + memset(buf, 0, 4);
7383 + /*cpu_to_le32s ((u32 *) buf); */
7384 + break;
7385 + case GetPortStatus:
7386 + if (!wIndex || wIndex > ports)
7387 + goto error;
7388 + wIndex--;
7389 + status = 0;
7390 + temp = fotg210_readl(fotg210, status_reg);
7391 +
7392 + /* wPortChange bits */
7393 + if (temp & PORT_CSC)
7394 + status |= USB_PORT_STAT_C_CONNECTION << 16;
7395 + if (temp & PORT_PEC)
7396 + status |= USB_PORT_STAT_C_ENABLE << 16;
7397 +
7398 + temp1 = fotg210_readl(fotg210, &fotg210->regs->otgisr);
7399 + if (temp1 & OTGISR_OVC)
7400 + status |= USB_PORT_STAT_C_OVERCURRENT << 16;
7401 +
7402 + /* whoever resumes must GetPortStatus to complete it!! */
7403 + if (temp & PORT_RESUME) {
7404 +
7405 + /* Remote Wakeup received? */
7406 + if (!fotg210->reset_done[wIndex]) {
7407 + /* resume signaling for 20 msec */
7408 + fotg210->reset_done[wIndex] = jiffies
7409 + + msecs_to_jiffies(20);
7410 + /* check the port again */
7411 + mod_timer(&fotg210_to_hcd(fotg210)->rh_timer,
7412 + fotg210->reset_done[wIndex]);
7413 + }
7414 +
7415 + /* resume completed? */
7416 + else if (time_after_eq(jiffies,
7417 + fotg210->reset_done[wIndex])) {
7418 + clear_bit(wIndex, &fotg210->suspended_ports);
7419 + set_bit(wIndex, &fotg210->port_c_suspend);
7420 + fotg210->reset_done[wIndex] = 0;
7421 +
7422 + /* stop resume signaling */
7423 + temp = fotg210_readl(fotg210, status_reg);
7424 + fotg210_writel(fotg210, temp &
7425 + ~(PORT_RWC_BITS | PORT_RESUME),
7426 + status_reg);
7427 + clear_bit(wIndex, &fotg210->resuming_ports);
7428 + retval = handshake(fotg210, status_reg,
7429 + PORT_RESUME, 0, 2000);/* 2ms */
7430 + if (retval != 0) {
7431 + fotg210_err(fotg210,
7432 + "port %d resume error %d\n",
7433 + wIndex + 1, retval);
7434 + goto error;
7435 + }
7436 + temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
7437 + }
7438 + }
7439 +
7440 + /* whoever resets must GetPortStatus to complete it!! */
7441 + if ((temp & PORT_RESET) && time_after_eq(jiffies,
7442 + fotg210->reset_done[wIndex])) {
7443 + status |= USB_PORT_STAT_C_RESET << 16;
7444 + fotg210->reset_done[wIndex] = 0;
7445 + clear_bit(wIndex, &fotg210->resuming_ports);
7446 +
7447 + /* force reset to complete */
7448 + fotg210_writel(fotg210,
7449 + temp & ~(PORT_RWC_BITS | PORT_RESET),
7450 + status_reg);
7451 + /* REVISIT: some hardware needs 550+ usec to clear
7452 + * this bit; seems too long to spin routinely...
7453 + */
7454 + retval = handshake(fotg210, status_reg,
7455 + PORT_RESET, 0, 1000);
7456 + if (retval != 0) {
7457 + fotg210_err(fotg210, "port %d reset error %d\n",
7458 + wIndex + 1, retval);
7459 + goto error;
7460 + }
7461 +
7462 + /* see what we found out */
7463 + temp = check_reset_complete(fotg210, wIndex, status_reg,
7464 + fotg210_readl(fotg210, status_reg));
7465 +
7466 + /* restart schedule */
7467 + fotg210->command |= CMD_RUN;
7468 + fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
7469 + }
7470 +
7471 + if (!(temp & (PORT_RESUME|PORT_RESET))) {
7472 + fotg210->reset_done[wIndex] = 0;
7473 + clear_bit(wIndex, &fotg210->resuming_ports);
7474 + }
7475 +
7476 + /* transfer dedicated ports to the companion hc */
7477 + if ((temp & PORT_CONNECT) &&
7478 + test_bit(wIndex, &fotg210->companion_ports)) {
7479 + temp &= ~PORT_RWC_BITS;
7480 + fotg210_writel(fotg210, temp, status_reg);
7481 + fotg210_dbg(fotg210, "port %d --> companion\n",
7482 + wIndex + 1);
7483 + temp = fotg210_readl(fotg210, status_reg);
7484 + }
7485 +
7486 + /*
7487 + * Even if OWNER is set, there's no harm letting hub_wq
7488 + * see the wPortStatus values (they should all be 0 except
7489 + * for PORT_POWER anyway).
7490 + */
7491 +
7492 + if (temp & PORT_CONNECT) {
7493 + status |= USB_PORT_STAT_CONNECTION;
7494 + status |= fotg210_port_speed(fotg210, temp);
7495 + }
7496 + if (temp & PORT_PE)
7497 + status |= USB_PORT_STAT_ENABLE;
7498 +
7499 + /* maybe the port was unsuspended without our knowledge */
7500 + if (temp & (PORT_SUSPEND|PORT_RESUME)) {
7501 + status |= USB_PORT_STAT_SUSPEND;
7502 + } else if (test_bit(wIndex, &fotg210->suspended_ports)) {
7503 + clear_bit(wIndex, &fotg210->suspended_ports);
7504 + clear_bit(wIndex, &fotg210->resuming_ports);
7505 + fotg210->reset_done[wIndex] = 0;
7506 + if (temp & PORT_PE)
7507 + set_bit(wIndex, &fotg210->port_c_suspend);
7508 + }
7509 +
7510 + temp1 = fotg210_readl(fotg210, &fotg210->regs->otgisr);
7511 + if (temp1 & OTGISR_OVC)
7512 + status |= USB_PORT_STAT_OVERCURRENT;
7513 + if (temp & PORT_RESET)
7514 + status |= USB_PORT_STAT_RESET;
7515 + if (test_bit(wIndex, &fotg210->port_c_suspend))
7516 + status |= USB_PORT_STAT_C_SUSPEND << 16;
7517 +
7518 + if (status & ~0xffff) /* only if wPortChange is interesting */
7519 + dbg_port(fotg210, "GetStatus", wIndex + 1, temp);
7520 + put_unaligned_le32(status, buf);
7521 + break;
7522 + case SetHubFeature:
7523 + switch (wValue) {
7524 + case C_HUB_LOCAL_POWER:
7525 + case C_HUB_OVER_CURRENT:
7526 + /* no hub-wide feature/status flags */
7527 + break;
7528 + default:
7529 + goto error;
7530 + }
7531 + break;
7532 + case SetPortFeature:
7533 + selector = wIndex >> 8;
7534 + wIndex &= 0xff;
7535 +
7536 + if (!wIndex || wIndex > ports)
7537 + goto error;
7538 + wIndex--;
7539 + temp = fotg210_readl(fotg210, status_reg);
7540 + temp &= ~PORT_RWC_BITS;
7541 + switch (wValue) {
7542 + case USB_PORT_FEAT_SUSPEND:
7543 + if ((temp & PORT_PE) == 0
7544 + || (temp & PORT_RESET) != 0)
7545 + goto error;
7546 +
7547 + /* After the above check the port must be connected.
7548 + * Setting the appropriate bit could put the PHY into
7549 + * low-power mode if we have the hostpc feature.
7550 + */
7551 + fotg210_writel(fotg210, temp | PORT_SUSPEND,
7552 + status_reg);
7553 + set_bit(wIndex, &fotg210->suspended_ports);
7554 + break;
7555 + case USB_PORT_FEAT_RESET:
7556 + if (temp & PORT_RESUME)
7557 + goto error;
7558 + /* line status bits may report this as low speed,
7559 + * which can be fine if this root hub has a
7560 + * transaction translator built in.
7561 + */
7562 + fotg210_dbg(fotg210, "port %d reset\n", wIndex + 1);
7563 + temp |= PORT_RESET;
7564 + temp &= ~PORT_PE;
7565 +
7566 + /*
7567 + * caller must wait, then call GetPortStatus
7568 + * usb 2.0 spec says 50 ms resets on root
7569 + */
7570 + fotg210->reset_done[wIndex] = jiffies
7571 + + msecs_to_jiffies(50);
7572 + fotg210_writel(fotg210, temp, status_reg);
7573 + break;
7574 +
7575 + /* For downstream facing ports (these): one hub port is put
7576 + * into test mode according to USB2 11.24.2.13, then the hub
7577 + * must be reset (which for root hub now means rmmod+modprobe,
7578 + * or else system reboot). See EHCI 2.3.9 and 4.14 for info
7579 + * about the EHCI-specific stuff.
7580 + */
7581 + case USB_PORT_FEAT_TEST:
7582 + if (!selector || selector > 5)
7583 + goto error;
7584 + spin_unlock_irqrestore(&fotg210->lock, flags);
7585 + fotg210_quiesce(fotg210);
7586 + spin_lock_irqsave(&fotg210->lock, flags);
7587 +
7588 + /* Put all enabled ports into suspend */
7589 + temp = fotg210_readl(fotg210, status_reg) &
7590 + ~PORT_RWC_BITS;
7591 + if (temp & PORT_PE)
7592 + fotg210_writel(fotg210, temp | PORT_SUSPEND,
7593 + status_reg);
7594 +
7595 + spin_unlock_irqrestore(&fotg210->lock, flags);
7596 + fotg210_halt(fotg210);
7597 + spin_lock_irqsave(&fotg210->lock, flags);
7598 +
7599 + temp = fotg210_readl(fotg210, status_reg);
7600 + temp |= selector << 16;
7601 + fotg210_writel(fotg210, temp, status_reg);
7602 + break;
7603 +
7604 + default:
7605 + goto error;
7606 + }
7607 + fotg210_readl(fotg210, &fotg210->regs->command);
7608 + break;
7609 +
7610 + default:
7611 +error:
7612 + /* "stall" on error */
7613 + retval = -EPIPE;
7614 + }
7615 + spin_unlock_irqrestore(&fotg210->lock, flags);
7616 + return retval;
7617 +}
7618 +
7619 +static void __maybe_unused fotg210_relinquish_port(struct usb_hcd *hcd,
7620 + int portnum)
7621 +{
7622 + return;
7623 +}
7624 +
7625 +static int __maybe_unused fotg210_port_handed_over(struct usb_hcd *hcd,
7626 + int portnum)
7627 +{
7628 + return 0;
7629 +}
7630 +
7631 +/* There are basically three types of memory:
7632 + * - data used only by the HCD ... kmalloc is fine
7633 + * - async and periodic schedules, shared by HC and HCD ... these
7634 + * need to use dma_pool or dma_alloc_coherent
7635 + * - driver buffers, read/written by HC ... single shot DMA mapped
7636 + *
7637 + * There's also "register" data (e.g. PCI or SOC), which is memory mapped.
7638 + * No memory seen by this driver is pageable.
7639 + */
7640 +
7641 +/* Allocate the key transfer structures from the previously allocated pool */
7642 +static inline void fotg210_qtd_init(struct fotg210_hcd *fotg210,
7643 + struct fotg210_qtd *qtd, dma_addr_t dma)
7644 +{
7645 + memset(qtd, 0, sizeof(*qtd));
7646 + qtd->qtd_dma = dma;
7647 + qtd->hw_token = cpu_to_hc32(fotg210, QTD_STS_HALT);
7648 + qtd->hw_next = FOTG210_LIST_END(fotg210);
7649 + qtd->hw_alt_next = FOTG210_LIST_END(fotg210);
7650 + INIT_LIST_HEAD(&qtd->qtd_list);
7651 +}
7652 +
7653 +static struct fotg210_qtd *fotg210_qtd_alloc(struct fotg210_hcd *fotg210,
7654 + gfp_t flags)
7655 +{
7656 + struct fotg210_qtd *qtd;
7657 + dma_addr_t dma;
7658 +
7659 + qtd = dma_pool_alloc(fotg210->qtd_pool, flags, &dma);
7660 + if (qtd != NULL)
7661 + fotg210_qtd_init(fotg210, qtd, dma);
7662 +
7663 + return qtd;
7664 +}
7665 +
7666 +static inline void fotg210_qtd_free(struct fotg210_hcd *fotg210,
7667 + struct fotg210_qtd *qtd)
7668 +{
7669 + dma_pool_free(fotg210->qtd_pool, qtd, qtd->qtd_dma);
7670 +}
7671 +
7672 +
7673 +static void qh_destroy(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
7674 +{
7675 + /* clean qtds first, and know this is not linked */
7676 + if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) {
7677 + fotg210_dbg(fotg210, "unused qh not empty!\n");
7678 + BUG();
7679 + }
7680 + if (qh->dummy)
7681 + fotg210_qtd_free(fotg210, qh->dummy);
7682 + dma_pool_free(fotg210->qh_pool, qh->hw, qh->qh_dma);
7683 + kfree(qh);
7684 +}
7685 +
7686 +static struct fotg210_qh *fotg210_qh_alloc(struct fotg210_hcd *fotg210,
7687 + gfp_t flags)
7688 +{
7689 + struct fotg210_qh *qh;
7690 + dma_addr_t dma;
7691 +
7692 + qh = kzalloc(sizeof(*qh), GFP_ATOMIC);
7693 + if (!qh)
7694 + goto done;
7695 + qh->hw = (struct fotg210_qh_hw *)
7696 + dma_pool_zalloc(fotg210->qh_pool, flags, &dma);
7697 + if (!qh->hw)
7698 + goto fail;
7699 + qh->qh_dma = dma;
7700 + INIT_LIST_HEAD(&qh->qtd_list);
7701 +
7702 + /* dummy td enables safe urb queuing */
7703 + qh->dummy = fotg210_qtd_alloc(fotg210, flags);
7704 + if (qh->dummy == NULL) {
7705 + fotg210_dbg(fotg210, "no dummy td\n");
7706 + goto fail1;
7707 + }
7708 +done:
7709 + return qh;
7710 +fail1:
7711 + dma_pool_free(fotg210->qh_pool, qh->hw, qh->qh_dma);
7712 +fail:
7713 + kfree(qh);
7714 + return NULL;
7715 +}
7716 +
7717 +/* The queue heads and transfer descriptors are managed from pools tied
7718 + * to each of the "per device" structures.
7719 + * This is the initialisation and cleanup code.
7720 + */
7721 +
7722 +static void fotg210_mem_cleanup(struct fotg210_hcd *fotg210)
7723 +{
7724 + if (fotg210->async)
7725 + qh_destroy(fotg210, fotg210->async);
7726 + fotg210->async = NULL;
7727 +
7728 + if (fotg210->dummy)
7729 + qh_destroy(fotg210, fotg210->dummy);
7730 + fotg210->dummy = NULL;
7731 +
7732 + /* DMA consistent memory and pools */
7733 + dma_pool_destroy(fotg210->qtd_pool);
7734 + fotg210->qtd_pool = NULL;
7735 +
7736 + dma_pool_destroy(fotg210->qh_pool);
7737 + fotg210->qh_pool = NULL;
7738 +
7739 + dma_pool_destroy(fotg210->itd_pool);
7740 + fotg210->itd_pool = NULL;
7741 +
7742 + if (fotg210->periodic)
7743 + dma_free_coherent(fotg210_to_hcd(fotg210)->self.controller,
7744 + fotg210->periodic_size * sizeof(u32),
7745 + fotg210->periodic, fotg210->periodic_dma);
7746 + fotg210->periodic = NULL;
7747 +
7748 + /* shadow periodic table */
7749 + kfree(fotg210->pshadow);
7750 + fotg210->pshadow = NULL;
7751 +}
7752 +
7753 +/* remember to add cleanup code (above) if you add anything here */
7754 +static int fotg210_mem_init(struct fotg210_hcd *fotg210, gfp_t flags)
7755 +{
7756 + int i;
7757 +
7758 + /* QTDs for control/bulk/intr transfers */
7759 + fotg210->qtd_pool = dma_pool_create("fotg210_qtd",
7760 + fotg210_to_hcd(fotg210)->self.controller,
7761 + sizeof(struct fotg210_qtd),
7762 + 32 /* byte alignment (for hw parts) */,
7763 + 4096 /* can't cross 4K */);
7764 + if (!fotg210->qtd_pool)
7765 + goto fail;
7766 +
7767 + /* QHs for control/bulk/intr transfers */
7768 + fotg210->qh_pool = dma_pool_create("fotg210_qh",
7769 + fotg210_to_hcd(fotg210)->self.controller,
7770 + sizeof(struct fotg210_qh_hw),
7771 + 32 /* byte alignment (for hw parts) */,
7772 + 4096 /* can't cross 4K */);
7773 + if (!fotg210->qh_pool)
7774 + goto fail;
7775 +
7776 + fotg210->async = fotg210_qh_alloc(fotg210, flags);
7777 + if (!fotg210->async)
7778 + goto fail;
7779 +
7780 + /* ITD for high speed ISO transfers */
7781 + fotg210->itd_pool = dma_pool_create("fotg210_itd",
7782 + fotg210_to_hcd(fotg210)->self.controller,
7783 + sizeof(struct fotg210_itd),
7784 + 64 /* byte alignment (for hw parts) */,
7785 + 4096 /* can't cross 4K */);
7786 + if (!fotg210->itd_pool)
7787 + goto fail;
7788 +
7789 + /* Hardware periodic table */
7790 + fotg210->periodic =
7791 + dma_alloc_coherent(fotg210_to_hcd(fotg210)->self.controller,
7792 + fotg210->periodic_size * sizeof(__le32),
7793 + &fotg210->periodic_dma, 0);
7794 + if (fotg210->periodic == NULL)
7795 + goto fail;
7796 +
7797 + for (i = 0; i < fotg210->periodic_size; i++)
7798 + fotg210->periodic[i] = FOTG210_LIST_END(fotg210);
7799 +
7800 + /* software shadow of hardware table */
7801 + fotg210->pshadow = kcalloc(fotg210->periodic_size, sizeof(void *),
7802 + flags);
7803 + if (fotg210->pshadow != NULL)
7804 + return 0;
7805 +
7806 +fail:
7807 + fotg210_dbg(fotg210, "couldn't init memory\n");
7808 + fotg210_mem_cleanup(fotg210);
7809 + return -ENOMEM;
7810 +}
7811 +/* EHCI hardware queue manipulation ... the core. QH/QTD manipulation.
7812 + *
7813 + * Control, bulk, and interrupt traffic all use "qh" lists. They list "qtd"
7814 + * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
7815 + * buffers needed for the larger number). We use one QH per endpoint, queue
7816 + * multiple urbs (all three types) per endpoint. URBs may need several qtds.
7817 + *
7818 + * ISO traffic uses "ISO TD" (itd) records, and (along with
7819 + * interrupts) needs careful scheduling. Performance improvements can be
7820 + * an ongoing challenge. That's in "ehci-sched.c".
7821 + *
7822 + * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
7823 + * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
7824 + * (b) special fields in qh entries or (c) split iso entries. TTs will
7825 + * buffer low/full speed data so the host collects it at high speed.
7826 + */
7827 +
7828 +/* fill a qtd, returning how much of the buffer we were able to queue up */
7829 +static int qtd_fill(struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd,
7830 + dma_addr_t buf, size_t len, int token, int maxpacket)
7831 +{
7832 + int i, count;
7833 + u64 addr = buf;
7834 +
7835 + /* one buffer entry per 4K ... first might be short or unaligned */
7836 + qtd->hw_buf[0] = cpu_to_hc32(fotg210, (u32)addr);
7837 + qtd->hw_buf_hi[0] = cpu_to_hc32(fotg210, (u32)(addr >> 32));
7838 + count = 0x1000 - (buf & 0x0fff); /* rest of that page */
7839 + if (likely(len < count)) /* ... iff needed */
7840 + count = len;
7841 + else {
7842 + buf += 0x1000;
7843 + buf &= ~0x0fff;
7844 +
7845 + /* per-qtd limit: from 16K to 20K (best alignment) */
7846 + for (i = 1; count < len && i < 5; i++) {
7847 + addr = buf;
7848 + qtd->hw_buf[i] = cpu_to_hc32(fotg210, (u32)addr);
7849 + qtd->hw_buf_hi[i] = cpu_to_hc32(fotg210,
7850 + (u32)(addr >> 32));
7851 + buf += 0x1000;
7852 + if ((count + 0x1000) < len)
7853 + count += 0x1000;
7854 + else
7855 + count = len;
7856 + }
7857 +
7858 + /* short packets may only terminate transfers */
7859 + if (count != len)
7860 + count -= (count % maxpacket);
7861 + }
7862 + qtd->hw_token = cpu_to_hc32(fotg210, (count << 16) | token);
7863 + qtd->length = count;
7864 +
7865 + return count;
7866 +}
7867 +
7868 +static inline void qh_update(struct fotg210_hcd *fotg210,
7869 + struct fotg210_qh *qh, struct fotg210_qtd *qtd)
7870 +{
7871 + struct fotg210_qh_hw *hw = qh->hw;
7872 +
7873 + /* writes to an active overlay are unsafe */
7874 + BUG_ON(qh->qh_state != QH_STATE_IDLE);
7875 +
7876 + hw->hw_qtd_next = QTD_NEXT(fotg210, qtd->qtd_dma);
7877 + hw->hw_alt_next = FOTG210_LIST_END(fotg210);
7878 +
7879 + /* Except for control endpoints, we make hardware maintain data
7880 + * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
7881 + * and set the pseudo-toggle in udev. Only usb_clear_halt() will
7882 + * ever clear it.
7883 + */
7884 + if (!(hw->hw_info1 & cpu_to_hc32(fotg210, QH_TOGGLE_CTL))) {
7885 + unsigned is_out, epnum;
7886 +
7887 + is_out = qh->is_out;
7888 + epnum = (hc32_to_cpup(fotg210, &hw->hw_info1) >> 8) & 0x0f;
7889 + if (unlikely(!usb_gettoggle(qh->dev, epnum, is_out))) {
7890 + hw->hw_token &= ~cpu_to_hc32(fotg210, QTD_TOGGLE);
7891 + usb_settoggle(qh->dev, epnum, is_out, 1);
7892 + }
7893 + }
7894 +
7895 + hw->hw_token &= cpu_to_hc32(fotg210, QTD_TOGGLE | QTD_STS_PING);
7896 +}
7897 +
7898 +/* if it weren't for a common silicon quirk (writing the dummy into the qh
7899 + * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
7900 + * recovery (including urb dequeue) would need software changes to a QH...
7901 + */
7902 +static void qh_refresh(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
7903 +{
7904 + struct fotg210_qtd *qtd;
7905 +
7906 + if (list_empty(&qh->qtd_list))
7907 + qtd = qh->dummy;
7908 + else {
7909 + qtd = list_entry(qh->qtd_list.next,
7910 + struct fotg210_qtd, qtd_list);
7911 + /*
7912 + * first qtd may already be partially processed.
7913 + * If we come here during unlink, the QH overlay region
7914 + * might have reference to the just unlinked qtd. The
7915 + * qtd is updated in qh_completions(). Update the QH
7916 + * overlay here.
7917 + */
7918 + if (cpu_to_hc32(fotg210, qtd->qtd_dma) == qh->hw->hw_current) {
7919 + qh->hw->hw_qtd_next = qtd->hw_next;
7920 + qtd = NULL;
7921 + }
7922 + }
7923 +
7924 + if (qtd)
7925 + qh_update(fotg210, qh, qtd);
7926 +}
7927 +
7928 +static void qh_link_async(struct fotg210_hcd *fotg210, struct fotg210_qh *qh);
7929 +
7930 +static void fotg210_clear_tt_buffer_complete(struct usb_hcd *hcd,
7931 + struct usb_host_endpoint *ep)
7932 +{
7933 + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
7934 + struct fotg210_qh *qh = ep->hcpriv;
7935 + unsigned long flags;
7936 +
7937 + spin_lock_irqsave(&fotg210->lock, flags);
7938 + qh->clearing_tt = 0;
7939 + if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
7940 + && fotg210->rh_state == FOTG210_RH_RUNNING)
7941 + qh_link_async(fotg210, qh);
7942 + spin_unlock_irqrestore(&fotg210->lock, flags);
7943 +}
7944 +
7945 +static void fotg210_clear_tt_buffer(struct fotg210_hcd *fotg210,
7946 + struct fotg210_qh *qh, struct urb *urb, u32 token)
7947 +{
7948 +
7949 + /* If an async split transaction gets an error or is unlinked,
7950 + * the TT buffer may be left in an indeterminate state. We
7951 + * have to clear the TT buffer.
7952 + *
7953 + * Note: this routine is never called for Isochronous transfers.
7954 + */
7955 + if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
7956 + struct usb_device *tt = urb->dev->tt->hub;
7957 +
7958 + dev_dbg(&tt->dev,
7959 + "clear tt buffer port %d, a%d ep%d t%08x\n",
7960 + urb->dev->ttport, urb->dev->devnum,
7961 + usb_pipeendpoint(urb->pipe), token);
7962 +
7963 + if (urb->dev->tt->hub !=
7964 + fotg210_to_hcd(fotg210)->self.root_hub) {
7965 + if (usb_hub_clear_tt_buffer(urb) == 0)
7966 + qh->clearing_tt = 1;
7967 + }
7968 + }
7969 +}
7970 +
7971 +static int qtd_copy_status(struct fotg210_hcd *fotg210, struct urb *urb,
7972 + size_t length, u32 token)
7973 +{
7974 + int status = -EINPROGRESS;
7975 +
7976 + /* count IN/OUT bytes, not SETUP (even short packets) */
7977 + if (likely(QTD_PID(token) != 2))
7978 + urb->actual_length += length - QTD_LENGTH(token);
7979 +
7980 + /* don't modify error codes */
7981 + if (unlikely(urb->unlinked))
7982 + return status;
7983 +
7984 + /* force cleanup after short read; not always an error */
7985 + if (unlikely(IS_SHORT_READ(token)))
7986 + status = -EREMOTEIO;
7987 +
7988 + /* serious "can't proceed" faults reported by the hardware */
7989 + if (token & QTD_STS_HALT) {
7990 + if (token & QTD_STS_BABBLE) {
7991 + /* FIXME "must" disable babbling device's port too */
7992 + status = -EOVERFLOW;
7993 + /* CERR nonzero + halt --> stall */
7994 + } else if (QTD_CERR(token)) {
7995 + status = -EPIPE;
7996 +
7997 + /* In theory, more than one of the following bits can be set
7998 + * since they are sticky and the transaction is retried.
7999 + * Which to test first is rather arbitrary.
8000 + */
8001 + } else if (token & QTD_STS_MMF) {
8002 + /* fs/ls interrupt xfer missed the complete-split */
8003 + status = -EPROTO;
8004 + } else if (token & QTD_STS_DBE) {
8005 + status = (QTD_PID(token) == 1) /* IN ? */
8006 + ? -ENOSR /* hc couldn't read data */
8007 + : -ECOMM; /* hc couldn't write data */
8008 + } else if (token & QTD_STS_XACT) {
8009 + /* timeout, bad CRC, wrong PID, etc */
8010 + fotg210_dbg(fotg210, "devpath %s ep%d%s 3strikes\n",
8011 + urb->dev->devpath,
8012 + usb_pipeendpoint(urb->pipe),
8013 + usb_pipein(urb->pipe) ? "in" : "out");
8014 + status = -EPROTO;
8015 + } else { /* unknown */
8016 + status = -EPROTO;
8017 + }
8018 +
8019 + fotg210_dbg(fotg210,
8020 + "dev%d ep%d%s qtd token %08x --> status %d\n",
8021 + usb_pipedevice(urb->pipe),
8022 + usb_pipeendpoint(urb->pipe),
8023 + usb_pipein(urb->pipe) ? "in" : "out",
8024 + token, status);
8025 + }
8026 +
8027 + return status;
8028 +}
8029 +
8030 +static void fotg210_urb_done(struct fotg210_hcd *fotg210, struct urb *urb,
8031 + int status)
8032 +__releases(fotg210->lock)
8033 +__acquires(fotg210->lock)
8034 +{
8035 + if (likely(urb->hcpriv != NULL)) {
8036 + struct fotg210_qh *qh = (struct fotg210_qh *) urb->hcpriv;
8037 +
8038 + /* S-mask in a QH means it's an interrupt urb */
8039 + if ((qh->hw->hw_info2 & cpu_to_hc32(fotg210, QH_SMASK)) != 0) {
8040 +
8041 + /* ... update hc-wide periodic stats (for usbfs) */
8042 + fotg210_to_hcd(fotg210)->self.bandwidth_int_reqs--;
8043 + }
8044 + }
8045 +
8046 + if (unlikely(urb->unlinked)) {
8047 + INCR(fotg210->stats.unlink);
8048 + } else {
8049 + /* report non-error and short read status as zero */
8050 + if (status == -EINPROGRESS || status == -EREMOTEIO)
8051 + status = 0;
8052 + INCR(fotg210->stats.complete);
8053 + }
8054 +
8055 +#ifdef FOTG210_URB_TRACE
8056 + fotg210_dbg(fotg210,
8057 + "%s %s urb %p ep%d%s status %d len %d/%d\n",
8058 + __func__, urb->dev->devpath, urb,
8059 + usb_pipeendpoint(urb->pipe),
8060 + usb_pipein(urb->pipe) ? "in" : "out",
8061 + status,
8062 + urb->actual_length, urb->transfer_buffer_length);
8063 +#endif
8064 +
8065 + /* complete() can reenter this HCD */
8066 + usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
8067 + spin_unlock(&fotg210->lock);
8068 + usb_hcd_giveback_urb(fotg210_to_hcd(fotg210), urb, status);
8069 + spin_lock(&fotg210->lock);
8070 +}
8071 +
8072 +static int qh_schedule(struct fotg210_hcd *fotg210, struct fotg210_qh *qh);
8073 +
8074 +/* Process and free completed qtds for a qh, returning URBs to drivers.
8075 + * Chases up to qh->hw_current. Returns number of completions called,
8076 + * indicating how much "real" work we did.
8077 + */
8078 +static unsigned qh_completions(struct fotg210_hcd *fotg210,
8079 + struct fotg210_qh *qh)
8080 +{
8081 + struct fotg210_qtd *last, *end = qh->dummy;
8082 + struct fotg210_qtd *qtd, *tmp;
8083 + int last_status;
8084 + int stopped;
8085 + unsigned count = 0;
8086 + u8 state;
8087 + struct fotg210_qh_hw *hw = qh->hw;
8088 +
8089 + if (unlikely(list_empty(&qh->qtd_list)))
8090 + return count;
8091 +
8092 + /* completions (or tasks on other cpus) must never clobber HALT
8093 + * till we've gone through and cleaned everything up, even when
8094 + * they add urbs to this qh's queue or mark them for unlinking.
8095 + *
8096 + * NOTE: unlinking expects to be done in queue order.
8097 + *
8098 + * It's a bug for qh->qh_state to be anything other than
8099 + * QH_STATE_IDLE, unless our caller is scan_async() or
8100 + * scan_intr().
8101 + */
8102 + state = qh->qh_state;
8103 + qh->qh_state = QH_STATE_COMPLETING;
8104 + stopped = (state == QH_STATE_IDLE);
8105 +
8106 +rescan:
8107 + last = NULL;
8108 + last_status = -EINPROGRESS;
8109 + qh->needs_rescan = 0;
8110 +
8111 + /* remove de-activated QTDs from front of queue.
8112 + * after faults (including short reads), cleanup this urb
8113 + * then let the queue advance.
8114 + * if queue is stopped, handles unlinks.
8115 + */
8116 + list_for_each_entry_safe(qtd, tmp, &qh->qtd_list, qtd_list) {
8117 + struct urb *urb;
8118 + u32 token = 0;
8119 +
8120 + urb = qtd->urb;
8121 +
8122 + /* clean up any state from previous QTD ...*/
8123 + if (last) {
8124 + if (likely(last->urb != urb)) {
8125 + fotg210_urb_done(fotg210, last->urb,
8126 + last_status);
8127 + count++;
8128 + last_status = -EINPROGRESS;
8129 + }
8130 + fotg210_qtd_free(fotg210, last);
8131 + last = NULL;
8132 + }
8133 +
8134 + /* ignore urbs submitted during completions we reported */
8135 + if (qtd == end)
8136 + break;
8137 +
8138 + /* hardware copies qtd out of qh overlay */
8139 + rmb();
8140 + token = hc32_to_cpu(fotg210, qtd->hw_token);
8141 +
8142 + /* always clean up qtds the hc de-activated */
8143 +retry_xacterr:
8144 + if ((token & QTD_STS_ACTIVE) == 0) {
8145 +
8146 + /* Report Data Buffer Error: non-fatal but useful */
8147 + if (token & QTD_STS_DBE)
8148 + fotg210_dbg(fotg210,
8149 + "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
8150 + urb, usb_endpoint_num(&urb->ep->desc),
8151 + usb_endpoint_dir_in(&urb->ep->desc)
8152 + ? "in" : "out",
8153 + urb->transfer_buffer_length, qtd, qh);
8154 +
8155 + /* on STALL, error, and short reads this urb must
8156 + * complete and all its qtds must be recycled.
8157 + */
8158 + if ((token & QTD_STS_HALT) != 0) {
8159 +
8160 + /* retry transaction errors until we
8161 + * reach the software xacterr limit
8162 + */
8163 + if ((token & QTD_STS_XACT) &&
8164 + QTD_CERR(token) == 0 &&
8165 + ++qh->xacterrs < QH_XACTERR_MAX &&
8166 + !urb->unlinked) {
8167 + fotg210_dbg(fotg210,
8168 + "detected XactErr len %zu/%zu retry %d\n",
8169 + qtd->length - QTD_LENGTH(token),
8170 + qtd->length,
8171 + qh->xacterrs);
8172 +
8173 + /* reset the token in the qtd and the
8174 + * qh overlay (which still contains
8175 + * the qtd) so that we pick up from
8176 + * where we left off
8177 + */
8178 + token &= ~QTD_STS_HALT;
8179 + token |= QTD_STS_ACTIVE |
8180 + (FOTG210_TUNE_CERR << 10);
8181 + qtd->hw_token = cpu_to_hc32(fotg210,
8182 + token);
8183 + wmb();
8184 + hw->hw_token = cpu_to_hc32(fotg210,
8185 + token);
8186 + goto retry_xacterr;
8187 + }
8188 + stopped = 1;
8189 +
8190 + /* magic dummy for some short reads; qh won't advance.
8191 + * that silicon quirk can kick in with this dummy too.
8192 + *
8193 + * other short reads won't stop the queue, including
8194 + * control transfers (status stage handles that) or
8195 + * most other single-qtd reads ... the queue stops if
8196 + * URB_SHORT_NOT_OK was set so the driver submitting
8197 + * the urbs could clean it up.
8198 + */
8199 + } else if (IS_SHORT_READ(token) &&
8200 + !(qtd->hw_alt_next &
8201 + FOTG210_LIST_END(fotg210))) {
8202 + stopped = 1;
8203 + }
8204 +
8205 + /* stop scanning when we reach qtds the hc is using */
8206 + } else if (likely(!stopped
8207 + && fotg210->rh_state >= FOTG210_RH_RUNNING)) {
8208 + break;
8209 +
8210 + /* scan the whole queue for unlinks whenever it stops */
8211 + } else {
8212 + stopped = 1;
8213 +
8214 + /* cancel everything if we halt, suspend, etc */
8215 + if (fotg210->rh_state < FOTG210_RH_RUNNING)
8216 + last_status = -ESHUTDOWN;
8217 +
8218 + /* this qtd is active; skip it unless a previous qtd
8219 + * for its urb faulted, or its urb was canceled.
8220 + */
8221 + else if (last_status == -EINPROGRESS && !urb->unlinked)
8222 + continue;
8223 +
8224 + /* qh unlinked; token in overlay may be most current */
8225 + if (state == QH_STATE_IDLE &&
8226 + cpu_to_hc32(fotg210, qtd->qtd_dma)
8227 + == hw->hw_current) {
8228 + token = hc32_to_cpu(fotg210, hw->hw_token);
8229 +
8230 + /* An unlink may leave an incomplete
8231 + * async transaction in the TT buffer.
8232 + * We have to clear it.
8233 + */
8234 + fotg210_clear_tt_buffer(fotg210, qh, urb,
8235 + token);
8236 + }
8237 + }
8238 +
8239 + /* unless we already know the urb's status, collect qtd status
8240 + * and update count of bytes transferred. in common short read
8241 + * cases with only one data qtd (including control transfers),
8242 + * queue processing won't halt. but with two or more qtds (for
8243 + * example, with a 32 KB transfer), when the first qtd gets a
8244 + * short read the second must be removed by hand.
8245 + */
8246 + if (last_status == -EINPROGRESS) {
8247 + last_status = qtd_copy_status(fotg210, urb,
8248 + qtd->length, token);
8249 + if (last_status == -EREMOTEIO &&
8250 + (qtd->hw_alt_next &
8251 + FOTG210_LIST_END(fotg210)))
8252 + last_status = -EINPROGRESS;
8253 +
8254 + /* As part of low/full-speed endpoint-halt processing
8255 + * we must clear the TT buffer (11.17.5).
8256 + */
8257 + if (unlikely(last_status != -EINPROGRESS &&
8258 + last_status != -EREMOTEIO)) {
8259 + /* The TT's in some hubs malfunction when they
8260 + * receive this request following a STALL (they
8261 + * stop sending isochronous packets). Since a
8262 + * STALL can't leave the TT buffer in a busy
8263 + * state (if you believe Figures 11-48 - 11-51
8264 + * in the USB 2.0 spec), we won't clear the TT
8265 + * buffer in this case. Strictly speaking this
8266 + * is a violation of the spec.
8267 + */
8268 + if (last_status != -EPIPE)
8269 + fotg210_clear_tt_buffer(fotg210, qh,
8270 + urb, token);
8271 + }
8272 + }
8273 +
8274 + /* if we're removing something not at the queue head,
8275 + * patch the hardware queue pointer.
8276 + */
8277 + if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
8278 + last = list_entry(qtd->qtd_list.prev,
8279 + struct fotg210_qtd, qtd_list);
8280 + last->hw_next = qtd->hw_next;
8281 + }
8282 +
8283 + /* remove qtd; it's recycled after possible urb completion */
8284 + list_del(&qtd->qtd_list);
8285 + last = qtd;
8286 +
8287 + /* reinit the xacterr counter for the next qtd */
8288 + qh->xacterrs = 0;
8289 + }
8290 +
8291 + /* last urb's completion might still need calling */
8292 + if (likely(last != NULL)) {
8293 + fotg210_urb_done(fotg210, last->urb, last_status);
8294 + count++;
8295 + fotg210_qtd_free(fotg210, last);
8296 + }
8297 +
8298 + /* Do we need to rescan for URBs dequeued during a giveback? */
8299 + if (unlikely(qh->needs_rescan)) {
8300 + /* If the QH is already unlinked, do the rescan now. */
8301 + if (state == QH_STATE_IDLE)
8302 + goto rescan;
8303 +
8304 + /* Otherwise we have to wait until the QH is fully unlinked.
8305 + * Our caller will start an unlink if qh->needs_rescan is
8306 + * set. But if an unlink has already started, nothing needs
8307 + * to be done.
8308 + */
8309 + if (state != QH_STATE_LINKED)
8310 + qh->needs_rescan = 0;
8311 + }
8312 +
8313 + /* restore original state; caller must unlink or relink */
8314 + qh->qh_state = state;
8315 +
8316 + /* be sure the hardware's done with the qh before refreshing
8317 + * it after fault cleanup, or recovering from silicon wrongly
8318 + * overlaying the dummy qtd (which reduces DMA chatter).
8319 + */
8320 + if (stopped != 0 || hw->hw_qtd_next == FOTG210_LIST_END(fotg210)) {
8321 + switch (state) {
8322 + case QH_STATE_IDLE:
8323 + qh_refresh(fotg210, qh);
8324 + break;
8325 + case QH_STATE_LINKED:
8326 + /* We won't refresh a QH that's linked (after the HC
8327 + * stopped the queue). That avoids a race:
8328 + * - HC reads first part of QH;
8329 + * - CPU updates that first part and the token;
8330 + * - HC reads rest of that QH, including token
8331 + * Result: HC gets an inconsistent image, and then
8332 + * DMAs to/from the wrong memory (corrupting it).
8333 + *
8334 + * That should be rare for interrupt transfers,
8335 + * except maybe high bandwidth ...
8336 + */
8337 +
8338 + /* Tell the caller to start an unlink */
8339 + qh->needs_rescan = 1;
8340 + break;
8341 + /* otherwise, unlink already started */
8342 + }
8343 + }
8344 +
8345 + return count;
8346 +}
8347 +
8348 +/* reverse of qh_urb_transaction: free a list of TDs.
8349 + * used for cleanup after errors, before HC sees an URB's TDs.
8350 + */
8351 +static void qtd_list_free(struct fotg210_hcd *fotg210, struct urb *urb,
8352 + struct list_head *head)
8353 +{
8354 + struct fotg210_qtd *qtd, *temp;
8355 +
8356 + list_for_each_entry_safe(qtd, temp, head, qtd_list) {
8357 + list_del(&qtd->qtd_list);
8358 + fotg210_qtd_free(fotg210, qtd);
8359 + }
8360 +}
8361 +
8362 +/* create a list of filled qtds for this URB; won't link into qh.
8363 + */
8364 +static struct list_head *qh_urb_transaction(struct fotg210_hcd *fotg210,
8365 + struct urb *urb, struct list_head *head, gfp_t flags)
8366 +{
8367 + struct fotg210_qtd *qtd, *qtd_prev;
8368 + dma_addr_t buf;
8369 + int len, this_sg_len, maxpacket;
8370 + int is_input;
8371 + u32 token;
8372 + int i;
8373 + struct scatterlist *sg;
8374 +
8375 + /*
8376 + * URBs map to sequences of QTDs: one logical transaction
8377 + */
8378 + qtd = fotg210_qtd_alloc(fotg210, flags);
8379 + if (unlikely(!qtd))
8380 + return NULL;
8381 + list_add_tail(&qtd->qtd_list, head);
8382 + qtd->urb = urb;
8383 +
8384 + token = QTD_STS_ACTIVE;
8385 + token |= (FOTG210_TUNE_CERR << 10);
8386 + /* for split transactions, SplitXState initialized to zero */
8387 +
8388 + len = urb->transfer_buffer_length;
8389 + is_input = usb_pipein(urb->pipe);
8390 + if (usb_pipecontrol(urb->pipe)) {
8391 + /* SETUP pid */
8392 + qtd_fill(fotg210, qtd, urb->setup_dma,
8393 + sizeof(struct usb_ctrlrequest),
8394 + token | (2 /* "setup" */ << 8), 8);
8395 +
8396 + /* ... and always at least one more pid */
8397 + token ^= QTD_TOGGLE;
8398 + qtd_prev = qtd;
8399 + qtd = fotg210_qtd_alloc(fotg210, flags);
8400 + if (unlikely(!qtd))
8401 + goto cleanup;
8402 + qtd->urb = urb;
8403 + qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma);
8404 + list_add_tail(&qtd->qtd_list, head);
8405 +
8406 + /* for zero length DATA stages, STATUS is always IN */
8407 + if (len == 0)
8408 + token |= (1 /* "in" */ << 8);
8409 + }
8410 +
8411 + /*
8412 + * data transfer stage: buffer setup
8413 + */
8414 + i = urb->num_mapped_sgs;
8415 + if (len > 0 && i > 0) {
8416 + sg = urb->sg;
8417 + buf = sg_dma_address(sg);
8418 +
8419 + /* urb->transfer_buffer_length may be smaller than the
8420 + * size of the scatterlist (or vice versa)
8421 + */
8422 + this_sg_len = min_t(int, sg_dma_len(sg), len);
8423 + } else {
8424 + sg = NULL;
8425 + buf = urb->transfer_dma;
8426 + this_sg_len = len;
8427 + }
8428 +
8429 + if (is_input)
8430 + token |= (1 /* "in" */ << 8);
8431 + /* else it's already initted to "out" pid (0 << 8) */
8432 +
8433 + maxpacket = usb_maxpacket(urb->dev, urb->pipe);
8434 +
8435 + /*
8436 + * buffer gets wrapped in one or more qtds;
8437 + * last one may be "short" (including zero len)
8438 + * and may serve as a control status ack
8439 + */
8440 + for (;;) {
8441 + int this_qtd_len;
8442 +
8443 + this_qtd_len = qtd_fill(fotg210, qtd, buf, this_sg_len, token,
8444 + maxpacket);
8445 + this_sg_len -= this_qtd_len;
8446 + len -= this_qtd_len;
8447 + buf += this_qtd_len;
8448 +
8449 + /*
8450 + * short reads advance to a "magic" dummy instead of the next
8451 + * qtd ... that forces the queue to stop, for manual cleanup.
8452 + * (this will usually be overridden later.)
8453 + */
8454 + if (is_input)
8455 + qtd->hw_alt_next = fotg210->async->hw->hw_alt_next;
8456 +
8457 + /* qh makes control packets use qtd toggle; maybe switch it */
8458 + if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
8459 + token ^= QTD_TOGGLE;
8460 +
8461 + if (likely(this_sg_len <= 0)) {
8462 + if (--i <= 0 || len <= 0)
8463 + break;
8464 + sg = sg_next(sg);
8465 + buf = sg_dma_address(sg);
8466 + this_sg_len = min_t(int, sg_dma_len(sg), len);
8467 + }
8468 +
8469 + qtd_prev = qtd;
8470 + qtd = fotg210_qtd_alloc(fotg210, flags);
8471 + if (unlikely(!qtd))
8472 + goto cleanup;
8473 + qtd->urb = urb;
8474 + qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma);
8475 + list_add_tail(&qtd->qtd_list, head);
8476 + }
8477 +
8478 + /*
8479 + * unless the caller requires manual cleanup after short reads,
8480 + * have the alt_next mechanism keep the queue running after the
8481 + * last data qtd (the only one, for control and most other cases).
8482 + */
8483 + if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0 ||
8484 + usb_pipecontrol(urb->pipe)))
8485 + qtd->hw_alt_next = FOTG210_LIST_END(fotg210);
8486 +
8487 + /*
8488 + * control requests may need a terminating data "status" ack;
8489 + * other OUT ones may need a terminating short packet
8490 + * (zero length).
8491 + */
8492 + if (likely(urb->transfer_buffer_length != 0)) {
8493 + int one_more = 0;
8494 +
8495 + if (usb_pipecontrol(urb->pipe)) {
8496 + one_more = 1;
8497 + token ^= 0x0100; /* "in" <--> "out" */
8498 + token |= QTD_TOGGLE; /* force DATA1 */
8499 + } else if (usb_pipeout(urb->pipe)
8500 + && (urb->transfer_flags & URB_ZERO_PACKET)
8501 + && !(urb->transfer_buffer_length % maxpacket)) {
8502 + one_more = 1;
8503 + }
8504 + if (one_more) {
8505 + qtd_prev = qtd;
8506 + qtd = fotg210_qtd_alloc(fotg210, flags);
8507 + if (unlikely(!qtd))
8508 + goto cleanup;
8509 + qtd->urb = urb;
8510 + qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma);
8511 + list_add_tail(&qtd->qtd_list, head);
8512 +
8513 + /* never any data in such packets */
8514 + qtd_fill(fotg210, qtd, 0, 0, token, 0);
8515 + }
8516 + }
8517 +
8518 + /* by default, enable interrupt on urb completion */
8519 + if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT)))
8520 + qtd->hw_token |= cpu_to_hc32(fotg210, QTD_IOC);
8521 + return head;
8522 +
8523 +cleanup:
8524 + qtd_list_free(fotg210, urb, head);
8525 + return NULL;
8526 +}
8527 +
8528 +/* Would be best to create all qh's from config descriptors,
8529 + * when each interface/altsetting is established. Unlink
8530 + * any previous qh and cancel its urbs first; endpoints are
8531 + * implicitly reset then (data toggle too).
8532 + * That'd mean updating how usbcore talks to HCDs. (2.7?)
8533 + */
8534 +
8535 +
8536 +/* Each QH holds a qtd list; a QH is used for everything except iso.
8537 + *
8538 + * For interrupt urbs, the scheduler must set the microframe scheduling
8539 + * mask(s) each time the QH gets scheduled. For highspeed, that's
8540 + * just one microframe in the s-mask. For split interrupt transactions
8541 + * there are additional complications: c-mask, maybe FSTNs.
8542 + */
8543 +static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
8544 + gfp_t flags)
8545 +{
8546 + struct fotg210_qh *qh = fotg210_qh_alloc(fotg210, flags);
8547 + struct usb_host_endpoint *ep;
8548 + u32 info1 = 0, info2 = 0;
8549 + int is_input, type;
8550 + int maxp = 0;
8551 + int mult;
8552 + struct usb_tt *tt = urb->dev->tt;
8553 + struct fotg210_qh_hw *hw;
8554 +
8555 + if (!qh)
8556 + return qh;
8557 +
8558 + /*
8559 + * init endpoint/device data for this QH
8560 + */
8561 + info1 |= usb_pipeendpoint(urb->pipe) << 8;
8562 + info1 |= usb_pipedevice(urb->pipe) << 0;
8563 +
8564 + is_input = usb_pipein(urb->pipe);
8565 + type = usb_pipetype(urb->pipe);
8566 + ep = usb_pipe_endpoint(urb->dev, urb->pipe);
8567 + maxp = usb_endpoint_maxp(&ep->desc);
8568 + mult = usb_endpoint_maxp_mult(&ep->desc);
8569 +
8570 + /* 1024 byte maxpacket is a hardware ceiling. High bandwidth
8571 + * acts like up to 3KB, but is built from smaller packets.
8572 + */
8573 + if (maxp > 1024) {
8574 + fotg210_dbg(fotg210, "bogus qh maxpacket %d\n", maxp);
8575 + goto done;
8576 + }
8577 +
8578 + /* Compute interrupt scheduling parameters just once, and save.
8579 + * - allowing for high bandwidth, how many nsec/uframe are used?
8580 + * - split transactions need a second CSPLIT uframe; same question
8581 + * - splits also need a schedule gap (for full/low speed I/O)
8582 + * - qh has a polling interval
8583 + *
8584 + * For control/bulk requests, the HC or TT handles these.
8585 + */
8586 + if (type == PIPE_INTERRUPT) {
8587 + qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
8588 + is_input, 0, mult * maxp));
8589 + qh->start = NO_FRAME;
8590 +
8591 + if (urb->dev->speed == USB_SPEED_HIGH) {
8592 + qh->c_usecs = 0;
8593 + qh->gap_uf = 0;
8594 +
8595 + qh->period = urb->interval >> 3;
8596 + if (qh->period == 0 && urb->interval != 1) {
8597 + /* NOTE interval 2 or 4 uframes could work.
8598 + * But interval 1 scheduling is simpler, and
8599 + * includes high bandwidth.
8600 + */
8601 + urb->interval = 1;
8602 + } else if (qh->period > fotg210->periodic_size) {
8603 + qh->period = fotg210->periodic_size;
8604 + urb->interval = qh->period << 3;
8605 + }
8606 + } else {
8607 + int think_time;
8608 +
8609 + /* gap is f(FS/LS transfer times) */
8610 + qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed,
8611 + is_input, 0, maxp) / (125 * 1000);
8612 +
8613 + /* FIXME this just approximates SPLIT/CSPLIT times */
8614 + if (is_input) { /* SPLIT, gap, CSPLIT+DATA */
8615 + qh->c_usecs = qh->usecs + HS_USECS(0);
8616 + qh->usecs = HS_USECS(1);
8617 + } else { /* SPLIT+DATA, gap, CSPLIT */
8618 + qh->usecs += HS_USECS(1);
8619 + qh->c_usecs = HS_USECS(0);
8620 + }
8621 +
8622 + think_time = tt ? tt->think_time : 0;
8623 + qh->tt_usecs = NS_TO_US(think_time +
8624 + usb_calc_bus_time(urb->dev->speed,
8625 + is_input, 0, maxp));
8626 + qh->period = urb->interval;
8627 + if (qh->period > fotg210->periodic_size) {
8628 + qh->period = fotg210->periodic_size;
8629 + urb->interval = qh->period;
8630 + }
8631 + }
8632 + }
8633 +
8634 + /* support for tt scheduling, and access to toggles */
8635 + qh->dev = urb->dev;
8636 +
8637 + /* using TT? */
8638 + switch (urb->dev->speed) {
8639 + case USB_SPEED_LOW:
8640 + info1 |= QH_LOW_SPEED;
8641 + fallthrough;
8642 +
8643 + case USB_SPEED_FULL:
8644 + /* EPS 0 means "full" */
8645 + if (type != PIPE_INTERRUPT)
8646 + info1 |= (FOTG210_TUNE_RL_TT << 28);
8647 + if (type == PIPE_CONTROL) {
8648 + info1 |= QH_CONTROL_EP; /* for TT */
8649 + info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
8650 + }
8651 + info1 |= maxp << 16;
8652 +
8653 + info2 |= (FOTG210_TUNE_MULT_TT << 30);
8654 +
8655 + /* Some Freescale processors have an erratum in which the
8656 + * port number in the queue head was 0..N-1 instead of 1..N.
8657 + */
8658 + if (fotg210_has_fsl_portno_bug(fotg210))
8659 + info2 |= (urb->dev->ttport-1) << 23;
8660 + else
8661 + info2 |= urb->dev->ttport << 23;
8662 +
8663 + /* set the address of the TT; for TDI's integrated
8664 + * root hub tt, leave it zeroed.
8665 + */
8666 + if (tt && tt->hub != fotg210_to_hcd(fotg210)->self.root_hub)
8667 + info2 |= tt->hub->devnum << 16;
8668 +
8669 + /* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */
8670 +
8671 + break;
8672 +
8673 + case USB_SPEED_HIGH: /* no TT involved */
8674 + info1 |= QH_HIGH_SPEED;
8675 + if (type == PIPE_CONTROL) {
8676 + info1 |= (FOTG210_TUNE_RL_HS << 28);
8677 + info1 |= 64 << 16; /* usb2 fixed maxpacket */
8678 + info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
8679 + info2 |= (FOTG210_TUNE_MULT_HS << 30);
8680 + } else if (type == PIPE_BULK) {
8681 + info1 |= (FOTG210_TUNE_RL_HS << 28);
8682 + /* The USB spec says that high speed bulk endpoints
8683 + * always use 512 byte maxpacket. But some device
8684 + * vendors decided to ignore that, and MSFT is happy
8685 + * to help them do so. So now people expect to use
8686 + * such nonconformant devices with Linux too; sigh.
8687 + */
8688 + info1 |= maxp << 16;
8689 + info2 |= (FOTG210_TUNE_MULT_HS << 30);
8690 + } else { /* PIPE_INTERRUPT */
8691 + info1 |= maxp << 16;
8692 + info2 |= mult << 30;
8693 + }
8694 + break;
8695 + default:
8696 + fotg210_dbg(fotg210, "bogus dev %p speed %d\n", urb->dev,
8697 + urb->dev->speed);
8698 +done:
8699 + qh_destroy(fotg210, qh);
8700 + return NULL;
8701 + }
8702 +
8703 + /* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */
8704 +
8705 + /* init as live, toggle clear, advance to dummy */
8706 + qh->qh_state = QH_STATE_IDLE;
8707 + hw = qh->hw;
8708 + hw->hw_info1 = cpu_to_hc32(fotg210, info1);
8709 + hw->hw_info2 = cpu_to_hc32(fotg210, info2);
8710 + qh->is_out = !is_input;
8711 + usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input, 1);
8712 + qh_refresh(fotg210, qh);
8713 + return qh;
8714 +}
8715 +
8716 +static void enable_async(struct fotg210_hcd *fotg210)
8717 +{
8718 + if (fotg210->async_count++)
8719 + return;
8720 +
8721 + /* Stop waiting to turn off the async schedule */
8722 + fotg210->enabled_hrtimer_events &= ~BIT(FOTG210_HRTIMER_DISABLE_ASYNC);
8723 +
8724 + /* Don't start the schedule until ASS is 0 */
8725 + fotg210_poll_ASS(fotg210);
8726 + turn_on_io_watchdog(fotg210);
8727 +}
8728 +
8729 +static void disable_async(struct fotg210_hcd *fotg210)
8730 +{
8731 + if (--fotg210->async_count)
8732 + return;
8733 +
8734 + /* The async schedule and async_unlink list are supposed to be empty */
8735 + WARN_ON(fotg210->async->qh_next.qh || fotg210->async_unlink);
8736 +
8737 + /* Don't turn off the schedule until ASS is 1 */
8738 + fotg210_poll_ASS(fotg210);
8739 +}
8740 +
8741 +/* move qh (and its qtds) onto async queue; maybe enable queue. */
8742 +
8743 +static void qh_link_async(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
8744 +{
8745 + __hc32 dma = QH_NEXT(fotg210, qh->qh_dma);
8746 + struct fotg210_qh *head;
8747 +
8748 + /* Don't link a QH if there's a Clear-TT-Buffer pending */
8749 + if (unlikely(qh->clearing_tt))
8750 + return;
8751 +
8752 + WARN_ON(qh->qh_state != QH_STATE_IDLE);
8753 +
8754 + /* clear halt and/or toggle; and maybe recover from silicon quirk */
8755 + qh_refresh(fotg210, qh);
8756 +
8757 + /* splice right after start */
8758 + head = fotg210->async;
8759 + qh->qh_next = head->qh_next;
8760 + qh->hw->hw_next = head->hw->hw_next;
8761 + wmb();
8762 +
8763 + head->qh_next.qh = qh;
8764 + head->hw->hw_next = dma;
8765 +
8766 + qh->xacterrs = 0;
8767 + qh->qh_state = QH_STATE_LINKED;
8768 + /* qtd completions reported later by interrupt */
8769 +
8770 + enable_async(fotg210);
8771 +}
8772 +
8773 +/* For control/bulk/interrupt, return QH with these TDs appended.
8774 + * Allocates and initializes the QH if necessary.
8775 + * Returns null if it can't allocate a QH it needs to.
8776 + * If the QH has TDs (urbs) already, that's great.
8777 + */
8778 +static struct fotg210_qh *qh_append_tds(struct fotg210_hcd *fotg210,
8779 + struct urb *urb, struct list_head *qtd_list,
8780 + int epnum, void **ptr)
8781 +{
8782 + struct fotg210_qh *qh = NULL;
8783 + __hc32 qh_addr_mask = cpu_to_hc32(fotg210, 0x7f);
8784 +
8785 + qh = (struct fotg210_qh *) *ptr;
8786 + if (unlikely(qh == NULL)) {
8787 + /* can't sleep here, we have fotg210->lock... */
8788 + qh = qh_make(fotg210, urb, GFP_ATOMIC);
8789 + *ptr = qh;
8790 + }
8791 + if (likely(qh != NULL)) {
8792 + struct fotg210_qtd *qtd;
8793 +
8794 + if (unlikely(list_empty(qtd_list)))
8795 + qtd = NULL;
8796 + else
8797 + qtd = list_entry(qtd_list->next, struct fotg210_qtd,
8798 + qtd_list);
8799 +
8800 + /* control qh may need patching ... */
8801 + if (unlikely(epnum == 0)) {
8802 + /* usb_reset_device() briefly reverts to address 0 */
8803 + if (usb_pipedevice(urb->pipe) == 0)
8804 + qh->hw->hw_info1 &= ~qh_addr_mask;
8805 + }
8806 +
8807 + /* just one way to queue requests: swap with the dummy qtd.
8808 + * only hc or qh_refresh() ever modify the overlay.
8809 + */
8810 + if (likely(qtd != NULL)) {
8811 + struct fotg210_qtd *dummy;
8812 + dma_addr_t dma;
8813 + __hc32 token;
8814 +
8815 + /* to avoid racing the HC, use the dummy td instead of
8816 + * the first td of our list (becomes new dummy). both
8817 + * tds stay deactivated until we're done, when the
8818 + * HC is allowed to fetch the old dummy (4.10.2).
8819 + */
8820 + token = qtd->hw_token;
8821 + qtd->hw_token = HALT_BIT(fotg210);
8822 +
8823 + dummy = qh->dummy;
8824 +
8825 + dma = dummy->qtd_dma;
8826 + *dummy = *qtd;
8827 + dummy->qtd_dma = dma;
8828 +
8829 + list_del(&qtd->qtd_list);
8830 + list_add(&dummy->qtd_list, qtd_list);
8831 + list_splice_tail(qtd_list, &qh->qtd_list);
8832 +
8833 + fotg210_qtd_init(fotg210, qtd, qtd->qtd_dma);
8834 + qh->dummy = qtd;
8835 +
8836 + /* hc must see the new dummy at list end */
8837 + dma = qtd->qtd_dma;
8838 + qtd = list_entry(qh->qtd_list.prev,
8839 + struct fotg210_qtd, qtd_list);
8840 + qtd->hw_next = QTD_NEXT(fotg210, dma);
8841 +
8842 + /* let the hc process these next qtds */
8843 + wmb();
8844 + dummy->hw_token = token;
8845 +
8846 + urb->hcpriv = qh;
8847 + }
8848 + }
8849 + return qh;
8850 +}
8851 +
8852 +static int submit_async(struct fotg210_hcd *fotg210, struct urb *urb,
8853 + struct list_head *qtd_list, gfp_t mem_flags)
8854 +{
8855 + int epnum;
8856 + unsigned long flags;
8857 + struct fotg210_qh *qh = NULL;
8858 + int rc;
8859 +
8860 + epnum = urb->ep->desc.bEndpointAddress;
8861 +
8862 +#ifdef FOTG210_URB_TRACE
8863 + {
8864 + struct fotg210_qtd *qtd;
8865 +
8866 + qtd = list_entry(qtd_list->next, struct fotg210_qtd, qtd_list);
8867 + fotg210_dbg(fotg210,
8868 + "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
8869 + __func__, urb->dev->devpath, urb,
8870 + epnum & 0x0f, (epnum & USB_DIR_IN)
8871 + ? "in" : "out",
8872 + urb->transfer_buffer_length,
8873 + qtd, urb->ep->hcpriv);
8874 + }
8875 +#endif
8876 +
8877 + spin_lock_irqsave(&fotg210->lock, flags);
8878 + if (unlikely(!HCD_HW_ACCESSIBLE(fotg210_to_hcd(fotg210)))) {
8879 + rc = -ESHUTDOWN;
8880 + goto done;
8881 + }
8882 + rc = usb_hcd_link_urb_to_ep(fotg210_to_hcd(fotg210), urb);
8883 + if (unlikely(rc))
8884 + goto done;
8885 +
8886 + qh = qh_append_tds(fotg210, urb, qtd_list, epnum, &urb->ep->hcpriv);
8887 + if (unlikely(qh == NULL)) {
8888 + usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
8889 + rc = -ENOMEM;
8890 + goto done;
8891 + }
8892 +
8893 + /* Control/bulk operations through TTs don't need scheduling,
8894 + * the HC and TT handle it when the TT has a buffer ready.
8895 + */
8896 + if (likely(qh->qh_state == QH_STATE_IDLE))
8897 + qh_link_async(fotg210, qh);
8898 +done:
8899 + spin_unlock_irqrestore(&fotg210->lock, flags);
8900 + if (unlikely(qh == NULL))
8901 + qtd_list_free(fotg210, urb, qtd_list);
8902 + return rc;
8903 +}
8904 +
8905 +static void single_unlink_async(struct fotg210_hcd *fotg210,
8906 + struct fotg210_qh *qh)
8907 +{
8908 + struct fotg210_qh *prev;
8909 +
8910 + /* Add to the end of the list of QHs waiting for the next IAAD */
8911 + qh->qh_state = QH_STATE_UNLINK;
8912 + if (fotg210->async_unlink)
8913 + fotg210->async_unlink_last->unlink_next = qh;
8914 + else
8915 + fotg210->async_unlink = qh;
8916 + fotg210->async_unlink_last = qh;
8917 +
8918 + /* Unlink it from the schedule */
8919 + prev = fotg210->async;
8920 + while (prev->qh_next.qh != qh)
8921 + prev = prev->qh_next.qh;
8922 +
8923 + prev->hw->hw_next = qh->hw->hw_next;
8924 + prev->qh_next = qh->qh_next;
8925 + if (fotg210->qh_scan_next == qh)
8926 + fotg210->qh_scan_next = qh->qh_next.qh;
8927 +}
8928 +
8929 +static void start_iaa_cycle(struct fotg210_hcd *fotg210, bool nested)
8930 +{
8931 + /*
8932 + * Do nothing if an IAA cycle is already running or
8933 + * if one will be started shortly.
8934 + */
8935 + if (fotg210->async_iaa || fotg210->async_unlinking)
8936 + return;
8937 +
8938 + /* Do all the waiting QHs at once */
8939 + fotg210->async_iaa = fotg210->async_unlink;
8940 + fotg210->async_unlink = NULL;
8941 +
8942 + /* If the controller isn't running, we don't have to wait for it */
8943 + if (unlikely(fotg210->rh_state < FOTG210_RH_RUNNING)) {
8944 + if (!nested) /* Avoid recursion */
8945 + end_unlink_async(fotg210);
8946 +
8947 + /* Otherwise start a new IAA cycle */
8948 + } else if (likely(fotg210->rh_state == FOTG210_RH_RUNNING)) {
8949 + /* Make sure the unlinks are all visible to the hardware */
8950 + wmb();
8951 +
8952 + fotg210_writel(fotg210, fotg210->command | CMD_IAAD,
8953 + &fotg210->regs->command);
8954 + fotg210_readl(fotg210, &fotg210->regs->command);
8955 + fotg210_enable_event(fotg210, FOTG210_HRTIMER_IAA_WATCHDOG,
8956 + true);
8957 + }
8958 +}
8959 +
8960 +/* the async qh for the qtds being unlinked are now gone from the HC */
8961 +
8962 +static void end_unlink_async(struct fotg210_hcd *fotg210)
8963 +{
8964 + struct fotg210_qh *qh;
8965 +
8966 + /* Process the idle QHs */
8967 +restart:
8968 + fotg210->async_unlinking = true;
8969 + while (fotg210->async_iaa) {
8970 + qh = fotg210->async_iaa;
8971 + fotg210->async_iaa = qh->unlink_next;
8972 + qh->unlink_next = NULL;
8973 +
8974 + qh->qh_state = QH_STATE_IDLE;
8975 + qh->qh_next.qh = NULL;
8976 +
8977 + qh_completions(fotg210, qh);
8978 + if (!list_empty(&qh->qtd_list) &&
8979 + fotg210->rh_state == FOTG210_RH_RUNNING)
8980 + qh_link_async(fotg210, qh);
8981 + disable_async(fotg210);
8982 + }
8983 + fotg210->async_unlinking = false;
8984 +
8985 + /* Start a new IAA cycle if any QHs are waiting for it */
8986 + if (fotg210->async_unlink) {
8987 + start_iaa_cycle(fotg210, true);
8988 + if (unlikely(fotg210->rh_state < FOTG210_RH_RUNNING))
8989 + goto restart;
8990 + }
8991 +}
8992 +
8993 +static void unlink_empty_async(struct fotg210_hcd *fotg210)
8994 +{
8995 + struct fotg210_qh *qh, *next;
8996 + bool stopped = (fotg210->rh_state < FOTG210_RH_RUNNING);
8997 + bool check_unlinks_later = false;
8998 +
8999 + /* Unlink all the async QHs that have been empty for a timer cycle */
9000 + next = fotg210->async->qh_next.qh;
9001 + while (next) {
9002 + qh = next;
9003 + next = qh->qh_next.qh;
9004 +
9005 + if (list_empty(&qh->qtd_list) &&
9006 + qh->qh_state == QH_STATE_LINKED) {
9007 + if (!stopped && qh->unlink_cycle ==
9008 + fotg210->async_unlink_cycle)
9009 + check_unlinks_later = true;
9010 + else
9011 + single_unlink_async(fotg210, qh);
9012 + }
9013 + }
9014 +
9015 + /* Start a new IAA cycle if any QHs are waiting for it */
9016 + if (fotg210->async_unlink)
9017 + start_iaa_cycle(fotg210, false);
9018 +
9019 + /* QHs that haven't been empty for long enough will be handled later */
9020 + if (check_unlinks_later) {
9021 + fotg210_enable_event(fotg210, FOTG210_HRTIMER_ASYNC_UNLINKS,
9022 + true);
9023 + ++fotg210->async_unlink_cycle;
9024 + }
9025 +}
9026 +
9027 +/* makes sure the async qh will become idle */
9028 +/* caller must own fotg210->lock */
9029 +
9030 +static void start_unlink_async(struct fotg210_hcd *fotg210,
9031 + struct fotg210_qh *qh)
9032 +{
9033 + /*
9034 + * If the QH isn't linked then there's nothing we can do
9035 + * unless we were called during a giveback, in which case
9036 + * qh_completions() has to deal with it.
9037 + */
9038 + if (qh->qh_state != QH_STATE_LINKED) {
9039 + if (qh->qh_state == QH_STATE_COMPLETING)
9040 + qh->needs_rescan = 1;
9041 + return;
9042 + }
9043 +
9044 + single_unlink_async(fotg210, qh);
9045 + start_iaa_cycle(fotg210, false);
9046 +}
9047 +
9048 +static void scan_async(struct fotg210_hcd *fotg210)
9049 +{
9050 + struct fotg210_qh *qh;
9051 + bool check_unlinks_later = false;
9052 +
9053 + fotg210->qh_scan_next = fotg210->async->qh_next.qh;
9054 + while (fotg210->qh_scan_next) {
9055 + qh = fotg210->qh_scan_next;
9056 + fotg210->qh_scan_next = qh->qh_next.qh;
9057 +rescan:
9058 + /* clean any finished work for this qh */
9059 + if (!list_empty(&qh->qtd_list)) {
9060 + int temp;
9061 +
9062 + /*
9063 + * Unlinks could happen here; completion reporting
9064 + * drops the lock. That's why fotg210->qh_scan_next
9065 + * always holds the next qh to scan; if the next qh
9066 + * gets unlinked then fotg210->qh_scan_next is adjusted
9067 + * in single_unlink_async().
9068 + */
9069 + temp = qh_completions(fotg210, qh);
9070 + if (qh->needs_rescan) {
9071 + start_unlink_async(fotg210, qh);
9072 + } else if (list_empty(&qh->qtd_list)
9073 + && qh->qh_state == QH_STATE_LINKED) {
9074 + qh->unlink_cycle = fotg210->async_unlink_cycle;
9075 + check_unlinks_later = true;
9076 + } else if (temp != 0)
9077 + goto rescan;
9078 + }
9079 + }
9080 +
9081 + /*
9082 + * Unlink empty entries, reducing DMA usage as well
9083 + * as HCD schedule-scanning costs. Delay for any qh
9084 + * we just scanned, there's a not-unusual case that it
9085 + * doesn't stay idle for long.
9086 + */
9087 + if (check_unlinks_later && fotg210->rh_state == FOTG210_RH_RUNNING &&
9088 + !(fotg210->enabled_hrtimer_events &
9089 + BIT(FOTG210_HRTIMER_ASYNC_UNLINKS))) {
9090 + fotg210_enable_event(fotg210,
9091 + FOTG210_HRTIMER_ASYNC_UNLINKS, true);
9092 + ++fotg210->async_unlink_cycle;
9093 + }
9094 +}
9095 +/* EHCI scheduled transaction support: interrupt, iso, split iso
9096 + * These are called "periodic" transactions in the EHCI spec.
9097 + *
9098 + * Note that for interrupt transfers, the QH/QTD manipulation is shared
9099 + * with the "asynchronous" transaction support (control/bulk transfers).
9100 + * The only real difference is in how interrupt transfers are scheduled.
9101 + *
9102 + * For ISO, we make an "iso_stream" head to serve the same role as a QH.
9103 + * It keeps track of every ITD (or SITD) that's linked, and holds enough
9104 + * pre-calculated schedule data to make appending to the queue be quick.
9105 + */
9106 +static int fotg210_get_frame(struct usb_hcd *hcd);
9107 +
9108 +/* periodic_next_shadow - return "next" pointer on shadow list
9109 + * @periodic: host pointer to qh/itd
9110 + * @tag: hardware tag for type of this record
9111 + */
9112 +static union fotg210_shadow *periodic_next_shadow(struct fotg210_hcd *fotg210,
9113 + union fotg210_shadow *periodic, __hc32 tag)
9114 +{
9115 + switch (hc32_to_cpu(fotg210, tag)) {
9116 + case Q_TYPE_QH:
9117 + return &periodic->qh->qh_next;
9118 + case Q_TYPE_FSTN:
9119 + return &periodic->fstn->fstn_next;
9120 + default:
9121 + return &periodic->itd->itd_next;
9122 + }
9123 +}
9124 +
9125 +static __hc32 *shadow_next_periodic(struct fotg210_hcd *fotg210,
9126 + union fotg210_shadow *periodic, __hc32 tag)
9127 +{
9128 + switch (hc32_to_cpu(fotg210, tag)) {
9129 + /* our fotg210_shadow.qh is actually software part */
9130 + case Q_TYPE_QH:
9131 + return &periodic->qh->hw->hw_next;
9132 + /* others are hw parts */
9133 + default:
9134 + return periodic->hw_next;
9135 + }
9136 +}
9137 +
9138 +/* caller must hold fotg210->lock */
9139 +static void periodic_unlink(struct fotg210_hcd *fotg210, unsigned frame,
9140 + void *ptr)
9141 +{
9142 + union fotg210_shadow *prev_p = &fotg210->pshadow[frame];
9143 + __hc32 *hw_p = &fotg210->periodic[frame];
9144 + union fotg210_shadow here = *prev_p;
9145 +
9146 + /* find predecessor of "ptr"; hw and shadow lists are in sync */
9147 + while (here.ptr && here.ptr != ptr) {
9148 + prev_p = periodic_next_shadow(fotg210, prev_p,
9149 + Q_NEXT_TYPE(fotg210, *hw_p));
9150 + hw_p = shadow_next_periodic(fotg210, &here,
9151 + Q_NEXT_TYPE(fotg210, *hw_p));
9152 + here = *prev_p;
9153 + }
9154 + /* an interrupt entry (at list end) could have been shared */
9155 + if (!here.ptr)
9156 + return;
9157 +
9158 + /* update shadow and hardware lists ... the old "next" pointers
9159 + * from ptr may still be in use, the caller updates them.
9160 + */
9161 + *prev_p = *periodic_next_shadow(fotg210, &here,
9162 + Q_NEXT_TYPE(fotg210, *hw_p));
9163 +
9164 + *hw_p = *shadow_next_periodic(fotg210, &here,
9165 + Q_NEXT_TYPE(fotg210, *hw_p));
9166 +}
9167 +
9168 +/* how many of the uframe's 125 usecs are allocated? */
9169 +static unsigned short periodic_usecs(struct fotg210_hcd *fotg210,
9170 + unsigned frame, unsigned uframe)
9171 +{
9172 + __hc32 *hw_p = &fotg210->periodic[frame];
9173 + union fotg210_shadow *q = &fotg210->pshadow[frame];
9174 + unsigned usecs = 0;
9175 + struct fotg210_qh_hw *hw;
9176 +
9177 + while (q->ptr) {
9178 + switch (hc32_to_cpu(fotg210, Q_NEXT_TYPE(fotg210, *hw_p))) {
9179 + case Q_TYPE_QH:
9180 + hw = q->qh->hw;
9181 + /* is it in the S-mask? */
9182 + if (hw->hw_info2 & cpu_to_hc32(fotg210, 1 << uframe))
9183 + usecs += q->qh->usecs;
9184 + /* ... or C-mask? */
9185 + if (hw->hw_info2 & cpu_to_hc32(fotg210,
9186 + 1 << (8 + uframe)))
9187 + usecs += q->qh->c_usecs;
9188 + hw_p = &hw->hw_next;
9189 + q = &q->qh->qh_next;
9190 + break;
9191 + /* case Q_TYPE_FSTN: */
9192 + default:
9193 + /* for "save place" FSTNs, count the relevant INTR
9194 + * bandwidth from the previous frame
9195 + */
9196 + if (q->fstn->hw_prev != FOTG210_LIST_END(fotg210))
9197 + fotg210_dbg(fotg210, "ignoring FSTN cost ...\n");
9198 +
9199 + hw_p = &q->fstn->hw_next;
9200 + q = &q->fstn->fstn_next;
9201 + break;
9202 + case Q_TYPE_ITD:
9203 + if (q->itd->hw_transaction[uframe])
9204 + usecs += q->itd->stream->usecs;
9205 + hw_p = &q->itd->hw_next;
9206 + q = &q->itd->itd_next;
9207 + break;
9208 + }
9209 + }
9210 + if (usecs > fotg210->uframe_periodic_max)
9211 + fotg210_err(fotg210, "uframe %d sched overrun: %d usecs\n",
9212 + frame * 8 + uframe, usecs);
9213 + return usecs;
9214 +}
9215 +
9216 +static int same_tt(struct usb_device *dev1, struct usb_device *dev2)
9217 +{
9218 + if (!dev1->tt || !dev2->tt)
9219 + return 0;
9220 + if (dev1->tt != dev2->tt)
9221 + return 0;
9222 + if (dev1->tt->multi)
9223 + return dev1->ttport == dev2->ttport;
9224 + else
9225 + return 1;
9226 +}
9227 +
9228 +/* return true iff the device's transaction translator is available
9229 + * for a periodic transfer starting at the specified frame, using
9230 + * all the uframes in the mask.
9231 + */
9232 +static int tt_no_collision(struct fotg210_hcd *fotg210, unsigned period,
9233 + struct usb_device *dev, unsigned frame, u32 uf_mask)
9234 +{
9235 + if (period == 0) /* error */
9236 + return 0;
9237 +
9238 + /* note bandwidth wastage: split never follows csplit
9239 + * (different dev or endpoint) until the next uframe.
9240 + * calling convention doesn't make that distinction.
9241 + */
9242 + for (; frame < fotg210->periodic_size; frame += period) {
9243 + union fotg210_shadow here;
9244 + __hc32 type;
9245 + struct fotg210_qh_hw *hw;
9246 +
9247 + here = fotg210->pshadow[frame];
9248 + type = Q_NEXT_TYPE(fotg210, fotg210->periodic[frame]);
9249 + while (here.ptr) {
9250 + switch (hc32_to_cpu(fotg210, type)) {
9251 + case Q_TYPE_ITD:
9252 + type = Q_NEXT_TYPE(fotg210, here.itd->hw_next);
9253 + here = here.itd->itd_next;
9254 + continue;
9255 + case Q_TYPE_QH:
9256 + hw = here.qh->hw;
9257 + if (same_tt(dev, here.qh->dev)) {
9258 + u32 mask;
9259 +
9260 + mask = hc32_to_cpu(fotg210,
9261 + hw->hw_info2);
9262 + /* "knows" no gap is needed */
9263 + mask |= mask >> 8;
9264 + if (mask & uf_mask)
9265 + break;
9266 + }
9267 + type = Q_NEXT_TYPE(fotg210, hw->hw_next);
9268 + here = here.qh->qh_next;
9269 + continue;
9270 + /* case Q_TYPE_FSTN: */
9271 + default:
9272 + fotg210_dbg(fotg210,
9273 + "periodic frame %d bogus type %d\n",
9274 + frame, type);
9275 + }
9276 +
9277 + /* collision or error */
9278 + return 0;
9279 + }
9280 + }
9281 +
9282 + /* no collision */
9283 + return 1;
9284 +}
9285 +
9286 +static void enable_periodic(struct fotg210_hcd *fotg210)
9287 +{
9288 + if (fotg210->periodic_count++)
9289 + return;
9290 +
9291 + /* Stop waiting to turn off the periodic schedule */
9292 + fotg210->enabled_hrtimer_events &=
9293 + ~BIT(FOTG210_HRTIMER_DISABLE_PERIODIC);
9294 +
9295 + /* Don't start the schedule until PSS is 0 */
9296 + fotg210_poll_PSS(fotg210);
9297 + turn_on_io_watchdog(fotg210);
9298 +}
9299 +
9300 +static void disable_periodic(struct fotg210_hcd *fotg210)
9301 +{
9302 + if (--fotg210->periodic_count)
9303 + return;
9304 +
9305 + /* Don't turn off the schedule until PSS is 1 */
9306 + fotg210_poll_PSS(fotg210);
9307 +}
9308 +
9309 +/* periodic schedule slots have iso tds (normal or split) first, then a
9310 + * sparse tree for active interrupt transfers.
9311 + *
9312 + * this just links in a qh; caller guarantees uframe masks are set right.
9313 + * no FSTN support (yet; fotg210 0.96+)
9314 + */
9315 +static void qh_link_periodic(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
9316 +{
9317 + unsigned i;
9318 + unsigned period = qh->period;
9319 +
9320 + dev_dbg(&qh->dev->dev,
9321 + "link qh%d-%04x/%p start %d [%d/%d us]\n", period,
9322 + hc32_to_cpup(fotg210, &qh->hw->hw_info2) &
9323 + (QH_CMASK | QH_SMASK), qh, qh->start, qh->usecs,
9324 + qh->c_usecs);
9325 +
9326 + /* high bandwidth, or otherwise every microframe */
9327 + if (period == 0)
9328 + period = 1;
9329 +
9330 + for (i = qh->start; i < fotg210->periodic_size; i += period) {
9331 + union fotg210_shadow *prev = &fotg210->pshadow[i];
9332 + __hc32 *hw_p = &fotg210->periodic[i];
9333 + union fotg210_shadow here = *prev;
9334 + __hc32 type = 0;
9335 +
9336 + /* skip the iso nodes at list head */
9337 + while (here.ptr) {
9338 + type = Q_NEXT_TYPE(fotg210, *hw_p);
9339 + if (type == cpu_to_hc32(fotg210, Q_TYPE_QH))
9340 + break;
9341 + prev = periodic_next_shadow(fotg210, prev, type);
9342 + hw_p = shadow_next_periodic(fotg210, &here, type);
9343 + here = *prev;
9344 + }
9345 +
9346 + /* sorting each branch by period (slow-->fast)
9347 + * enables sharing interior tree nodes
9348 + */
9349 + while (here.ptr && qh != here.qh) {
9350 + if (qh->period > here.qh->period)
9351 + break;
9352 + prev = &here.qh->qh_next;
9353 + hw_p = &here.qh->hw->hw_next;
9354 + here = *prev;
9355 + }
9356 + /* link in this qh, unless some earlier pass did that */
9357 + if (qh != here.qh) {
9358 + qh->qh_next = here;
9359 + if (here.qh)
9360 + qh->hw->hw_next = *hw_p;
9361 + wmb();
9362 + prev->qh = qh;
9363 + *hw_p = QH_NEXT(fotg210, qh->qh_dma);
9364 + }
9365 + }
9366 + qh->qh_state = QH_STATE_LINKED;
9367 + qh->xacterrs = 0;
9368 +
9369 + /* update per-qh bandwidth for usbfs */
9370 + fotg210_to_hcd(fotg210)->self.bandwidth_allocated += qh->period
9371 + ? ((qh->usecs + qh->c_usecs) / qh->period)
9372 + : (qh->usecs * 8);
9373 +
9374 + list_add(&qh->intr_node, &fotg210->intr_qh_list);
9375 +
9376 + /* maybe enable periodic schedule processing */
9377 + ++fotg210->intr_count;
9378 + enable_periodic(fotg210);
9379 +}
9380 +
9381 +static void qh_unlink_periodic(struct fotg210_hcd *fotg210,
9382 + struct fotg210_qh *qh)
9383 +{
9384 + unsigned i;
9385 + unsigned period;
9386 +
9387 + /*
9388 + * If qh is for a low/full-speed device, simply unlinking it
9389 + * could interfere with an ongoing split transaction. To unlink
9390 + * it safely would require setting the QH_INACTIVATE bit and
9391 + * waiting at least one frame, as described in EHCI 4.12.2.5.
9392 + *
9393 + * We won't bother with any of this. Instead, we assume that the
9394 + * only reason for unlinking an interrupt QH while the current URB
9395 + * is still active is to dequeue all the URBs (flush the whole
9396 + * endpoint queue).
9397 + *
9398 + * If rebalancing the periodic schedule is ever implemented, this
9399 + * approach will no longer be valid.
9400 + */
9401 +
9402 + /* high bandwidth, or otherwise part of every microframe */
9403 + period = qh->period;
9404 + if (!period)
9405 + period = 1;
9406 +
9407 + for (i = qh->start; i < fotg210->periodic_size; i += period)
9408 + periodic_unlink(fotg210, i, qh);
9409 +
9410 + /* update per-qh bandwidth for usbfs */
9411 + fotg210_to_hcd(fotg210)->self.bandwidth_allocated -= qh->period
9412 + ? ((qh->usecs + qh->c_usecs) / qh->period)
9413 + : (qh->usecs * 8);
9414 +
9415 + dev_dbg(&qh->dev->dev,
9416 + "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
9417 + qh->period, hc32_to_cpup(fotg210, &qh->hw->hw_info2) &
9418 + (QH_CMASK | QH_SMASK), qh, qh->start, qh->usecs,
9419 + qh->c_usecs);
9420 +
9421 + /* qh->qh_next still "live" to HC */
9422 + qh->qh_state = QH_STATE_UNLINK;
9423 + qh->qh_next.ptr = NULL;
9424 +
9425 + if (fotg210->qh_scan_next == qh)
9426 + fotg210->qh_scan_next = list_entry(qh->intr_node.next,
9427 + struct fotg210_qh, intr_node);
9428 + list_del(&qh->intr_node);
9429 +}
9430 +
9431 +static void start_unlink_intr(struct fotg210_hcd *fotg210,
9432 + struct fotg210_qh *qh)
9433 +{
9434 + /* If the QH isn't linked then there's nothing we can do
9435 + * unless we were called during a giveback, in which case
9436 + * qh_completions() has to deal with it.
9437 + */
9438 + if (qh->qh_state != QH_STATE_LINKED) {
9439 + if (qh->qh_state == QH_STATE_COMPLETING)
9440 + qh->needs_rescan = 1;
9441 + return;
9442 + }
9443 +
9444 + qh_unlink_periodic(fotg210, qh);
9445 +
9446 + /* Make sure the unlinks are visible before starting the timer */
9447 + wmb();
9448 +
9449 + /*
9450 + * The EHCI spec doesn't say how long it takes the controller to
9451 + * stop accessing an unlinked interrupt QH. The timer delay is
9452 + * 9 uframes; presumably that will be long enough.
9453 + */
9454 + qh->unlink_cycle = fotg210->intr_unlink_cycle;
9455 +
9456 + /* New entries go at the end of the intr_unlink list */
9457 + if (fotg210->intr_unlink)
9458 + fotg210->intr_unlink_last->unlink_next = qh;
9459 + else
9460 + fotg210->intr_unlink = qh;
9461 + fotg210->intr_unlink_last = qh;
9462 +
9463 + if (fotg210->intr_unlinking)
9464 + ; /* Avoid recursive calls */
9465 + else if (fotg210->rh_state < FOTG210_RH_RUNNING)
9466 + fotg210_handle_intr_unlinks(fotg210);
9467 + else if (fotg210->intr_unlink == qh) {
9468 + fotg210_enable_event(fotg210, FOTG210_HRTIMER_UNLINK_INTR,
9469 + true);
9470 + ++fotg210->intr_unlink_cycle;
9471 + }
9472 +}
9473 +
9474 +static void end_unlink_intr(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
9475 +{
9476 + struct fotg210_qh_hw *hw = qh->hw;
9477 + int rc;
9478 +
9479 + qh->qh_state = QH_STATE_IDLE;
9480 + hw->hw_next = FOTG210_LIST_END(fotg210);
9481 +
9482 + qh_completions(fotg210, qh);
9483 +
9484 + /* reschedule QH iff another request is queued */
9485 + if (!list_empty(&qh->qtd_list) &&
9486 + fotg210->rh_state == FOTG210_RH_RUNNING) {
9487 + rc = qh_schedule(fotg210, qh);
9488 +
9489 + /* An error here likely indicates handshake failure
9490 + * or no space left in the schedule. Neither fault
9491 + * should happen often ...
9492 + *
9493 + * FIXME kill the now-dysfunctional queued urbs
9494 + */
9495 + if (rc != 0)
9496 + fotg210_err(fotg210, "can't reschedule qh %p, err %d\n",
9497 + qh, rc);
9498 + }
9499 +
9500 + /* maybe turn off periodic schedule */
9501 + --fotg210->intr_count;
9502 + disable_periodic(fotg210);
9503 +}
9504 +
9505 +static int check_period(struct fotg210_hcd *fotg210, unsigned frame,
9506 + unsigned uframe, unsigned period, unsigned usecs)
9507 +{
9508 + int claimed;
9509 +
9510 + /* complete split running into next frame?
9511 + * given FSTN support, we could sometimes check...
9512 + */
9513 + if (uframe >= 8)
9514 + return 0;
9515 +
9516 + /* convert "usecs we need" to "max already claimed" */
9517 + usecs = fotg210->uframe_periodic_max - usecs;
9518 +
9519 + /* we "know" 2 and 4 uframe intervals were rejected; so
9520 + * for period 0, check _every_ microframe in the schedule.
9521 + */
9522 + if (unlikely(period == 0)) {
9523 + do {
9524 + for (uframe = 0; uframe < 7; uframe++) {
9525 + claimed = periodic_usecs(fotg210, frame,
9526 + uframe);
9527 + if (claimed > usecs)
9528 + return 0;
9529 + }
9530 + } while ((frame += 1) < fotg210->periodic_size);
9531 +
9532 + /* just check the specified uframe, at that period */
9533 + } else {
9534 + do {
9535 + claimed = periodic_usecs(fotg210, frame, uframe);
9536 + if (claimed > usecs)
9537 + return 0;
9538 + } while ((frame += period) < fotg210->periodic_size);
9539 + }
9540 +
9541 + /* success! */
9542 + return 1;
9543 +}
9544 +
9545 +static int check_intr_schedule(struct fotg210_hcd *fotg210, unsigned frame,
9546 + unsigned uframe, const struct fotg210_qh *qh, __hc32 *c_maskp)
9547 +{
9548 + int retval = -ENOSPC;
9549 + u8 mask = 0;
9550 +
9551 + if (qh->c_usecs && uframe >= 6) /* FSTN territory? */
9552 + goto done;
9553 +
9554 + if (!check_period(fotg210, frame, uframe, qh->period, qh->usecs))
9555 + goto done;
9556 + if (!qh->c_usecs) {
9557 + retval = 0;
9558 + *c_maskp = 0;
9559 + goto done;
9560 + }
9561 +
9562 + /* Make sure this tt's buffer is also available for CSPLITs.
9563 + * We pessimize a bit; probably the typical full speed case
9564 + * doesn't need the second CSPLIT.
9565 + *
9566 + * NOTE: both SPLIT and CSPLIT could be checked in just
9567 + * one smart pass...
9568 + */
9569 + mask = 0x03 << (uframe + qh->gap_uf);
9570 + *c_maskp = cpu_to_hc32(fotg210, mask << 8);
9571 +
9572 + mask |= 1 << uframe;
9573 + if (tt_no_collision(fotg210, qh->period, qh->dev, frame, mask)) {
9574 + if (!check_period(fotg210, frame, uframe + qh->gap_uf + 1,
9575 + qh->period, qh->c_usecs))
9576 + goto done;
9577 + if (!check_period(fotg210, frame, uframe + qh->gap_uf,
9578 + qh->period, qh->c_usecs))
9579 + goto done;
9580 + retval = 0;
9581 + }
9582 +done:
9583 + return retval;
9584 +}
9585 +
9586 +/* "first fit" scheduling policy used the first time through,
9587 + * or when the previous schedule slot can't be re-used.
9588 + */
9589 +static int qh_schedule(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
9590 +{
9591 + int status;
9592 + unsigned uframe;
9593 + __hc32 c_mask;
9594 + unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
9595 + struct fotg210_qh_hw *hw = qh->hw;
9596 +
9597 + qh_refresh(fotg210, qh);
9598 + hw->hw_next = FOTG210_LIST_END(fotg210);
9599 + frame = qh->start;
9600 +
9601 + /* reuse the previous schedule slots, if we can */
9602 + if (frame < qh->period) {
9603 + uframe = ffs(hc32_to_cpup(fotg210, &hw->hw_info2) & QH_SMASK);
9604 + status = check_intr_schedule(fotg210, frame, --uframe,
9605 + qh, &c_mask);
9606 + } else {
9607 + uframe = 0;
9608 + c_mask = 0;
9609 + status = -ENOSPC;
9610 + }
9611 +
9612 + /* else scan the schedule to find a group of slots such that all
9613 + * uframes have enough periodic bandwidth available.
9614 + */
9615 + if (status) {
9616 + /* "normal" case, uframing flexible except with splits */
9617 + if (qh->period) {
9618 + int i;
9619 +
9620 + for (i = qh->period; status && i > 0; --i) {
9621 + frame = ++fotg210->random_frame % qh->period;
9622 + for (uframe = 0; uframe < 8; uframe++) {
9623 + status = check_intr_schedule(fotg210,
9624 + frame, uframe, qh,
9625 + &c_mask);
9626 + if (status == 0)
9627 + break;
9628 + }
9629 + }
9630 +
9631 + /* qh->period == 0 means every uframe */
9632 + } else {
9633 + frame = 0;
9634 + status = check_intr_schedule(fotg210, 0, 0, qh,
9635 + &c_mask);
9636 + }
9637 + if (status)
9638 + goto done;
9639 + qh->start = frame;
9640 +
9641 + /* reset S-frame and (maybe) C-frame masks */
9642 + hw->hw_info2 &= cpu_to_hc32(fotg210, ~(QH_CMASK | QH_SMASK));
9643 + hw->hw_info2 |= qh->period
9644 + ? cpu_to_hc32(fotg210, 1 << uframe)
9645 + : cpu_to_hc32(fotg210, QH_SMASK);
9646 + hw->hw_info2 |= c_mask;
9647 + } else
9648 + fotg210_dbg(fotg210, "reused qh %p schedule\n", qh);
9649 +
9650 + /* stuff into the periodic schedule */
9651 + qh_link_periodic(fotg210, qh);
9652 +done:
9653 + return status;
9654 +}
9655 +
9656 +static int intr_submit(struct fotg210_hcd *fotg210, struct urb *urb,
9657 + struct list_head *qtd_list, gfp_t mem_flags)
9658 +{
9659 + unsigned epnum;
9660 + unsigned long flags;
9661 + struct fotg210_qh *qh;
9662 + int status;
9663 + struct list_head empty;
9664 +
9665 + /* get endpoint and transfer/schedule data */
9666 + epnum = urb->ep->desc.bEndpointAddress;
9667 +
9668 + spin_lock_irqsave(&fotg210->lock, flags);
9669 +
9670 + if (unlikely(!HCD_HW_ACCESSIBLE(fotg210_to_hcd(fotg210)))) {
9671 + status = -ESHUTDOWN;
9672 + goto done_not_linked;
9673 + }
9674 + status = usb_hcd_link_urb_to_ep(fotg210_to_hcd(fotg210), urb);
9675 + if (unlikely(status))
9676 + goto done_not_linked;
9677 +
9678 + /* get qh and force any scheduling errors */
9679 + INIT_LIST_HEAD(&empty);
9680 + qh = qh_append_tds(fotg210, urb, &empty, epnum, &urb->ep->hcpriv);
9681 + if (qh == NULL) {
9682 + status = -ENOMEM;
9683 + goto done;
9684 + }
9685 + if (qh->qh_state == QH_STATE_IDLE) {
9686 + status = qh_schedule(fotg210, qh);
9687 + if (status)
9688 + goto done;
9689 + }
9690 +
9691 + /* then queue the urb's tds to the qh */
9692 + qh = qh_append_tds(fotg210, urb, qtd_list, epnum, &urb->ep->hcpriv);
9693 + BUG_ON(qh == NULL);
9694 +
9695 + /* ... update usbfs periodic stats */
9696 + fotg210_to_hcd(fotg210)->self.bandwidth_int_reqs++;
9697 +
9698 +done:
9699 + if (unlikely(status))
9700 + usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
9701 +done_not_linked:
9702 + spin_unlock_irqrestore(&fotg210->lock, flags);
9703 + if (status)
9704 + qtd_list_free(fotg210, urb, qtd_list);
9705 +
9706 + return status;
9707 +}
9708 +
9709 +static void scan_intr(struct fotg210_hcd *fotg210)
9710 +{
9711 + struct fotg210_qh *qh;
9712 +
9713 + list_for_each_entry_safe(qh, fotg210->qh_scan_next,
9714 + &fotg210->intr_qh_list, intr_node) {
9715 +rescan:
9716 + /* clean any finished work for this qh */
9717 + if (!list_empty(&qh->qtd_list)) {
9718 + int temp;
9719 +
9720 + /*
9721 + * Unlinks could happen here; completion reporting
9722 + * drops the lock. That's why fotg210->qh_scan_next
9723 + * always holds the next qh to scan; if the next qh
9724 + * gets unlinked then fotg210->qh_scan_next is adjusted
9725 + * in qh_unlink_periodic().
9726 + */
9727 + temp = qh_completions(fotg210, qh);
9728 + if (unlikely(qh->needs_rescan ||
9729 + (list_empty(&qh->qtd_list) &&
9730 + qh->qh_state == QH_STATE_LINKED)))
9731 + start_unlink_intr(fotg210, qh);
9732 + else if (temp != 0)
9733 + goto rescan;
9734 + }
9735 + }
9736 +}
9737 +
9738 +/* fotg210_iso_stream ops work with both ITD and SITD */
9739 +
9740 +static struct fotg210_iso_stream *iso_stream_alloc(gfp_t mem_flags)
9741 +{
9742 + struct fotg210_iso_stream *stream;
9743 +
9744 + stream = kzalloc(sizeof(*stream), mem_flags);
9745 + if (likely(stream != NULL)) {
9746 + INIT_LIST_HEAD(&stream->td_list);
9747 + INIT_LIST_HEAD(&stream->free_list);
9748 + stream->next_uframe = -1;
9749 + }
9750 + return stream;
9751 +}
9752 +
9753 +static void iso_stream_init(struct fotg210_hcd *fotg210,
9754 + struct fotg210_iso_stream *stream, struct usb_device *dev,
9755 + int pipe, unsigned interval)
9756 +{
9757 + u32 buf1;
9758 + unsigned epnum, maxp;
9759 + int is_input;
9760 + long bandwidth;
9761 + unsigned multi;
9762 + struct usb_host_endpoint *ep;
9763 +
9764 + /*
9765 + * this might be a "high bandwidth" highspeed endpoint,
9766 + * as encoded in the ep descriptor's wMaxPacket field
9767 + */
9768 + epnum = usb_pipeendpoint(pipe);
9769 + is_input = usb_pipein(pipe) ? USB_DIR_IN : 0;
9770 + ep = usb_pipe_endpoint(dev, pipe);
9771 + maxp = usb_endpoint_maxp(&ep->desc);
9772 + if (is_input)
9773 + buf1 = (1 << 11);
9774 + else
9775 + buf1 = 0;
9776 +
9777 + multi = usb_endpoint_maxp_mult(&ep->desc);
9778 + buf1 |= maxp;
9779 + maxp *= multi;
9780 +
9781 + stream->buf0 = cpu_to_hc32(fotg210, (epnum << 8) | dev->devnum);
9782 + stream->buf1 = cpu_to_hc32(fotg210, buf1);
9783 + stream->buf2 = cpu_to_hc32(fotg210, multi);
9784 +
9785 + /* usbfs wants to report the average usecs per frame tied up
9786 + * when transfers on this endpoint are scheduled ...
9787 + */
9788 + if (dev->speed == USB_SPEED_FULL) {
9789 + interval <<= 3;
9790 + stream->usecs = NS_TO_US(usb_calc_bus_time(dev->speed,
9791 + is_input, 1, maxp));
9792 + stream->usecs /= 8;
9793 + } else {
9794 + stream->highspeed = 1;
9795 + stream->usecs = HS_USECS_ISO(maxp);
9796 + }
9797 + bandwidth = stream->usecs * 8;
9798 + bandwidth /= interval;
9799 +
9800 + stream->bandwidth = bandwidth;
9801 + stream->udev = dev;
9802 + stream->bEndpointAddress = is_input | epnum;
9803 + stream->interval = interval;
9804 + stream->maxp = maxp;
9805 +}
9806 +
9807 +static struct fotg210_iso_stream *iso_stream_find(struct fotg210_hcd *fotg210,
9808 + struct urb *urb)
9809 +{
9810 + unsigned epnum;
9811 + struct fotg210_iso_stream *stream;
9812 + struct usb_host_endpoint *ep;
9813 + unsigned long flags;
9814 +
9815 + epnum = usb_pipeendpoint(urb->pipe);
9816 + if (usb_pipein(urb->pipe))
9817 + ep = urb->dev->ep_in[epnum];
9818 + else
9819 + ep = urb->dev->ep_out[epnum];
9820 +
9821 + spin_lock_irqsave(&fotg210->lock, flags);
9822 + stream = ep->hcpriv;
9823 +
9824 + if (unlikely(stream == NULL)) {
9825 + stream = iso_stream_alloc(GFP_ATOMIC);
9826 + if (likely(stream != NULL)) {
9827 + ep->hcpriv = stream;
9828 + stream->ep = ep;
9829 + iso_stream_init(fotg210, stream, urb->dev, urb->pipe,
9830 + urb->interval);
9831 + }
9832 +
9833 + /* if dev->ep[epnum] is a QH, hw is set */
9834 + } else if (unlikely(stream->hw != NULL)) {
9835 + fotg210_dbg(fotg210, "dev %s ep%d%s, not iso??\n",
9836 + urb->dev->devpath, epnum,
9837 + usb_pipein(urb->pipe) ? "in" : "out");
9838 + stream = NULL;
9839 + }
9840 +
9841 + spin_unlock_irqrestore(&fotg210->lock, flags);
9842 + return stream;
9843 +}
9844 +
9845 +/* fotg210_iso_sched ops can be ITD-only or SITD-only */
9846 +
9847 +static struct fotg210_iso_sched *iso_sched_alloc(unsigned packets,
9848 + gfp_t mem_flags)
9849 +{
9850 + struct fotg210_iso_sched *iso_sched;
9851 +
9852 + iso_sched = kzalloc(struct_size(iso_sched, packet, packets), mem_flags);
9853 + if (likely(iso_sched != NULL))
9854 + INIT_LIST_HEAD(&iso_sched->td_list);
9855 +
9856 + return iso_sched;
9857 +}
9858 +
9859 +static inline void itd_sched_init(struct fotg210_hcd *fotg210,
9860 + struct fotg210_iso_sched *iso_sched,
9861 + struct fotg210_iso_stream *stream, struct urb *urb)
9862 +{
9863 + unsigned i;
9864 + dma_addr_t dma = urb->transfer_dma;
9865 +
9866 + /* how many uframes are needed for these transfers */
9867 + iso_sched->span = urb->number_of_packets * stream->interval;
9868 +
9869 + /* figure out per-uframe itd fields that we'll need later
9870 + * when we fit new itds into the schedule.
9871 + */
9872 + for (i = 0; i < urb->number_of_packets; i++) {
9873 + struct fotg210_iso_packet *uframe = &iso_sched->packet[i];
9874 + unsigned length;
9875 + dma_addr_t buf;
9876 + u32 trans;
9877 +
9878 + length = urb->iso_frame_desc[i].length;
9879 + buf = dma + urb->iso_frame_desc[i].offset;
9880 +
9881 + trans = FOTG210_ISOC_ACTIVE;
9882 + trans |= buf & 0x0fff;
9883 + if (unlikely(((i + 1) == urb->number_of_packets))
9884 + && !(urb->transfer_flags & URB_NO_INTERRUPT))
9885 + trans |= FOTG210_ITD_IOC;
9886 + trans |= length << 16;
9887 + uframe->transaction = cpu_to_hc32(fotg210, trans);
9888 +
9889 + /* might need to cross a buffer page within a uframe */
9890 + uframe->bufp = (buf & ~(u64)0x0fff);
9891 + buf += length;
9892 + if (unlikely((uframe->bufp != (buf & ~(u64)0x0fff))))
9893 + uframe->cross = 1;
9894 + }
9895 +}
9896 +
9897 +static void iso_sched_free(struct fotg210_iso_stream *stream,
9898 + struct fotg210_iso_sched *iso_sched)
9899 +{
9900 + if (!iso_sched)
9901 + return;
9902 + /* caller must hold fotg210->lock!*/
9903 + list_splice(&iso_sched->td_list, &stream->free_list);
9904 + kfree(iso_sched);
9905 +}
9906 +
9907 +static int itd_urb_transaction(struct fotg210_iso_stream *stream,
9908 + struct fotg210_hcd *fotg210, struct urb *urb, gfp_t mem_flags)
9909 +{
9910 + struct fotg210_itd *itd;
9911 + dma_addr_t itd_dma;
9912 + int i;
9913 + unsigned num_itds;
9914 + struct fotg210_iso_sched *sched;
9915 + unsigned long flags;
9916 +
9917 + sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
9918 + if (unlikely(sched == NULL))
9919 + return -ENOMEM;
9920 +
9921 + itd_sched_init(fotg210, sched, stream, urb);
9922 +
9923 + if (urb->interval < 8)
9924 + num_itds = 1 + (sched->span + 7) / 8;
9925 + else
9926 + num_itds = urb->number_of_packets;
9927 +
9928 + /* allocate/init ITDs */
9929 + spin_lock_irqsave(&fotg210->lock, flags);
9930 + for (i = 0; i < num_itds; i++) {
9931 +
9932 + /*
9933 + * Use iTDs from the free list, but not iTDs that may
9934 + * still be in use by the hardware.
9935 + */
9936 + if (likely(!list_empty(&stream->free_list))) {
9937 + itd = list_first_entry(&stream->free_list,
9938 + struct fotg210_itd, itd_list);
9939 + if (itd->frame == fotg210->now_frame)
9940 + goto alloc_itd;
9941 + list_del(&itd->itd_list);
9942 + itd_dma = itd->itd_dma;
9943 + } else {
9944 +alloc_itd:
9945 + spin_unlock_irqrestore(&fotg210->lock, flags);
9946 + itd = dma_pool_alloc(fotg210->itd_pool, mem_flags,
9947 + &itd_dma);
9948 + spin_lock_irqsave(&fotg210->lock, flags);
9949 + if (!itd) {
9950 + iso_sched_free(stream, sched);
9951 + spin_unlock_irqrestore(&fotg210->lock, flags);
9952 + return -ENOMEM;
9953 + }
9954 + }
9955 +
9956 + memset(itd, 0, sizeof(*itd));
9957 + itd->itd_dma = itd_dma;
9958 + list_add(&itd->itd_list, &sched->td_list);
9959 + }
9960 + spin_unlock_irqrestore(&fotg210->lock, flags);
9961 +
9962 + /* temporarily store schedule info in hcpriv */
9963 + urb->hcpriv = sched;
9964 + urb->error_count = 0;
9965 + return 0;
9966 +}
9967 +
9968 +static inline int itd_slot_ok(struct fotg210_hcd *fotg210, u32 mod, u32 uframe,
9969 + u8 usecs, u32 period)
9970 +{
9971 + uframe %= period;
9972 + do {
9973 + /* can't commit more than uframe_periodic_max usec */
9974 + if (periodic_usecs(fotg210, uframe >> 3, uframe & 0x7)
9975 + > (fotg210->uframe_periodic_max - usecs))
9976 + return 0;
9977 +
9978 + /* we know urb->interval is 2^N uframes */
9979 + uframe += period;
9980 + } while (uframe < mod);
9981 + return 1;
9982 +}
9983 +
9984 +/* This scheduler plans almost as far into the future as it has actual
9985 + * periodic schedule slots. (Affected by TUNE_FLS, which defaults to
9986 + * "as small as possible" to be cache-friendlier.) That limits the size
9987 + * transfers you can stream reliably; avoid more than 64 msec per urb.
9988 + * Also avoid queue depths of less than fotg210's worst irq latency (affected
9989 + * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
9990 + * and other factors); or more than about 230 msec total (for portability,
9991 + * given FOTG210_TUNE_FLS and the slop). Or, write a smarter scheduler!
9992 + */
9993 +
9994 +#define SCHEDULE_SLOP 80 /* microframes */
9995 +
9996 +static int iso_stream_schedule(struct fotg210_hcd *fotg210, struct urb *urb,
9997 + struct fotg210_iso_stream *stream)
9998 +{
9999 + u32 now, next, start, period, span;
10000 + int status;
10001 + unsigned mod = fotg210->periodic_size << 3;
10002 + struct fotg210_iso_sched *sched = urb->hcpriv;
10003 +
10004 + period = urb->interval;
10005 + span = sched->span;
10006 +
10007 + if (span > mod - SCHEDULE_SLOP) {
10008 + fotg210_dbg(fotg210, "iso request %p too long\n", urb);
10009 + status = -EFBIG;
10010 + goto fail;
10011 + }
10012 +
10013 + now = fotg210_read_frame_index(fotg210) & (mod - 1);
10014 +
10015 + /* Typical case: reuse current schedule, stream is still active.
10016 + * Hopefully there are no gaps from the host falling behind
10017 + * (irq delays etc), but if there are we'll take the next
10018 + * slot in the schedule, implicitly assuming URB_ISO_ASAP.
10019 + */
10020 + if (likely(!list_empty(&stream->td_list))) {
10021 + u32 excess;
10022 +
10023 + /* For high speed devices, allow scheduling within the
10024 + * isochronous scheduling threshold. For full speed devices
10025 + * and Intel PCI-based controllers, don't (work around for
10026 + * Intel ICH9 bug).
10027 + */
10028 + if (!stream->highspeed && fotg210->fs_i_thresh)
10029 + next = now + fotg210->i_thresh;
10030 + else
10031 + next = now;
10032 +
10033 + /* Fell behind (by up to twice the slop amount)?
10034 + * We decide based on the time of the last currently-scheduled
10035 + * slot, not the time of the next available slot.
10036 + */
10037 + excess = (stream->next_uframe - period - next) & (mod - 1);
10038 + if (excess >= mod - 2 * SCHEDULE_SLOP)
10039 + start = next + excess - mod + period *
10040 + DIV_ROUND_UP(mod - excess, period);
10041 + else
10042 + start = next + excess + period;
10043 + if (start - now >= mod) {
10044 + fotg210_dbg(fotg210, "request %p would overflow (%d+%d >= %d)\n",
10045 + urb, start - now - period, period,
10046 + mod);
10047 + status = -EFBIG;
10048 + goto fail;
10049 + }
10050 + }
10051 +
10052 + /* need to schedule; when's the next (u)frame we could start?
10053 + * this is bigger than fotg210->i_thresh allows; scheduling itself
10054 + * isn't free, the slop should handle reasonably slow cpus. it
10055 + * can also help high bandwidth if the dma and irq loads don't
10056 + * jump until after the queue is primed.
10057 + */
10058 + else {
10059 + int done = 0;
10060 +
10061 + start = SCHEDULE_SLOP + (now & ~0x07);
10062 +
10063 + /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
10064 +
10065 + /* find a uframe slot with enough bandwidth.
10066 + * Early uframes are more precious because full-speed
10067 + * iso IN transfers can't use late uframes,
10068 + * and therefore they should be allocated last.
10069 + */
10070 + next = start;
10071 + start += period;
10072 + do {
10073 + start--;
10074 + /* check schedule: enough space? */
10075 + if (itd_slot_ok(fotg210, mod, start,
10076 + stream->usecs, period))
10077 + done = 1;
10078 + } while (start > next && !done);
10079 +
10080 + /* no room in the schedule */
10081 + if (!done) {
10082 + fotg210_dbg(fotg210, "iso resched full %p (now %d max %d)\n",
10083 + urb, now, now + mod);
10084 + status = -ENOSPC;
10085 + goto fail;
10086 + }
10087 + }
10088 +
10089 + /* Tried to schedule too far into the future? */
10090 + if (unlikely(start - now + span - period >=
10091 + mod - 2 * SCHEDULE_SLOP)) {
10092 + fotg210_dbg(fotg210, "request %p would overflow (%d+%d >= %d)\n",
10093 + urb, start - now, span - period,
10094 + mod - 2 * SCHEDULE_SLOP);
10095 + status = -EFBIG;
10096 + goto fail;
10097 + }
10098 +
10099 + stream->next_uframe = start & (mod - 1);
10100 +
10101 + /* report high speed start in uframes; full speed, in frames */
10102 + urb->start_frame = stream->next_uframe;
10103 + if (!stream->highspeed)
10104 + urb->start_frame >>= 3;
10105 +
10106 + /* Make sure scan_isoc() sees these */
10107 + if (fotg210->isoc_count == 0)
10108 + fotg210->next_frame = now >> 3;
10109 + return 0;
10110 +
10111 +fail:
10112 + iso_sched_free(stream, sched);
10113 + urb->hcpriv = NULL;
10114 + return status;
10115 +}
10116 +
10117 +static inline void itd_init(struct fotg210_hcd *fotg210,
10118 + struct fotg210_iso_stream *stream, struct fotg210_itd *itd)
10119 +{
10120 + int i;
10121 +
10122 + /* it's been recently zeroed */
10123 + itd->hw_next = FOTG210_LIST_END(fotg210);
10124 + itd->hw_bufp[0] = stream->buf0;
10125 + itd->hw_bufp[1] = stream->buf1;
10126 + itd->hw_bufp[2] = stream->buf2;
10127 +
10128 + for (i = 0; i < 8; i++)
10129 + itd->index[i] = -1;
10130 +
10131 + /* All other fields are filled when scheduling */
10132 +}
10133 +
10134 +static inline void itd_patch(struct fotg210_hcd *fotg210,
10135 + struct fotg210_itd *itd, struct fotg210_iso_sched *iso_sched,
10136 + unsigned index, u16 uframe)
10137 +{
10138 + struct fotg210_iso_packet *uf = &iso_sched->packet[index];
10139 + unsigned pg = itd->pg;
10140 +
10141 + uframe &= 0x07;
10142 + itd->index[uframe] = index;
10143 +
10144 + itd->hw_transaction[uframe] = uf->transaction;
10145 + itd->hw_transaction[uframe] |= cpu_to_hc32(fotg210, pg << 12);
10146 + itd->hw_bufp[pg] |= cpu_to_hc32(fotg210, uf->bufp & ~(u32)0);
10147 + itd->hw_bufp_hi[pg] |= cpu_to_hc32(fotg210, (u32)(uf->bufp >> 32));
10148 +
10149 + /* iso_frame_desc[].offset must be strictly increasing */
10150 + if (unlikely(uf->cross)) {
10151 + u64 bufp = uf->bufp + 4096;
10152 +
10153 + itd->pg = ++pg;
10154 + itd->hw_bufp[pg] |= cpu_to_hc32(fotg210, bufp & ~(u32)0);
10155 + itd->hw_bufp_hi[pg] |= cpu_to_hc32(fotg210, (u32)(bufp >> 32));
10156 + }
10157 +}
10158 +
10159 +static inline void itd_link(struct fotg210_hcd *fotg210, unsigned frame,
10160 + struct fotg210_itd *itd)
10161 +{
10162 + union fotg210_shadow *prev = &fotg210->pshadow[frame];
10163 + __hc32 *hw_p = &fotg210->periodic[frame];
10164 + union fotg210_shadow here = *prev;
10165 + __hc32 type = 0;
10166 +
10167 + /* skip any iso nodes which might belong to previous microframes */
10168 + while (here.ptr) {
10169 + type = Q_NEXT_TYPE(fotg210, *hw_p);
10170 + if (type == cpu_to_hc32(fotg210, Q_TYPE_QH))
10171 + break;
10172 + prev = periodic_next_shadow(fotg210, prev, type);
10173 + hw_p = shadow_next_periodic(fotg210, &here, type);
10174 + here = *prev;
10175 + }
10176 +
10177 + itd->itd_next = here;
10178 + itd->hw_next = *hw_p;
10179 + prev->itd = itd;
10180 + itd->frame = frame;
10181 + wmb();
10182 + *hw_p = cpu_to_hc32(fotg210, itd->itd_dma | Q_TYPE_ITD);
10183 +}
10184 +
10185 +/* fit urb's itds into the selected schedule slot; activate as needed */
10186 +static void itd_link_urb(struct fotg210_hcd *fotg210, struct urb *urb,
10187 + unsigned mod, struct fotg210_iso_stream *stream)
10188 +{
10189 + int packet;
10190 + unsigned next_uframe, uframe, frame;
10191 + struct fotg210_iso_sched *iso_sched = urb->hcpriv;
10192 + struct fotg210_itd *itd;
10193 +
10194 + next_uframe = stream->next_uframe & (mod - 1);
10195 +
10196 + if (unlikely(list_empty(&stream->td_list))) {
10197 + fotg210_to_hcd(fotg210)->self.bandwidth_allocated
10198 + += stream->bandwidth;
10199 + fotg210_dbg(fotg210,
10200 + "schedule devp %s ep%d%s-iso period %d start %d.%d\n",
10201 + urb->dev->devpath, stream->bEndpointAddress & 0x0f,
10202 + (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
10203 + urb->interval,
10204 + next_uframe >> 3, next_uframe & 0x7);
10205 + }
10206 +
10207 + /* fill iTDs uframe by uframe */
10208 + for (packet = 0, itd = NULL; packet < urb->number_of_packets;) {
10209 + if (itd == NULL) {
10210 + /* ASSERT: we have all necessary itds */
10211 +
10212 + /* ASSERT: no itds for this endpoint in this uframe */
10213 +
10214 + itd = list_entry(iso_sched->td_list.next,
10215 + struct fotg210_itd, itd_list);
10216 + list_move_tail(&itd->itd_list, &stream->td_list);
10217 + itd->stream = stream;
10218 + itd->urb = urb;
10219 + itd_init(fotg210, stream, itd);
10220 + }
10221 +
10222 + uframe = next_uframe & 0x07;
10223 + frame = next_uframe >> 3;
10224 +
10225 + itd_patch(fotg210, itd, iso_sched, packet, uframe);
10226 +
10227 + next_uframe += stream->interval;
10228 + next_uframe &= mod - 1;
10229 + packet++;
10230 +
10231 + /* link completed itds into the schedule */
10232 + if (((next_uframe >> 3) != frame)
10233 + || packet == urb->number_of_packets) {
10234 + itd_link(fotg210, frame & (fotg210->periodic_size - 1),
10235 + itd);
10236 + itd = NULL;
10237 + }
10238 + }
10239 + stream->next_uframe = next_uframe;
10240 +
10241 + /* don't need that schedule data any more */
10242 + iso_sched_free(stream, iso_sched);
10243 + urb->hcpriv = NULL;
10244 +
10245 + ++fotg210->isoc_count;
10246 + enable_periodic(fotg210);
10247 +}
10248 +
10249 +#define ISO_ERRS (FOTG210_ISOC_BUF_ERR | FOTG210_ISOC_BABBLE |\
10250 + FOTG210_ISOC_XACTERR)
10251 +
10252 +/* Process and recycle a completed ITD. Return true iff its urb completed,
10253 + * and hence its completion callback probably added things to the hardware
10254 + * schedule.
10255 + *
10256 + * Note that we carefully avoid recycling this descriptor until after any
10257 + * completion callback runs, so that it won't be reused quickly. That is,
10258 + * assuming (a) no more than two urbs per frame on this endpoint, and also
10259 + * (b) only this endpoint's completions submit URBs. It seems some silicon
10260 + * corrupts things if you reuse completed descriptors very quickly...
10261 + */
10262 +static bool itd_complete(struct fotg210_hcd *fotg210, struct fotg210_itd *itd)
10263 +{
10264 + struct urb *urb = itd->urb;
10265 + struct usb_iso_packet_descriptor *desc;
10266 + u32 t;
10267 + unsigned uframe;
10268 + int urb_index = -1;
10269 + struct fotg210_iso_stream *stream = itd->stream;
10270 + struct usb_device *dev;
10271 + bool retval = false;
10272 +
10273 + /* for each uframe with a packet */
10274 + for (uframe = 0; uframe < 8; uframe++) {
10275 + if (likely(itd->index[uframe] == -1))
10276 + continue;
10277 + urb_index = itd->index[uframe];
10278 + desc = &urb->iso_frame_desc[urb_index];
10279 +
10280 + t = hc32_to_cpup(fotg210, &itd->hw_transaction[uframe]);
10281 + itd->hw_transaction[uframe] = 0;
10282 +
10283 + /* report transfer status */
10284 + if (unlikely(t & ISO_ERRS)) {
10285 + urb->error_count++;
10286 + if (t & FOTG210_ISOC_BUF_ERR)
10287 + desc->status = usb_pipein(urb->pipe)
10288 + ? -ENOSR /* hc couldn't read */
10289 + : -ECOMM; /* hc couldn't write */
10290 + else if (t & FOTG210_ISOC_BABBLE)
10291 + desc->status = -EOVERFLOW;
10292 + else /* (t & FOTG210_ISOC_XACTERR) */
10293 + desc->status = -EPROTO;
10294 +
10295 + /* HC need not update length with this error */
10296 + if (!(t & FOTG210_ISOC_BABBLE)) {
10297 + desc->actual_length = FOTG210_ITD_LENGTH(t);
10298 + urb->actual_length += desc->actual_length;
10299 + }
10300 + } else if (likely((t & FOTG210_ISOC_ACTIVE) == 0)) {
10301 + desc->status = 0;
10302 + desc->actual_length = FOTG210_ITD_LENGTH(t);
10303 + urb->actual_length += desc->actual_length;
10304 + } else {
10305 + /* URB was too late */
10306 + desc->status = -EXDEV;
10307 + }
10308 + }
10309 +
10310 + /* handle completion now? */
10311 + if (likely((urb_index + 1) != urb->number_of_packets))
10312 + goto done;
10313 +
10314 + /* ASSERT: it's really the last itd for this urb
10315 + * list_for_each_entry (itd, &stream->td_list, itd_list)
10316 + * BUG_ON (itd->urb == urb);
10317 + */
10318 +
10319 + /* give urb back to the driver; completion often (re)submits */
10320 + dev = urb->dev;
10321 + fotg210_urb_done(fotg210, urb, 0);
10322 + retval = true;
10323 + urb = NULL;
10324 +
10325 + --fotg210->isoc_count;
10326 + disable_periodic(fotg210);
10327 +
10328 + if (unlikely(list_is_singular(&stream->td_list))) {
10329 + fotg210_to_hcd(fotg210)->self.bandwidth_allocated
10330 + -= stream->bandwidth;
10331 + fotg210_dbg(fotg210,
10332 + "deschedule devp %s ep%d%s-iso\n",
10333 + dev->devpath, stream->bEndpointAddress & 0x0f,
10334 + (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
10335 + }
10336 +
10337 +done:
10338 + itd->urb = NULL;
10339 +
10340 + /* Add to the end of the free list for later reuse */
10341 + list_move_tail(&itd->itd_list, &stream->free_list);
10342 +
10343 + /* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
10344 + if (list_empty(&stream->td_list)) {
10345 + list_splice_tail_init(&stream->free_list,
10346 + &fotg210->cached_itd_list);
10347 + start_free_itds(fotg210);
10348 + }
10349 +
10350 + return retval;
10351 +}
10352 +
10353 +static int itd_submit(struct fotg210_hcd *fotg210, struct urb *urb,
10354 + gfp_t mem_flags)
10355 +{
10356 + int status = -EINVAL;
10357 + unsigned long flags;
10358 + struct fotg210_iso_stream *stream;
10359 +
10360 + /* Get iso_stream head */
10361 + stream = iso_stream_find(fotg210, urb);
10362 + if (unlikely(stream == NULL)) {
10363 + fotg210_dbg(fotg210, "can't get iso stream\n");
10364 + return -ENOMEM;
10365 + }
10366 + if (unlikely(urb->interval != stream->interval &&
10367 + fotg210_port_speed(fotg210, 0) ==
10368 + USB_PORT_STAT_HIGH_SPEED)) {
10369 + fotg210_dbg(fotg210, "can't change iso interval %d --> %d\n",
10370 + stream->interval, urb->interval);
10371 + goto done;
10372 + }
10373 +
10374 +#ifdef FOTG210_URB_TRACE
10375 + fotg210_dbg(fotg210,
10376 + "%s %s urb %p ep%d%s len %d, %d pkts %d uframes[%p]\n",
10377 + __func__, urb->dev->devpath, urb,
10378 + usb_pipeendpoint(urb->pipe),
10379 + usb_pipein(urb->pipe) ? "in" : "out",
10380 + urb->transfer_buffer_length,
10381 + urb->number_of_packets, urb->interval,
10382 + stream);
10383 +#endif
10384 +
10385 + /* allocate ITDs w/o locking anything */
10386 + status = itd_urb_transaction(stream, fotg210, urb, mem_flags);
10387 + if (unlikely(status < 0)) {
10388 + fotg210_dbg(fotg210, "can't init itds\n");
10389 + goto done;
10390 + }
10391 +
10392 + /* schedule ... need to lock */
10393 + spin_lock_irqsave(&fotg210->lock, flags);
10394 + if (unlikely(!HCD_HW_ACCESSIBLE(fotg210_to_hcd(fotg210)))) {
10395 + status = -ESHUTDOWN;
10396 + goto done_not_linked;
10397 + }
10398 + status = usb_hcd_link_urb_to_ep(fotg210_to_hcd(fotg210), urb);
10399 + if (unlikely(status))
10400 + goto done_not_linked;
10401 + status = iso_stream_schedule(fotg210, urb, stream);
10402 + if (likely(status == 0))
10403 + itd_link_urb(fotg210, urb, fotg210->periodic_size << 3, stream);
10404 + else
10405 + usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
10406 +done_not_linked:
10407 + spin_unlock_irqrestore(&fotg210->lock, flags);
10408 +done:
10409 + return status;
10410 +}
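
itd_submit() follows a common split: everything that may sleep (stream lookup, iTD allocation) happens with no lock held, and the spinlock is taken only for the short linking step. A stand-alone user-space analogue of that split, with hypothetical names and a pthread mutex in place of fotg210->lock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;
static int *sched_slots[8];
static unsigned n_linked;

static int submit(unsigned value)
{
	/* may sleep or fail: done with no lock held */
	int *item = malloc(sizeof(*item));

	if (!item)
		return -1;
	*item = value;

	/* short critical section: just link the prepared item */
	pthread_mutex_lock(&sched_lock);
	if (n_linked == 8) {
		pthread_mutex_unlock(&sched_lock);
		free(item);
		return -1;
	}
	sched_slots[n_linked++] = item;
	pthread_mutex_unlock(&sched_lock);
	return 0;
}

int main(void)
{
	int ret = submit(42);

	printf("submit -> %d, linked %u\n", ret, n_linked);
	return 0;
}
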
10411 +
10412 +static inline int scan_frame_queue(struct fotg210_hcd *fotg210, unsigned frame,
10413 + unsigned now_frame, bool live)
10414 +{
10415 + unsigned uf;
10416 + bool modified;
10417 + union fotg210_shadow q, *q_p;
10418 + __hc32 type, *hw_p;
10419 +
10420 + /* scan each element in frame's queue for completions */
10421 + q_p = &fotg210->pshadow[frame];
10422 + hw_p = &fotg210->periodic[frame];
10423 + q.ptr = q_p->ptr;
10424 + type = Q_NEXT_TYPE(fotg210, *hw_p);
10425 + modified = false;
10426 +
10427 + while (q.ptr) {
10428 + switch (hc32_to_cpu(fotg210, type)) {
10429 + case Q_TYPE_ITD:
10430 + /* If this ITD is still active, leave it for
10431 + * later processing ... check the next entry.
10432 + * No need to check for activity unless the
10433 + * frame is current.
10434 + */
10435 + if (frame == now_frame && live) {
10436 + rmb();
10437 + for (uf = 0; uf < 8; uf++) {
10438 + if (q.itd->hw_transaction[uf] &
10439 + ITD_ACTIVE(fotg210))
10440 + break;
10441 + }
10442 + if (uf < 8) {
10443 + q_p = &q.itd->itd_next;
10444 + hw_p = &q.itd->hw_next;
10445 + type = Q_NEXT_TYPE(fotg210,
10446 + q.itd->hw_next);
10447 + q = *q_p;
10448 + break;
10449 + }
10450 + }
10451 +
10452 + /* Take finished ITDs out of the schedule
10453 + * and process them: recycle, maybe report
10454 + * URB completion. HC won't cache the
10455 + * pointer for much longer, if at all.
10456 + */
10457 + *q_p = q.itd->itd_next;
10458 + *hw_p = q.itd->hw_next;
10459 + type = Q_NEXT_TYPE(fotg210, q.itd->hw_next);
10460 + wmb();
10461 + modified = itd_complete(fotg210, q.itd);
10462 + q = *q_p;
10463 + break;
10464 + default:
10465 + fotg210_dbg(fotg210, "corrupt type %d frame %d shadow %p\n",
10466 + type, frame, q.ptr);
10467 + fallthrough;
10468 + case Q_TYPE_QH:
10469 + case Q_TYPE_FSTN:
10470 + /* End of the iTDs and siTDs */
10471 + q.ptr = NULL;
10472 + break;
10473 + }
10474 +
10475 + /* assume completion callbacks modify the queue */
10476 + if (unlikely(modified && fotg210->isoc_count > 0))
10477 + return -EINVAL;
10478 + }
10479 + return 0;
10480 +}
10481 +
10482 +static void scan_isoc(struct fotg210_hcd *fotg210)
10483 +{
10484 + unsigned uf, now_frame, frame, ret;
10485 + unsigned fmask = fotg210->periodic_size - 1;
10486 + bool live;
10487 +
10488 + /*
10489 + * When running, scan from last scan point up to "now"
10490 + * else clean up by scanning everything that's left.
10491 + * Touches as few pages as possible: cache-friendly.
10492 + */
10493 + if (fotg210->rh_state >= FOTG210_RH_RUNNING) {
10494 + uf = fotg210_read_frame_index(fotg210);
10495 + now_frame = (uf >> 3) & fmask;
10496 + live = true;
10497 + } else {
10498 + now_frame = (fotg210->next_frame - 1) & fmask;
10499 + live = false;
10500 + }
10501 + fotg210->now_frame = now_frame;
10502 +
10503 + frame = fotg210->next_frame;
10504 + for (;;) {
10505 + ret = 1;
10506 + while (ret != 0)
10507 + ret = scan_frame_queue(fotg210, frame,
10508 + now_frame, live);
10509 +
10510 + /* Stop when we have reached the current frame */
10511 + if (frame == now_frame)
10512 + break;
10513 + frame = (frame + 1) & fmask;
10514 + }
10515 + fotg210->next_frame = now_frame;
10516 +}
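
The walk in scan_isoc() visits every frame from the previous stopping point up to and including the current one, wrapping with the power-of-two mask. A tiny self-contained sketch with assumed numbers (the real default periodic_size is larger):

#include <stdio.h>

int main(void)
{
	unsigned fmask = 8 - 1;		/* periodic_size == 8 */
	unsigned next_frame = 6;	/* where the previous scan stopped */
	unsigned now_frame = 2;		/* current frame (ring has wrapped) */
	unsigned frame = next_frame;

	for (;;) {
		printf("scan frame %u\n", frame);	/* 6 7 0 1 2 */
		if (frame == now_frame)
			break;
		frame = (frame + 1) & fmask;
	}
	return 0;
}
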
10517 +
10518 +/* Display / Set uframe_periodic_max
10519 + */
10520 +static ssize_t uframe_periodic_max_show(struct device *dev,
10521 + struct device_attribute *attr, char *buf)
10522 +{
10523 + struct fotg210_hcd *fotg210;
10524 + int n;
10525 +
10526 + fotg210 = hcd_to_fotg210(bus_to_hcd(dev_get_drvdata(dev)));
10527 + n = scnprintf(buf, PAGE_SIZE, "%d\n", fotg210->uframe_periodic_max);
10528 + return n;
10529 +}
10530 +
10531 +
10532 +static ssize_t uframe_periodic_max_store(struct device *dev,
10533 + struct device_attribute *attr, const char *buf, size_t count)
10534 +{
10535 + struct fotg210_hcd *fotg210;
10536 + unsigned uframe_periodic_max;
10537 + unsigned frame, uframe;
10538 + unsigned short allocated_max;
10539 + unsigned long flags;
10540 + ssize_t ret;
10541 +
10542 + fotg210 = hcd_to_fotg210(bus_to_hcd(dev_get_drvdata(dev)));
10543 + if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
10544 + return -EINVAL;
10545 +
10546 + if (uframe_periodic_max < 100 || uframe_periodic_max >= 125) {
10547 + fotg210_info(fotg210, "rejecting invalid request for uframe_periodic_max=%u\n",
10548 + uframe_periodic_max);
10549 + return -EINVAL;
10550 + }
10551 +
10552 + ret = -EINVAL;
10553 +
10554 + /*
10555 + * lock, so that our checking does not race with possible periodic
10556 + * bandwidth allocation through submitting new urbs.
10557 + */
10558 + spin_lock_irqsave(&fotg210->lock, flags);
10559 +
10560 + /*
10561 + * for request to decrease max periodic bandwidth, we have to check
10562 + * every microframe in the schedule to see whether the decrease is
10563 + * possible.
10564 + */
10565 + if (uframe_periodic_max < fotg210->uframe_periodic_max) {
10566 + allocated_max = 0;
10567 +
10568 + for (frame = 0; frame < fotg210->periodic_size; ++frame)
10569 + for (uframe = 0; uframe < 7; ++uframe)
10570 + allocated_max = max(allocated_max,
10571 + periodic_usecs(fotg210, frame,
10572 + uframe));
10573 +
10574 + if (allocated_max > uframe_periodic_max) {
10575 + fotg210_info(fotg210,
10576 + "cannot decrease uframe_periodic_max because periodic bandwidth is already allocated (%u > %u)\n",
10577 + allocated_max, uframe_periodic_max);
10578 + goto out_unlock;
10579 + }
10580 + }
10581 +
10582 + /* increasing is always ok */
10583 +
10584 + fotg210_info(fotg210,
10585 + "setting max periodic bandwidth to %u%% (== %u usec/uframe)\n",
10586 + 100 * uframe_periodic_max/125, uframe_periodic_max);
10587 +
10588 + if (uframe_periodic_max != 100)
10589 + fotg210_warn(fotg210, "max periodic bandwidth set is non-standard\n");
10590 +
10591 + fotg210->uframe_periodic_max = uframe_periodic_max;
10592 + ret = count;
10593 +
10594 +out_unlock:
10595 + spin_unlock_irqrestore(&fotg210->lock, flags);
10596 + return ret;
10597 +}
10598 +
10599 +static DEVICE_ATTR_RW(uframe_periodic_max);
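
The attribute above is exposed through sysfs on the controller's device. A hypothetical user-space example of changing it: the exact path depends on how the platform device is named, so the one below is only an assumption. The unit is microseconds out of the 125 us microframe, and per the store routine above the accepted range is 100 (the standard 80 % periodic budget) up to 124.

#include <stdio.h>

int main(void)
{
	/* assumed path; the device directory name is platform-specific */
	const char *path =
		"/sys/bus/platform/devices/fotg210-hcd/uframe_periodic_max";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "110\n");	/* raise the periodic budget to 88 % */
	fclose(f);
	return 0;
}

Writing needs root, and any value outside 100..124 is rejected with -EINVAL, as the range check in uframe_periodic_max_store() shows.
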
10600 +
10601 +static inline int create_sysfs_files(struct fotg210_hcd *fotg210)
10602 +{
10603 + struct device *controller = fotg210_to_hcd(fotg210)->self.controller;
10604 +
10605 + return device_create_file(controller, &dev_attr_uframe_periodic_max);
10606 +}
10607 +
10608 +static inline void remove_sysfs_files(struct fotg210_hcd *fotg210)
10609 +{
10610 + struct device *controller = fotg210_to_hcd(fotg210)->self.controller;
10611 +
10612 + device_remove_file(controller, &dev_attr_uframe_periodic_max);
10613 +}
10614 +/* On some systems, leaving remote wakeup enabled prevents system shutdown.
10615 + * The firmware seems to think that powering off is a wakeup event!
10616 + * This routine turns off remote wakeup and everything else, on all ports.
10617 + */
10618 +static void fotg210_turn_off_all_ports(struct fotg210_hcd *fotg210)
10619 +{
10620 + u32 __iomem *status_reg = &fotg210->regs->port_status;
10621 +
10622 + fotg210_writel(fotg210, PORT_RWC_BITS, status_reg);
10623 +}
10624 +
10625 +/* Halt HC, turn off all ports, and let the BIOS use the companion controllers.
10626 + * Must be called with interrupts enabled and the lock not held.
10627 + */
10628 +static void fotg210_silence_controller(struct fotg210_hcd *fotg210)
10629 +{
10630 + fotg210_halt(fotg210);
10631 +
10632 + spin_lock_irq(&fotg210->lock);
10633 + fotg210->rh_state = FOTG210_RH_HALTED;
10634 + fotg210_turn_off_all_ports(fotg210);
10635 + spin_unlock_irq(&fotg210->lock);
10636 +}
10637 +
10638 +/* fotg210_shutdown kicks in for silicon on any bus (not just pci, etc).
10639 + * This forcibly disables dma and IRQs, helping kexec and other cases
10640 + * where the next system software may expect clean state.
10641 + */
10642 +static void fotg210_shutdown(struct usb_hcd *hcd)
10643 +{
10644 + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
10645 +
10646 + spin_lock_irq(&fotg210->lock);
10647 + fotg210->shutdown = true;
10648 + fotg210->rh_state = FOTG210_RH_STOPPING;
10649 + fotg210->enabled_hrtimer_events = 0;
10650 + spin_unlock_irq(&fotg210->lock);
10651 +
10652 + fotg210_silence_controller(fotg210);
10653 +
10654 + hrtimer_cancel(&fotg210->hrtimer);
10655 +}
10656 +
10657 +/* fotg210_work is called from some interrupts, timers, and so on.
10658 + * it calls driver completion functions, after dropping fotg210->lock.
10659 + */
10660 +static void fotg210_work(struct fotg210_hcd *fotg210)
10661 +{
10662 + /* another CPU may drop fotg210->lock during a schedule scan while
10663 + * it reports urb completions. this flag guards against bogus
10664 + * attempts at re-entrant schedule scanning.
10665 + */
10666 + if (fotg210->scanning) {
10667 + fotg210->need_rescan = true;
10668 + return;
10669 + }
10670 + fotg210->scanning = true;
10671 +
10672 +rescan:
10673 + fotg210->need_rescan = false;
10674 + if (fotg210->async_count)
10675 + scan_async(fotg210);
10676 + if (fotg210->intr_count > 0)
10677 + scan_intr(fotg210);
10678 + if (fotg210->isoc_count > 0)
10679 + scan_isoc(fotg210);
10680 + if (fotg210->need_rescan)
10681 + goto rescan;
10682 + fotg210->scanning = false;
10683 +
10684 + /* the IO watchdog guards against hardware or driver bugs that
10685 + * misplace IRQs, and should let us run completely without IRQs.
10686 + * such lossage has been observed on both VT6202 and VT8235.
10687 + */
10688 + turn_on_io_watchdog(fotg210);
10689 +}
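
The scanning/need_rescan pair above keeps schedule scanning from re-entering itself when a completion callback (run with the lock dropped) queues more work. A toy, single-threaded sketch of the same flag protocol, with recursion standing in for the completion path; it only models the flags, not the locking:

#include <stdbool.h>
#include <stdio.h>

static bool scanning, need_rescan;
static int pending = 3;

static void do_work(void)
{
	if (scanning) {			/* re-entered: remember and bail out */
		need_rescan = true;
		return;
	}
	scanning = true;
rescan:
	need_rescan = false;
	if (pending > 0) {
		printf("scanning, %d left\n", --pending);
		do_work();		/* models a completion re-queuing work */
	}
	if (need_rescan)
		goto rescan;
	scanning = false;
}

int main(void)
{
	do_work();
	return 0;
}
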
10690 +
10691 +/* Called when the fotg210_hcd module is removed.
10692 + */
10693 +static void fotg210_stop(struct usb_hcd *hcd)
10694 +{
10695 + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
10696 +
10697 + fotg210_dbg(fotg210, "stop\n");
10698 +
10699 + /* no more interrupts ... */
10700 +
10701 + spin_lock_irq(&fotg210->lock);
10702 + fotg210->enabled_hrtimer_events = 0;
10703 + spin_unlock_irq(&fotg210->lock);
10704 +
10705 + fotg210_quiesce(fotg210);
10706 + fotg210_silence_controller(fotg210);
10707 + fotg210_reset(fotg210);
10708 +
10709 + hrtimer_cancel(&fotg210->hrtimer);
10710 + remove_sysfs_files(fotg210);
10711 + remove_debug_files(fotg210);
10712 +
10713 + /* root hub is shut down separately (first, when possible) */
10714 + spin_lock_irq(&fotg210->lock);
10715 + end_free_itds(fotg210);
10716 + spin_unlock_irq(&fotg210->lock);
10717 + fotg210_mem_cleanup(fotg210);
10718 +
10719 +#ifdef FOTG210_STATS
10720 + fotg210_dbg(fotg210, "irq normal %ld err %ld iaa %ld (lost %ld)\n",
10721 + fotg210->stats.normal, fotg210->stats.error,
10722 + fotg210->stats.iaa, fotg210->stats.lost_iaa);
10723 + fotg210_dbg(fotg210, "complete %ld unlink %ld\n",
10724 + fotg210->stats.complete, fotg210->stats.unlink);
10725 +#endif
10726 +
10727 + dbg_status(fotg210, "fotg210_stop completed",
10728 + fotg210_readl(fotg210, &fotg210->regs->status));
10729 +}
10730 +
10731 +/* one-time init, only for memory state */
10732 +static int hcd_fotg210_init(struct usb_hcd *hcd)
10733 +{
10734 + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
10735 + u32 temp;
10736 + int retval;
10737 + u32 hcc_params;
10738 + struct fotg210_qh_hw *hw;
10739 +
10740 + spin_lock_init(&fotg210->lock);
10741 +
10742 + /*
10743 + * keep the io watchdog enabled by default; well-behaved HCDs can turn it off later
10744 + */
10745 + fotg210->need_io_watchdog = 1;
10746 +
10747 + hrtimer_init(&fotg210->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
10748 + fotg210->hrtimer.function = fotg210_hrtimer_func;
10749 + fotg210->next_hrtimer_event = FOTG210_HRTIMER_NO_EVENT;
10750 +
10751 + hcc_params = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
10752 +
10753 + /*
10754 + * by default set standard 80% (== 100 usec/uframe) max periodic
10755 + * bandwidth as required by USB 2.0
10756 + */
10757 + fotg210->uframe_periodic_max = 100;
10758 +
10759 + /*
10760 + * hw default: 1K periodic list heads, one per frame.
10761 + * periodic_size can shrink by USBCMD update if hcc_params allows.
10762 + */
10763 + fotg210->periodic_size = DEFAULT_I_TDPS;
10764 + INIT_LIST_HEAD(&fotg210->intr_qh_list);
10765 + INIT_LIST_HEAD(&fotg210->cached_itd_list);
10766 +
10767 + if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
10768 + /* periodic schedule size can be smaller than default */
10769 + switch (FOTG210_TUNE_FLS) {
10770 + case 0:
10771 + fotg210->periodic_size = 1024;
10772 + break;
10773 + case 1:
10774 + fotg210->periodic_size = 512;
10775 + break;
10776 + case 2:
10777 + fotg210->periodic_size = 256;
10778 + break;
10779 + default:
10780 + BUG();
10781 + }
10782 + }
10783 + retval = fotg210_mem_init(fotg210, GFP_KERNEL);
10784 + if (retval < 0)
10785 + return retval;
10786 +
10787 + /* controllers may cache some of the periodic schedule ... */
10788 + fotg210->i_thresh = 2;
10789 +
10790 + /*
10791 + * dedicate a qh for the async ring head, since we couldn't unlink
10792 + * a 'real' qh without stopping the async schedule [4.8]. use it
10793 + * as the 'reclamation list head' too.
10794 + * its dummy is used in hw_alt_next of many tds, to prevent the qh
10795 + * from automatically advancing to the next td after short reads.
10796 + */
10797 + fotg210->async->qh_next.qh = NULL;
10798 + hw = fotg210->async->hw;
10799 + hw->hw_next = QH_NEXT(fotg210, fotg210->async->qh_dma);
10800 + hw->hw_info1 = cpu_to_hc32(fotg210, QH_HEAD);
10801 + hw->hw_token = cpu_to_hc32(fotg210, QTD_STS_HALT);
10802 + hw->hw_qtd_next = FOTG210_LIST_END(fotg210);
10803 + fotg210->async->qh_state = QH_STATE_LINKED;
10804 + hw->hw_alt_next = QTD_NEXT(fotg210, fotg210->async->dummy->qtd_dma);
10805 +
10806 + /* clear interrupt enables, set irq latency */
10807 + if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
10808 + log2_irq_thresh = 0;
10809 + temp = 1 << (16 + log2_irq_thresh);
10810 + if (HCC_CANPARK(hcc_params)) {
10811 + /* HW default park == 3, on hardware that supports it (like
10812 + * NVidia and ALI silicon), maximizes throughput on the async
10813 + * schedule by avoiding QH fetches between transfers.
10814 + *
10815 + * With fast usb storage devices and NForce2, "park" seems to
10816 + * cause problems: throughput reduction (!), data errors...
10817 + */
10818 + if (park) {
10819 + park = min_t(unsigned, park, 3);
10820 + temp |= CMD_PARK;
10821 + temp |= park << 8;
10822 + }
10823 + fotg210_dbg(fotg210, "park %d\n", park);
10824 + }
10825 + if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
10826 + /* periodic schedule size can be smaller than default */
10827 + temp &= ~(3 << 2);
10828 + temp |= (FOTG210_TUNE_FLS << 2);
10829 + }
10830 + fotg210->command = temp;
10831 +
10832 + /* Accept arbitrarily long scatter-gather lists */
10833 + if (!hcd->localmem_pool)
10834 + hcd->self.sg_tablesize = ~0;
10835 + return 0;
10836 +}
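
A worked example of the command-register template assembled in hcd_fotg210_init(), using assumed tuning values; only the shifts visible in the code are reproduced, and the driver's CMD_* bit definitions are deliberately not restated here:

#include <stdio.h>

int main(void)
{
	int log2_irq_thresh = 3;	/* 2^3 = 8 microframes of IRQ latency */
	unsigned park = 2;		/* park count, capped at 3 in the code */
	unsigned fls = 1;		/* frame list size select: 512 frames  */
	unsigned temp;

	temp = 1u << (16 + log2_irq_thresh);		/* IRQ latency field */
	temp |= park << 8;				/* park count field  */
	temp = (temp & ~(3u << 2)) | (fls << 2);	/* frame list size   */

	printf("command template = 0x%08x\n", temp);	/* 0x00080204 */
	return 0;
}
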
10837 +
10838 +/* start HC running; it's halted, hcd_fotg210_init() has been run (once) */
10839 +static int fotg210_run(struct usb_hcd *hcd)
10840 +{
10841 + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
10842 + u32 temp;
10843 +
10844 + hcd->uses_new_polling = 1;
10845 +
10846 + /* EHCI spec section 4.1 */
10847 +
10848 + fotg210_writel(fotg210, fotg210->periodic_dma,
10849 + &fotg210->regs->frame_list);
10850 + fotg210_writel(fotg210, (u32)fotg210->async->qh_dma,
10851 + &fotg210->regs->async_next);
10852 +
10853 + /*
10854 + * hcc_params controls whether fotg210->regs->segment must (!!!)
10855 + * be used; it constrains QH/ITD/SITD and QTD locations.
10856 + * dma_pool consistent memory always uses segment zero.
10857 + * streaming mappings for I/O buffers, like dma_map_single(),
10858 + * can return segments above 4GB, if the device allows.
10859 + *
10860 + * NOTE: the dma mask is visible through dev->dma_mask, so
10861 + * drivers can pass this info along ... like NETIF_F_HIGHDMA,
10862 + * Scsi_Host.highmem_io, and so forth. It's readonly to all
10863 + * host side drivers though.
10864 + */
10865 + fotg210_readl(fotg210, &fotg210->caps->hcc_params);
10866 +
10867 + /*
10868 + * Philips, Intel, and maybe others need CMD_RUN before the
10869 + * root hub will detect new devices (why?); NEC doesn't
10870 + */
10871 + fotg210->command &= ~(CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
10872 + fotg210->command |= CMD_RUN;
10873 + fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
10874 + dbg_cmd(fotg210, "init", fotg210->command);
10875 +
10876 + /*
10877 + * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
10878 + * are explicitly handed to companion controller(s), so no TT is
10879 + * involved with the root hub. (Except where one is integrated,
10880 + * and there's no companion controller unless maybe for USB OTG.)
10881 + *
10882 + * Turning on the CF flag will transfer ownership of all ports
10883 + * from the companions to the EHCI controller. If any of the
10884 + * companions are in the middle of a port reset at the time, it
10885 + * could cause trouble. Write-locking ehci_cf_port_reset_rwsem
10886 + * guarantees that no resets are in progress. After we set CF,
10887 + * a short delay lets the hardware catch up; new resets shouldn't
10888 + * be started before the port switching actions could complete.
10889 + */
10890 + down_write(&ehci_cf_port_reset_rwsem);
10891 + fotg210->rh_state = FOTG210_RH_RUNNING;
10892 + /* unblock posted writes */
10893 + fotg210_readl(fotg210, &fotg210->regs->command);
10894 + usleep_range(5000, 10000);
10895 + up_write(&ehci_cf_port_reset_rwsem);
10896 + fotg210->last_periodic_enable = ktime_get_real();
10897 +
10898 + temp = HC_VERSION(fotg210,
10899 + fotg210_readl(fotg210, &fotg210->caps->hc_capbase));
10900 + fotg210_info(fotg210,
10901 + "USB %x.%x started, EHCI %x.%02x\n",
10902 + ((fotg210->sbrn & 0xf0) >> 4), (fotg210->sbrn & 0x0f),
10903 + temp >> 8, temp & 0xff);
10904 +
10905 + fotg210_writel(fotg210, INTR_MASK,
10906 + &fotg210->regs->intr_enable); /* Turn On Interrupts */
10907 +
10908 + /* GRR this is run-once init(), being done every time the HC starts.
10909 + * So long as they're part of class devices, we can't do it in init()
10910 + * since the class device isn't created that early.
10911 + */
10912 + create_debug_files(fotg210);
10913 + create_sysfs_files(fotg210);
10914 +
10915 + return 0;
10916 +}
10917 +
10918 +static int fotg210_setup(struct usb_hcd *hcd)
10919 +{
10920 + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
10921 + int retval;
10922 +
10923 + fotg210->regs = (void __iomem *)fotg210->caps +
10924 + HC_LENGTH(fotg210,
10925 + fotg210_readl(fotg210, &fotg210->caps->hc_capbase));
10926 + dbg_hcs_params(fotg210, "reset");
10927 + dbg_hcc_params(fotg210, "reset");
10928 +
10929 + /* cache this readonly data; minimize chip reads */
10930 + fotg210->hcs_params = fotg210_readl(fotg210,
10931 + &fotg210->caps->hcs_params);
10932 +
10933 + fotg210->sbrn = HCD_USB2;
10934 +
10935 + /* data structure init */
10936 + retval = hcd_fotg210_init(hcd);
10937 + if (retval)
10938 + return retval;
10939 +
10940 + retval = fotg210_halt(fotg210);
10941 + if (retval)
10942 + return retval;
10943 +
10944 + fotg210_reset(fotg210);
10945 +
10946 + return 0;
10947 +}
10948 +
10949 +static irqreturn_t fotg210_irq(struct usb_hcd *hcd)
10950 +{
10951 + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
10952 + u32 status, masked_status, pcd_status = 0, cmd;
10953 + int bh;
10954 +
10955 + spin_lock(&fotg210->lock);
10956 +
10957 + status = fotg210_readl(fotg210, &fotg210->regs->status);
10958 +
10959 + /* e.g. cardbus physical eject */
10960 + if (status == ~(u32) 0) {
10961 + fotg210_dbg(fotg210, "device removed\n");
10962 + goto dead;
10963 + }
10964 +
10965 + /*
10966 + * We don't use STS_FLR, but some controllers don't like it to
10967 + * remain on, so mask it out along with the other status bits.
10968 + */
10969 + masked_status = status & (INTR_MASK | STS_FLR);
10970 +
10971 + /* Shared IRQ? */
10972 + if (!masked_status ||
10973 + unlikely(fotg210->rh_state == FOTG210_RH_HALTED)) {
10974 + spin_unlock(&fotg210->lock);
10975 + return IRQ_NONE;
10976 + }
10977 +
10978 + /* clear (just) interrupts */
10979 + fotg210_writel(fotg210, masked_status, &fotg210->regs->status);
10980 + cmd = fotg210_readl(fotg210, &fotg210->regs->command);
10981 + bh = 0;
10982 +
10983 + /* unrequested/ignored: Frame List Rollover */
10984 + dbg_status(fotg210, "irq", status);
10985 +
10986 + /* INT, ERR, and IAA interrupt rates can be throttled */
10987 +
10988 + /* normal [4.15.1.2] or error [4.15.1.1] completion */
10989 + if (likely((status & (STS_INT|STS_ERR)) != 0)) {
10990 + if (likely((status & STS_ERR) == 0))
10991 + INCR(fotg210->stats.normal);
10992 + else
10993 + INCR(fotg210->stats.error);
10994 + bh = 1;
10995 + }
10996 +
10997 + /* complete the unlinking of some qh [4.15.2.3] */
10998 + if (status & STS_IAA) {
10999 +
11000 + /* Turn off the IAA watchdog */
11001 + fotg210->enabled_hrtimer_events &=
11002 + ~BIT(FOTG210_HRTIMER_IAA_WATCHDOG);
11003 +
11004 + /*
11005 + * Mild optimization: Allow another IAAD to reset the
11006 + * hrtimer, if one occurs before the next expiration.
11007 + * In theory we could always cancel the hrtimer, but
11008 + * tests show that about half the time it will be reset
11009 + * for some other event anyway.
11010 + */
11011 + if (fotg210->next_hrtimer_event == FOTG210_HRTIMER_IAA_WATCHDOG)
11012 + ++fotg210->next_hrtimer_event;
11013 +
11014 + /* guard against (alleged) silicon errata */
11015 + if (cmd & CMD_IAAD)
11016 + fotg210_dbg(fotg210, "IAA with IAAD still set?\n");
11017 + if (fotg210->async_iaa) {
11018 + INCR(fotg210->stats.iaa);
11019 + end_unlink_async(fotg210);
11020 + } else
11021 + fotg210_dbg(fotg210, "IAA with nothing unlinked?\n");
11022 + }
11023 +
11024 + /* remote wakeup [4.3.1] */
11025 + if (status & STS_PCD) {
11026 + int pstatus;
11027 + u32 __iomem *status_reg = &fotg210->regs->port_status;
11028 +
11029 + /* kick root hub later */
11030 + pcd_status = status;
11031 +
11032 + /* resume root hub? */
11033 + if (fotg210->rh_state == FOTG210_RH_SUSPENDED)
11034 + usb_hcd_resume_root_hub(hcd);
11035 +
11036 + pstatus = fotg210_readl(fotg210, status_reg);
11037 +
11038 + if (test_bit(0, &fotg210->suspended_ports) &&
11039 + ((pstatus & PORT_RESUME) ||
11040 + !(pstatus & PORT_SUSPEND)) &&
11041 + (pstatus & PORT_PE) &&
11042 + fotg210->reset_done[0] == 0) {
11043 +
11044 + /* start 20 msec resume signaling from this port,
11045 + * and make hub_wq collect PORT_STAT_C_SUSPEND to
11046 + * stop that signaling. Use 5 ms extra for safety,
11047 + * like usb_port_resume() does.
11048 + */
11049 + fotg210->reset_done[0] = jiffies + msecs_to_jiffies(25);
11050 + set_bit(0, &fotg210->resuming_ports);
11051 + fotg210_dbg(fotg210, "port 1 remote wakeup\n");
11052 + mod_timer(&hcd->rh_timer, fotg210->reset_done[0]);
11053 + }
11054 + }
11055 +
11056 + /* PCI errors [4.15.2.4] */
11057 + if (unlikely((status & STS_FATAL) != 0)) {
11058 + fotg210_err(fotg210, "fatal error\n");
11059 + dbg_cmd(fotg210, "fatal", cmd);
11060 + dbg_status(fotg210, "fatal", status);
11061 +dead:
11062 + usb_hc_died(hcd);
11063 +
11064 + /* Don't let the controller do anything more */
11065 + fotg210->shutdown = true;
11066 + fotg210->rh_state = FOTG210_RH_STOPPING;
11067 + fotg210->command &= ~(CMD_RUN | CMD_ASE | CMD_PSE);
11068 + fotg210_writel(fotg210, fotg210->command,
11069 + &fotg210->regs->command);
11070 + fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable);
11071 + fotg210_handle_controller_death(fotg210);
11072 +
11073 + /* Handle completions when the controller stops */
11074 + bh = 0;
11075 + }
11076 +
11077 + if (bh)
11078 + fotg210_work(fotg210);
11079 + spin_unlock(&fotg210->lock);
11080 + if (pcd_status)
11081 + usb_hcd_poll_rh_status(hcd);
11082 + return IRQ_HANDLED;
11083 +}
11084 +
11085 +/* non-error returns are a promise to giveback() the urb later
11086 + * we drop ownership so next owner (or urb unlink) can get it
11087 + *
11088 + * urb + dev is in hcd.self.controller.urb_list
11089 + * we're queueing TDs onto software and hardware lists
11090 + *
11091 + * hcd-specific init for hcpriv hasn't been done yet
11092 + *
11093 + * NOTE: control, bulk, and interrupt share the same code to append TDs
11094 + * to a (possibly active) QH, and the same QH scanning code.
11095 + */
11096 +static int fotg210_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
11097 + gfp_t mem_flags)
11098 +{
11099 + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
11100 + struct list_head qtd_list;
11101 +
11102 + INIT_LIST_HEAD(&qtd_list);
11103 +
11104 + switch (usb_pipetype(urb->pipe)) {
11105 + case PIPE_CONTROL:
11106 + /* qh_completions() code doesn't handle all the fault cases
11107 + * in multi-TD control transfers. Even 1KB is rare anyway.
11108 + */
11109 + if (urb->transfer_buffer_length > (16 * 1024))
11110 + return -EMSGSIZE;
11111 + fallthrough;
11112 + /* case PIPE_BULK: */
11113 + default:
11114 + if (!qh_urb_transaction(fotg210, urb, &qtd_list, mem_flags))
11115 + return -ENOMEM;
11116 + return submit_async(fotg210, urb, &qtd_list, mem_flags);
11117 +
11118 + case PIPE_INTERRUPT:
11119 + if (!qh_urb_transaction(fotg210, urb, &qtd_list, mem_flags))
11120 + return -ENOMEM;
11121 + return intr_submit(fotg210, urb, &qtd_list, mem_flags);
11122 +
11123 + case PIPE_ISOCHRONOUS:
11124 + return itd_submit(fotg210, urb, mem_flags);
11125 + }
11126 +}
11127 +
11128 +/* remove from hardware lists
11129 + * completions normally happen asynchronously
11130 + */
11131 +
11132 +static int fotg210_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
11133 +{
11134 + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
11135 + struct fotg210_qh *qh;
11136 + unsigned long flags;
11137 + int rc;
11138 +
11139 + spin_lock_irqsave(&fotg210->lock, flags);
11140 + rc = usb_hcd_check_unlink_urb(hcd, urb, status);
11141 + if (rc)
11142 + goto done;
11143 +
11144 + switch (usb_pipetype(urb->pipe)) {
11145 + /* case PIPE_CONTROL: */
11146 + /* case PIPE_BULK:*/
11147 + default:
11148 + qh = (struct fotg210_qh *) urb->hcpriv;
11149 + if (!qh)
11150 + break;
11151 + switch (qh->qh_state) {
11152 + case QH_STATE_LINKED:
11153 + case QH_STATE_COMPLETING:
11154 + start_unlink_async(fotg210, qh);
11155 + break;
11156 + case QH_STATE_UNLINK:
11157 + case QH_STATE_UNLINK_WAIT:
11158 + /* already started */
11159 + break;
11160 + case QH_STATE_IDLE:
11161 + /* QH might be waiting for a Clear-TT-Buffer */
11162 + qh_completions(fotg210, qh);
11163 + break;
11164 + }
11165 + break;
11166 +
11167 + case PIPE_INTERRUPT:
11168 + qh = (struct fotg210_qh *) urb->hcpriv;
11169 + if (!qh)
11170 + break;
11171 + switch (qh->qh_state) {
11172 + case QH_STATE_LINKED:
11173 + case QH_STATE_COMPLETING:
11174 + start_unlink_intr(fotg210, qh);
11175 + break;
11176 + case QH_STATE_IDLE:
11177 + qh_completions(fotg210, qh);
11178 + break;
11179 + default:
11180 + fotg210_dbg(fotg210, "bogus qh %p state %d\n",
11181 + qh, qh->qh_state);
11182 + goto done;
11183 + }
11184 + break;
11185 +
11186 + case PIPE_ISOCHRONOUS:
11187 + /* itd... */
11188 +
11189 + /* wait till next completion, do it then. */
11190 + /* completion irqs can wait up to 1024 msec, */
11191 + break;
11192 + }
11193 +done:
11194 + spin_unlock_irqrestore(&fotg210->lock, flags);
11195 + return rc;
11196 +}
11197 +
11198 +/* bulk qh holds the data toggle */
11199 +
11200 +static void fotg210_endpoint_disable(struct usb_hcd *hcd,
11201 + struct usb_host_endpoint *ep)
11202 +{
11203 + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
11204 + unsigned long flags;
11205 + struct fotg210_qh *qh, *tmp;
11206 +
11207 + /* ASSERT: any requests/urbs are being unlinked */
11208 + /* ASSERT: nobody can be submitting urbs for this any more */
11209 +
11210 +rescan:
11211 + spin_lock_irqsave(&fotg210->lock, flags);
11212 + qh = ep->hcpriv;
11213 + if (!qh)
11214 + goto done;
11215 +
11216 + /* endpoints can be iso streams. for now, we don't
11217 + * accelerate iso completions ... so spin a while.
11218 + */
11219 + if (qh->hw == NULL) {
11220 + struct fotg210_iso_stream *stream = ep->hcpriv;
11221 +
11222 + if (!list_empty(&stream->td_list))
11223 + goto idle_timeout;
11224 +
11225 + /* BUG_ON(!list_empty(&stream->free_list)); */
11226 + kfree(stream);
11227 + goto done;
11228 + }
11229 +
11230 + if (fotg210->rh_state < FOTG210_RH_RUNNING)
11231 + qh->qh_state = QH_STATE_IDLE;
11232 + switch (qh->qh_state) {
11233 + case QH_STATE_LINKED:
11234 + case QH_STATE_COMPLETING:
11235 + for (tmp = fotg210->async->qh_next.qh;
11236 + tmp && tmp != qh;
11237 + tmp = tmp->qh_next.qh)
11238 + continue;
11239 + /* periodic qh self-unlinks on empty, and a COMPLETING qh
11240 + * may already be unlinked.
11241 + */
11242 + if (tmp)
11243 + start_unlink_async(fotg210, qh);
11244 + fallthrough;
11245 + case QH_STATE_UNLINK: /* wait for hw to finish? */
11246 + case QH_STATE_UNLINK_WAIT:
11247 +idle_timeout:
11248 + spin_unlock_irqrestore(&fotg210->lock, flags);
11249 + schedule_timeout_uninterruptible(1);
11250 + goto rescan;
11251 + case QH_STATE_IDLE: /* fully unlinked */
11252 + if (qh->clearing_tt)
11253 + goto idle_timeout;
11254 + if (list_empty(&qh->qtd_list)) {
11255 + qh_destroy(fotg210, qh);
11256 + break;
11257 + }
11258 + fallthrough;
11259 + default:
11260 + /* caller was supposed to have unlinked any requests;
11261 + * that's not our job. just leak this memory.
11262 + */
11263 + fotg210_err(fotg210, "qh %p (#%02x) state %d%s\n",
11264 + qh, ep->desc.bEndpointAddress, qh->qh_state,
11265 + list_empty(&qh->qtd_list) ? "" : "(has tds)");
11266 + break;
11267 + }
11268 +done:
11269 + ep->hcpriv = NULL;
11270 + spin_unlock_irqrestore(&fotg210->lock, flags);
11271 +}
11272 +
11273 +static void fotg210_endpoint_reset(struct usb_hcd *hcd,
11274 + struct usb_host_endpoint *ep)
11275 +{
11276 + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
11277 + struct fotg210_qh *qh;
11278 + int eptype = usb_endpoint_type(&ep->desc);
11279 + int epnum = usb_endpoint_num(&ep->desc);
11280 + int is_out = usb_endpoint_dir_out(&ep->desc);
11281 + unsigned long flags;
11282 +
11283 + if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
11284 + return;
11285 +
11286 + spin_lock_irqsave(&fotg210->lock, flags);
11287 + qh = ep->hcpriv;
11288 +
11289 + /* For Bulk and Interrupt endpoints we maintain the toggle state
11290 + * in the hardware; the toggle bits in udev aren't used at all.
11291 + * When an endpoint is reset by usb_clear_halt() we must reset
11292 + * the toggle bit in the QH.
11293 + */
11294 + if (qh) {
11295 + usb_settoggle(qh->dev, epnum, is_out, 0);
11296 + if (!list_empty(&qh->qtd_list)) {
11297 + WARN_ONCE(1, "clear_halt for a busy endpoint\n");
11298 + } else if (qh->qh_state == QH_STATE_LINKED ||
11299 + qh->qh_state == QH_STATE_COMPLETING) {
11300 +
11301 + /* The toggle value in the QH can't be updated
11302 + * while the QH is active. Unlink it now;
11303 + * re-linking will call qh_refresh().
11304 + */
11305 + if (eptype == USB_ENDPOINT_XFER_BULK)
11306 + start_unlink_async(fotg210, qh);
11307 + else
11308 + start_unlink_intr(fotg210, qh);
11309 + }
11310 + }
11311 + spin_unlock_irqrestore(&fotg210->lock, flags);
11312 +}
11313 +
11314 +static int fotg210_get_frame(struct usb_hcd *hcd)
11315 +{
11316 + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
11317 +
11318 + return (fotg210_read_frame_index(fotg210) >> 3) %
11319 + fotg210->periodic_size;
11320 +}
11321 +
11322 +/* As in other dual-role designs such as the ChipIdea HDRC, the controller
11323 + * cannot be a fully separate module or device: its registers (and irq)
11324 + * are shared between the host/gadget/otg functions, and to facilitate
11325 + * role switching we cannot give the fotg210 driver exclusive access to those.
11326 + */
11327 +MODULE_DESCRIPTION(DRIVER_DESC);
11328 +MODULE_AUTHOR(DRIVER_AUTHOR);
11329 +MODULE_LICENSE("GPL");
11330 +
11331 +static const struct hc_driver fotg210_fotg210_hc_driver = {
11332 + .description = hcd_name,
11333 + .product_desc = "Faraday USB2.0 Host Controller",
11334 + .hcd_priv_size = sizeof(struct fotg210_hcd),
11335 +
11336 + /*
11337 + * generic hardware linkage
11338 + */
11339 + .irq = fotg210_irq,
11340 + .flags = HCD_MEMORY | HCD_DMA | HCD_USB2,
11341 +
11342 + /*
11343 + * basic lifecycle operations
11344 + */
11345 + .reset = hcd_fotg210_init,
11346 + .start = fotg210_run,
11347 + .stop = fotg210_stop,
11348 + .shutdown = fotg210_shutdown,
11349 +
11350 + /*
11351 + * managing i/o requests and associated device resources
11352 + */
11353 + .urb_enqueue = fotg210_urb_enqueue,
11354 + .urb_dequeue = fotg210_urb_dequeue,
11355 + .endpoint_disable = fotg210_endpoint_disable,
11356 + .endpoint_reset = fotg210_endpoint_reset,
11357 +
11358 + /*
11359 + * scheduling support
11360 + */
11361 + .get_frame_number = fotg210_get_frame,
11362 +
11363 + /*
11364 + * root hub support
11365 + */
11366 + .hub_status_data = fotg210_hub_status_data,
11367 + .hub_control = fotg210_hub_control,
11368 + .bus_suspend = fotg210_bus_suspend,
11369 + .bus_resume = fotg210_bus_resume,
11370 +
11371 + .relinquish_port = fotg210_relinquish_port,
11372 + .port_handed_over = fotg210_port_handed_over,
11373 +
11374 + .clear_tt_buffer_complete = fotg210_clear_tt_buffer_complete,
11375 +};
11376 +
11377 +static void fotg210_init(struct fotg210_hcd *fotg210)
11378 +{
11379 + u32 value;
11380 +
11381 + iowrite32(GMIR_MDEV_INT | GMIR_MOTG_INT | GMIR_INT_POLARITY,
11382 + &fotg210->regs->gmir);
11383 +
11384 + value = ioread32(&fotg210->regs->otgcsr);
11385 + value &= ~OTGCSR_A_BUS_DROP;
11386 + value |= OTGCSR_A_BUS_REQ;
11387 + iowrite32(value, &fotg210->regs->otgcsr);
11388 +}
11389 +
11390 +/*
11391 + * fotg210_hcd_probe - initialize faraday FOTG210 HCDs
11392 + *
11393 + * Allocates basic resources for this USB host controller, and
11394 + * then invokes the start() method for the HCD associated with it
11395 + * through the hotplug entry's driver_data.
11396 + */
11397 +static int fotg210_hcd_probe(struct platform_device *pdev)
11398 +{
11399 + struct device *dev = &pdev->dev;
11400 + struct usb_hcd *hcd;
11401 + struct resource *res;
11402 + int irq;
11403 + int retval;
11404 + struct fotg210_hcd *fotg210;
11405 +
11406 + if (usb_disabled())
11407 + return -ENODEV;
11408 +
11409 + pdev->dev.power.power_state = PMSG_ON;
11410 +
11411 + irq = platform_get_irq(pdev, 0);
11412 + if (irq < 0)
11413 + return irq;
11414 +
11415 + hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev,
11416 + dev_name(dev));
11417 + if (!hcd) {
11418 + dev_err(dev, "failed to create hcd\n");
11419 + retval = -ENOMEM;
11420 + goto fail_create_hcd;
11421 + }
11422 +
11423 + hcd->has_tt = 1;
11424 +
11425 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
11426 + hcd->regs = devm_ioremap_resource(&pdev->dev, res);
11427 + if (IS_ERR(hcd->regs)) {
11428 + retval = PTR_ERR(hcd->regs);
11429 + goto failed_put_hcd;
11430 + }
11431 +
11432 + hcd->rsrc_start = res->start;
11433 + hcd->rsrc_len = resource_size(res);
11434 +
11435 + fotg210 = hcd_to_fotg210(hcd);
11436 +
11437 + fotg210->caps = hcd->regs;
11438 +
11439 + /* It's OK not to supply this clock */
11440 + fotg210->pclk = clk_get(dev, "PCLK");
11441 + if (!IS_ERR(fotg210->pclk)) {
11442 + retval = clk_prepare_enable(fotg210->pclk);
11443 + if (retval) {
11444 + dev_err(dev, "failed to enable PCLK\n");
11445 + goto failed_put_hcd;
11446 + }
11447 + } else if (PTR_ERR(fotg210->pclk) == -EPROBE_DEFER) {
11448 + /*
11449 + * Percolate deferrals; for anything else,
11450 + * just live without the clocking.
11451 + */
11452 + retval = PTR_ERR(fotg210->pclk);
11453 + goto failed_dis_clk;
11454 + }
11455 +
11456 + retval = fotg210_setup(hcd);
11457 + if (retval)
11458 + goto failed_dis_clk;
11459 +
11460 + fotg210_init(fotg210);
11461 +
11462 + retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
11463 + if (retval) {
11464 + dev_err(dev, "failed to add hcd with err %d\n", retval);
11465 + goto failed_dis_clk;
11466 + }
11467 + device_wakeup_enable(hcd->self.controller);
11468 + platform_set_drvdata(pdev, hcd);
11469 +
11470 + return retval;
11471 +
11472 +failed_dis_clk:
11473 + if (!IS_ERR(fotg210->pclk)) {
11474 + clk_disable_unprepare(fotg210->pclk);
11475 + clk_put(fotg210->pclk);
11476 + }
11477 +failed_put_hcd:
11478 + usb_put_hcd(hcd);
11479 +fail_create_hcd:
11480 + dev_err(dev, "init %s fail, %d\n", dev_name(dev), retval);
11481 + return retval;
11482 +}
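
The optional-clock dance in the probe path above (try clk_get(), tolerate everything except -EPROBE_DEFER) can also be written with the managed helper devm_clk_get_optional(), which returns NULL for a missing clock and removes the explicit clk_put()/disable from the error and remove paths. This is only an alternative sketch, not what the patch does:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_get_pclk(struct platform_device *pdev, struct clk **pclk)
{
	*pclk = devm_clk_get_optional(&pdev->dev, "PCLK");
	if (IS_ERR(*pclk))
		return PTR_ERR(*pclk);	/* percolates -EPROBE_DEFER */

	/* a NULL optional clock is silently accepted by the clk API */
	return clk_prepare_enable(*pclk);
}
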
11483 +
11484 +/*
11485 + * fotg210_hcd_remove - shutdown processing for FOTG210 HCDs
11486 + * @pdev: platform device of the USB Host Controller being removed
11487 + *
11488 + */
11489 +static int fotg210_hcd_remove(struct platform_device *pdev)
11490 +{
11491 + struct usb_hcd *hcd = platform_get_drvdata(pdev);
11492 + struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
11493 +
11494 + if (!IS_ERR(fotg210->pclk)) {
11495 + clk_disable_unprepare(fotg210->pclk);
11496 + clk_put(fotg210->pclk);
11497 + }
11498 +
11499 + usb_remove_hcd(hcd);
11500 + usb_put_hcd(hcd);
11501 +
11502 + return 0;
11503 +}
11504 +
11505 +#ifdef CONFIG_OF
11506 +static const struct of_device_id fotg210_of_match[] = {
11507 + { .compatible = "faraday,fotg210" },
11508 + {},
11509 +};
11510 +MODULE_DEVICE_TABLE(of, fotg210_of_match);
11511 +#endif
11512 +
11513 +static struct platform_driver fotg210_hcd_driver = {
11514 + .driver = {
11515 + .name = "fotg210-hcd",
11516 + .of_match_table = of_match_ptr(fotg210_of_match),
11517 + },
11518 + .probe = fotg210_hcd_probe,
11519 + .remove = fotg210_hcd_remove,
11520 +};
11521 +
11522 +static int __init fotg210_hcd_init(void)
11523 +{
11524 + int retval = 0;
11525 +
11526 + if (usb_disabled())
11527 + return -ENODEV;
11528 +
11529 + set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
11530 + if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
11531 + test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
11532 + pr_warn("Warning! fotg210_hcd should always be loaded before uhci_hcd and ohci_hcd, not after\n");
11533 +
11534 + pr_debug("%s: block sizes: qh %zd qtd %zd itd %zd\n",
11535 + hcd_name, sizeof(struct fotg210_qh),
11536 + sizeof(struct fotg210_qtd),
11537 + sizeof(struct fotg210_itd));
11538 +
11539 + fotg210_debug_root = debugfs_create_dir("fotg210", usb_debug_root);
11540 +
11541 + retval = platform_driver_register(&fotg210_hcd_driver);
11542 + if (retval < 0)
11543 + goto clean;
11544 + return retval;
11545 +
11546 +clean:
11547 + debugfs_remove(fotg210_debug_root);
11548 + fotg210_debug_root = NULL;
11549 +
11550 + clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
11551 + return retval;
11552 +}
11553 +module_init(fotg210_hcd_init);
11554 +
11555 +static void __exit fotg210_hcd_cleanup(void)
11556 +{
11557 + platform_driver_unregister(&fotg210_hcd_driver);
11558 + debugfs_remove(fotg210_debug_root);
11559 + clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
11560 +}
11561 +module_exit(fotg210_hcd_cleanup);
11562 --- a/drivers/usb/gadget/udc/fotg210-udc.c
11563 +++ /dev/null
11564 @@ -1,1239 +0,0 @@
11565 -// SPDX-License-Identifier: GPL-2.0
11566 -/*
11567 - * FOTG210 UDC Driver supports Bulk transfer so far
11568 - *
11569 - * Copyright (C) 2013 Faraday Technology Corporation
11570 - *
11571 - * Author : Yuan-Hsin Chen <yhchen@faraday-tech.com>
11572 - */
11573 -
11574 -#include <linux/dma-mapping.h>
11575 -#include <linux/err.h>
11576 -#include <linux/interrupt.h>
11577 -#include <linux/io.h>
11578 -#include <linux/module.h>
11579 -#include <linux/platform_device.h>
11580 -#include <linux/usb/ch9.h>
11581 -#include <linux/usb/gadget.h>
11582 -
11583 -#include "fotg210.h"
11584 -
11585 -#define DRIVER_DESC "FOTG210 USB Device Controller Driver"
11586 -#define DRIVER_VERSION "30-April-2013"
11587 -
11588 -static const char udc_name[] = "fotg210_udc";
11589 -static const char * const fotg210_ep_name[] = {
11590 - "ep0", "ep1", "ep2", "ep3", "ep4"};
11591 -
11592 -static void fotg210_disable_fifo_int(struct fotg210_ep *ep)
11593 -{
11594 - u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR1);
11595 -
11596 - if (ep->dir_in)
11597 - value |= DMISGR1_MF_IN_INT(ep->epnum - 1);
11598 - else
11599 - value |= DMISGR1_MF_OUTSPK_INT(ep->epnum - 1);
11600 - iowrite32(value, ep->fotg210->reg + FOTG210_DMISGR1);
11601 -}
11602 -
11603 -static void fotg210_enable_fifo_int(struct fotg210_ep *ep)
11604 -{
11605 - u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR1);
11606 -
11607 - if (ep->dir_in)
11608 - value &= ~DMISGR1_MF_IN_INT(ep->epnum - 1);
11609 - else
11610 - value &= ~DMISGR1_MF_OUTSPK_INT(ep->epnum - 1);
11611 - iowrite32(value, ep->fotg210->reg + FOTG210_DMISGR1);
11612 -}
11613 -
11614 -static void fotg210_set_cxdone(struct fotg210_udc *fotg210)
11615 -{
11616 - u32 value = ioread32(fotg210->reg + FOTG210_DCFESR);
11617 -
11618 - value |= DCFESR_CX_DONE;
11619 - iowrite32(value, fotg210->reg + FOTG210_DCFESR);
11620 -}
11621 -
11622 -static void fotg210_done(struct fotg210_ep *ep, struct fotg210_request *req,
11623 - int status)
11624 -{
11625 - list_del_init(&req->queue);
11626 -
11627 - /* don't modify queue heads during completion callback */
11628 - if (ep->fotg210->gadget.speed == USB_SPEED_UNKNOWN)
11629 - req->req.status = -ESHUTDOWN;
11630 - else
11631 - req->req.status = status;
11632 -
11633 - spin_unlock(&ep->fotg210->lock);
11634 - usb_gadget_giveback_request(&ep->ep, &req->req);
11635 - spin_lock(&ep->fotg210->lock);
11636 -
11637 - if (ep->epnum) {
11638 - if (list_empty(&ep->queue))
11639 - fotg210_disable_fifo_int(ep);
11640 - } else {
11641 - fotg210_set_cxdone(ep->fotg210);
11642 - }
11643 -}
11644 -
11645 -static void fotg210_fifo_ep_mapping(struct fotg210_ep *ep, u32 epnum,
11646 - u32 dir_in)
11647 -{
11648 - struct fotg210_udc *fotg210 = ep->fotg210;
11649 - u32 val;
11650 -
11651 - /* Driver should map an ep to a fifo and then map the fifo
11652 - * to the ep. What a brain-damaged design!
11653 - */
11654 -
11655 - /* map a fifo to an ep */
11656 - val = ioread32(fotg210->reg + FOTG210_EPMAP);
11657 - val &= ~EPMAP_FIFONOMSK(epnum, dir_in);
11658 - val |= EPMAP_FIFONO(epnum, dir_in);
11659 - iowrite32(val, fotg210->reg + FOTG210_EPMAP);
11660 -
11661 - /* map the ep to the fifo */
11662 - val = ioread32(fotg210->reg + FOTG210_FIFOMAP);
11663 - val &= ~FIFOMAP_EPNOMSK(epnum);
11664 - val |= FIFOMAP_EPNO(epnum);
11665 - iowrite32(val, fotg210->reg + FOTG210_FIFOMAP);
11666 -
11667 - /* enable fifo */
11668 - val = ioread32(fotg210->reg + FOTG210_FIFOCF);
11669 - val |= FIFOCF_FIFO_EN(epnum - 1);
11670 - iowrite32(val, fotg210->reg + FOTG210_FIFOCF);
11671 -}
11672 -
11673 -static void fotg210_set_fifo_dir(struct fotg210_ep *ep, u32 epnum, u32 dir_in)
11674 -{
11675 - struct fotg210_udc *fotg210 = ep->fotg210;
11676 - u32 val;
11677 -
11678 - val = ioread32(fotg210->reg + FOTG210_FIFOMAP);
11679 - val |= (dir_in ? FIFOMAP_DIRIN(epnum - 1) : FIFOMAP_DIROUT(epnum - 1));
11680 - iowrite32(val, fotg210->reg + FOTG210_FIFOMAP);
11681 -}
11682 -
11683 -static void fotg210_set_tfrtype(struct fotg210_ep *ep, u32 epnum, u32 type)
11684 -{
11685 - struct fotg210_udc *fotg210 = ep->fotg210;
11686 - u32 val;
11687 -
11688 - val = ioread32(fotg210->reg + FOTG210_FIFOCF);
11689 - val |= FIFOCF_TYPE(type, epnum - 1);
11690 - iowrite32(val, fotg210->reg + FOTG210_FIFOCF);
11691 -}
11692 -
11693 -static void fotg210_set_mps(struct fotg210_ep *ep, u32 epnum, u32 mps,
11694 - u32 dir_in)
11695 -{
11696 - struct fotg210_udc *fotg210 = ep->fotg210;
11697 - u32 val;
11698 - u32 offset = dir_in ? FOTG210_INEPMPSR(epnum) :
11699 - FOTG210_OUTEPMPSR(epnum);
11700 -
11701 - val = ioread32(fotg210->reg + offset);
11702 - val |= INOUTEPMPSR_MPS(mps);
11703 - iowrite32(val, fotg210->reg + offset);
11704 -}
11705 -
11706 -static int fotg210_config_ep(struct fotg210_ep *ep,
11707 - const struct usb_endpoint_descriptor *desc)
11708 -{
11709 - struct fotg210_udc *fotg210 = ep->fotg210;
11710 -
11711 - fotg210_set_fifo_dir(ep, ep->epnum, ep->dir_in);
11712 - fotg210_set_tfrtype(ep, ep->epnum, ep->type);
11713 - fotg210_set_mps(ep, ep->epnum, ep->ep.maxpacket, ep->dir_in);
11714 - fotg210_fifo_ep_mapping(ep, ep->epnum, ep->dir_in);
11715 -
11716 - fotg210->ep[ep->epnum] = ep;
11717 -
11718 - return 0;
11719 -}
11720 -
11721 -static int fotg210_ep_enable(struct usb_ep *_ep,
11722 - const struct usb_endpoint_descriptor *desc)
11723 -{
11724 - struct fotg210_ep *ep;
11725 -
11726 - ep = container_of(_ep, struct fotg210_ep, ep);
11727 -
11728 - ep->desc = desc;
11729 - ep->epnum = usb_endpoint_num(desc);
11730 - ep->type = usb_endpoint_type(desc);
11731 - ep->dir_in = usb_endpoint_dir_in(desc);
11732 - ep->ep.maxpacket = usb_endpoint_maxp(desc);
11733 -
11734 - return fotg210_config_ep(ep, desc);
11735 -}
11736 -
11737 -static void fotg210_reset_tseq(struct fotg210_udc *fotg210, u8 epnum)
11738 -{
11739 - struct fotg210_ep *ep = fotg210->ep[epnum];
11740 - u32 value;
11741 - void __iomem *reg;
11742 -
11743 - reg = (ep->dir_in) ?
11744 - fotg210->reg + FOTG210_INEPMPSR(epnum) :
11745 - fotg210->reg + FOTG210_OUTEPMPSR(epnum);
11746 -
11747 - /* Note: Driver needs to set and clear INOUTEPMPSR_RESET_TSEQ
11748 - * bit. Controller wouldn't clear this bit. WTF!!!
11749 - */
11750 -
11751 - value = ioread32(reg);
11752 - value |= INOUTEPMPSR_RESET_TSEQ;
11753 - iowrite32(value, reg);
11754 -
11755 - value = ioread32(reg);
11756 - value &= ~INOUTEPMPSR_RESET_TSEQ;
11757 - iowrite32(value, reg);
11758 -}
11759 -
11760 -static int fotg210_ep_release(struct fotg210_ep *ep)
11761 -{
11762 - if (!ep->epnum)
11763 - return 0;
11764 - ep->epnum = 0;
11765 - ep->stall = 0;
11766 - ep->wedged = 0;
11767 -
11768 - fotg210_reset_tseq(ep->fotg210, ep->epnum);
11769 -
11770 - return 0;
11771 -}
11772 -
11773 -static int fotg210_ep_disable(struct usb_ep *_ep)
11774 -{
11775 - struct fotg210_ep *ep;
11776 - struct fotg210_request *req;
11777 - unsigned long flags;
11778 -
11779 - BUG_ON(!_ep);
11780 -
11781 - ep = container_of(_ep, struct fotg210_ep, ep);
11782 -
11783 - while (!list_empty(&ep->queue)) {
11784 - req = list_entry(ep->queue.next,
11785 - struct fotg210_request, queue);
11786 - spin_lock_irqsave(&ep->fotg210->lock, flags);
11787 - fotg210_done(ep, req, -ECONNRESET);
11788 - spin_unlock_irqrestore(&ep->fotg210->lock, flags);
11789 - }
11790 -
11791 - return fotg210_ep_release(ep);
11792 -}
11793 -
11794 -static struct usb_request *fotg210_ep_alloc_request(struct usb_ep *_ep,
11795 - gfp_t gfp_flags)
11796 -{
11797 - struct fotg210_request *req;
11798 -
11799 - req = kzalloc(sizeof(struct fotg210_request), gfp_flags);
11800 - if (!req)
11801 - return NULL;
11802 -
11803 - INIT_LIST_HEAD(&req->queue);
11804 -
11805 - return &req->req;
11806 -}
11807 -
11808 -static void fotg210_ep_free_request(struct usb_ep *_ep,
11809 - struct usb_request *_req)
11810 -{
11811 - struct fotg210_request *req;
11812 -
11813 - req = container_of(_req, struct fotg210_request, req);
11814 - kfree(req);
11815 -}
11816 -
11817 -static void fotg210_enable_dma(struct fotg210_ep *ep,
11818 - dma_addr_t d, u32 len)
11819 -{
11820 - u32 value;
11821 - struct fotg210_udc *fotg210 = ep->fotg210;
11822 -
11823 - /* set transfer length and direction */
11824 - value = ioread32(fotg210->reg + FOTG210_DMACPSR1);
11825 - value &= ~(DMACPSR1_DMA_LEN(0xFFFF) | DMACPSR1_DMA_TYPE(1));
11826 - value |= DMACPSR1_DMA_LEN(len) | DMACPSR1_DMA_TYPE(ep->dir_in);
11827 - iowrite32(value, fotg210->reg + FOTG210_DMACPSR1);
11828 -
11829 - /* set device DMA target FIFO number */
11830 - value = ioread32(fotg210->reg + FOTG210_DMATFNR);
11831 - if (ep->epnum)
11832 - value |= DMATFNR_ACC_FN(ep->epnum - 1);
11833 - else
11834 - value |= DMATFNR_ACC_CXF;
11835 - iowrite32(value, fotg210->reg + FOTG210_DMATFNR);
11836 -
11837 - /* set DMA memory address */
11838 - iowrite32(d, fotg210->reg + FOTG210_DMACPSR2);
11839 -
11840 - /* enable MDMA_EROR and MDMA_CMPLT interrupt */
11841 - value = ioread32(fotg210->reg + FOTG210_DMISGR2);
11842 - value &= ~(DMISGR2_MDMA_CMPLT | DMISGR2_MDMA_ERROR);
11843 - iowrite32(value, fotg210->reg + FOTG210_DMISGR2);
11844 -
11845 - /* start DMA */
11846 - value = ioread32(fotg210->reg + FOTG210_DMACPSR1);
11847 - value |= DMACPSR1_DMA_START;
11848 - iowrite32(value, fotg210->reg + FOTG210_DMACPSR1);
11849 -}
11850 -
11851 -static void fotg210_disable_dma(struct fotg210_ep *ep)
11852 -{
11853 - iowrite32(DMATFNR_DISDMA, ep->fotg210->reg + FOTG210_DMATFNR);
11854 -}
11855 -
11856 -static void fotg210_wait_dma_done(struct fotg210_ep *ep)
11857 -{
11858 - u32 value;
11859 -
11860 - do {
11861 - value = ioread32(ep->fotg210->reg + FOTG210_DISGR2);
11862 - if ((value & DISGR2_USBRST_INT) ||
11863 - (value & DISGR2_DMA_ERROR))
11864 - goto dma_reset;
11865 - } while (!(value & DISGR2_DMA_CMPLT));
11866 -
11867 - value &= ~DISGR2_DMA_CMPLT;
11868 - iowrite32(value, ep->fotg210->reg + FOTG210_DISGR2);
11869 - return;
11870 -
11871 -dma_reset:
11872 - value = ioread32(ep->fotg210->reg + FOTG210_DMACPSR1);
11873 - value |= DMACPSR1_DMA_ABORT;
11874 - iowrite32(value, ep->fotg210->reg + FOTG210_DMACPSR1);
11875 -
11876 - /* reset fifo */
11877 - if (ep->epnum) {
11878 - value = ioread32(ep->fotg210->reg +
11879 - FOTG210_FIBCR(ep->epnum - 1));
11880 - value |= FIBCR_FFRST;
11881 - iowrite32(value, ep->fotg210->reg +
11882 - FOTG210_FIBCR(ep->epnum - 1));
11883 - } else {
11884 - value = ioread32(ep->fotg210->reg + FOTG210_DCFESR);
11885 - value |= DCFESR_CX_CLR;
11886 - iowrite32(value, ep->fotg210->reg + FOTG210_DCFESR);
11887 - }
11888 -}
11889 -
11890 -static void fotg210_start_dma(struct fotg210_ep *ep,
11891 - struct fotg210_request *req)
11892 -{
11893 - struct device *dev = &ep->fotg210->gadget.dev;
11894 - dma_addr_t d;
11895 - u8 *buffer;
11896 - u32 length;
11897 -
11898 - if (ep->epnum) {
11899 - if (ep->dir_in) {
11900 - buffer = req->req.buf;
11901 - length = req->req.length;
11902 - } else {
11903 - buffer = req->req.buf + req->req.actual;
11904 - length = ioread32(ep->fotg210->reg +
11905 - FOTG210_FIBCR(ep->epnum - 1)) & FIBCR_BCFX;
11906 - if (length > req->req.length - req->req.actual)
11907 - length = req->req.length - req->req.actual;
11908 - }
11909 - } else {
11910 - buffer = req->req.buf + req->req.actual;
11911 - if (req->req.length - req->req.actual > ep->ep.maxpacket)
11912 - length = ep->ep.maxpacket;
11913 - else
11914 - length = req->req.length - req->req.actual;
11915 - }
11916 -
11917 - d = dma_map_single(dev, buffer, length,
11918 - ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
11919 -
11920 - if (dma_mapping_error(dev, d)) {
11921 - pr_err("dma_mapping_error\n");
11922 - return;
11923 - }
11924 -
11925 - fotg210_enable_dma(ep, d, length);
11926 -
11927 - /* check if dma is done */
11928 - fotg210_wait_dma_done(ep);
11929 -
11930 - fotg210_disable_dma(ep);
11931 -
11932 - /* update actual transfer length */
11933 - req->req.actual += length;
11934 -
11935 - dma_unmap_single(dev, d, length, DMA_TO_DEVICE);
11936 -}
11937 -
11938 -static void fotg210_ep0_queue(struct fotg210_ep *ep,
11939 - struct fotg210_request *req)
11940 -{
11941 - if (!req->req.length) {
11942 - fotg210_done(ep, req, 0);
11943 - return;
11944 - }
11945 - if (ep->dir_in) { /* if IN */
11946 - fotg210_start_dma(ep, req);
11947 - if (req->req.length == req->req.actual)
11948 - fotg210_done(ep, req, 0);
11949 - } else { /* OUT */
11950 - u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR0);
11951 -
11952 - value &= ~DMISGR0_MCX_OUT_INT;
11953 - iowrite32(value, ep->fotg210->reg + FOTG210_DMISGR0);
11954 - }
11955 -}
11956 -
11957 -static int fotg210_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
11958 - gfp_t gfp_flags)
11959 -{
11960 - struct fotg210_ep *ep;
11961 - struct fotg210_request *req;
11962 - unsigned long flags;
11963 - int request = 0;
11964 -
11965 - ep = container_of(_ep, struct fotg210_ep, ep);
11966 - req = container_of(_req, struct fotg210_request, req);
11967 -
11968 - if (ep->fotg210->gadget.speed == USB_SPEED_UNKNOWN)
11969 - return -ESHUTDOWN;
11970 -
11971 - spin_lock_irqsave(&ep->fotg210->lock, flags);
11972 -
11973 - if (list_empty(&ep->queue))
11974 - request = 1;
11975 -
11976 - list_add_tail(&req->queue, &ep->queue);
11977 -
11978 - req->req.actual = 0;
11979 - req->req.status = -EINPROGRESS;
11980 -
11981 - if (!ep->epnum) /* ep0 */
11982 - fotg210_ep0_queue(ep, req);
11983 - else if (request && !ep->stall)
11984 - fotg210_enable_fifo_int(ep);
11985 -
11986 - spin_unlock_irqrestore(&ep->fotg210->lock, flags);
11987 -
11988 - return 0;
11989 -}
11990 -
11991 -static int fotg210_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
11992 -{
11993 - struct fotg210_ep *ep;
11994 - struct fotg210_request *req;
11995 - unsigned long flags;
11996 -
11997 - ep = container_of(_ep, struct fotg210_ep, ep);
11998 - req = container_of(_req, struct fotg210_request, req);
11999 -
12000 - spin_lock_irqsave(&ep->fotg210->lock, flags);
12001 - if (!list_empty(&ep->queue))
12002 - fotg210_done(ep, req, -ECONNRESET);
12003 - spin_unlock_irqrestore(&ep->fotg210->lock, flags);
12004 -
12005 - return 0;
12006 -}
12007 -
12008 -static void fotg210_set_epnstall(struct fotg210_ep *ep)
12009 -{
12010 - struct fotg210_udc *fotg210 = ep->fotg210;
12011 - u32 value;
12012 - void __iomem *reg;
12013 -
12014 - /* check if IN FIFO is empty before stall */
12015 - if (ep->dir_in) {
12016 - do {
12017 - value = ioread32(fotg210->reg + FOTG210_DCFESR);
12018 - } while (!(value & DCFESR_FIFO_EMPTY(ep->epnum - 1)));
12019 - }
12020 -
12021 - reg = (ep->dir_in) ?
12022 - fotg210->reg + FOTG210_INEPMPSR(ep->epnum) :
12023 - fotg210->reg + FOTG210_OUTEPMPSR(ep->epnum);
12024 - value = ioread32(reg);
12025 - value |= INOUTEPMPSR_STL_EP;
12026 - iowrite32(value, reg);
12027 -}
12028 -
12029 -static void fotg210_clear_epnstall(struct fotg210_ep *ep)
12030 -{
12031 - struct fotg210_udc *fotg210 = ep->fotg210;
12032 - u32 value;
12033 - void __iomem *reg;
12034 -
12035 - reg = (ep->dir_in) ?
12036 - fotg210->reg + FOTG210_INEPMPSR(ep->epnum) :
12037 - fotg210->reg + FOTG210_OUTEPMPSR(ep->epnum);
12038 - value = ioread32(reg);
12039 - value &= ~INOUTEPMPSR_STL_EP;
12040 - iowrite32(value, reg);
12041 -}
12042 -
12043 -static int fotg210_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedge)
12044 -{
12045 - struct fotg210_ep *ep;
12046 - struct fotg210_udc *fotg210;
12047 - unsigned long flags;
12048 -
12049 - ep = container_of(_ep, struct fotg210_ep, ep);
12050 -
12051 - fotg210 = ep->fotg210;
12052 -
12053 - spin_lock_irqsave(&ep->fotg210->lock, flags);
12054 -
12055 - if (value) {
12056 - fotg210_set_epnstall(ep);
12057 - ep->stall = 1;
12058 - if (wedge)
12059 - ep->wedged = 1;
12060 - } else {
12061 - fotg210_reset_tseq(fotg210, ep->epnum);
12062 - fotg210_clear_epnstall(ep);
12063 - ep->stall = 0;
12064 - ep->wedged = 0;
12065 - if (!list_empty(&ep->queue))
12066 - fotg210_enable_fifo_int(ep);
12067 - }
12068 -
12069 - spin_unlock_irqrestore(&ep->fotg210->lock, flags);
12070 - return 0;
12071 -}
12072 -
12073 -static int fotg210_ep_set_halt(struct usb_ep *_ep, int value)
12074 -{
12075 - return fotg210_set_halt_and_wedge(_ep, value, 0);
12076 -}
12077 -
12078 -static int fotg210_ep_set_wedge(struct usb_ep *_ep)
12079 -{
12080 - return fotg210_set_halt_and_wedge(_ep, 1, 1);
12081 -}
12082 -
12083 -static void fotg210_ep_fifo_flush(struct usb_ep *_ep)
12084 -{
12085 -}
12086 -
12087 -static const struct usb_ep_ops fotg210_ep_ops = {
12088 - .enable = fotg210_ep_enable,
12089 - .disable = fotg210_ep_disable,
12090 -
12091 - .alloc_request = fotg210_ep_alloc_request,
12092 - .free_request = fotg210_ep_free_request,
12093 -
12094 - .queue = fotg210_ep_queue,
12095 - .dequeue = fotg210_ep_dequeue,
12096 -
12097 - .set_halt = fotg210_ep_set_halt,
12098 - .fifo_flush = fotg210_ep_fifo_flush,
12099 - .set_wedge = fotg210_ep_set_wedge,
12100 -};
12101 -
12102 -static void fotg210_clear_tx0byte(struct fotg210_udc *fotg210)
12103 -{
12104 - u32 value = ioread32(fotg210->reg + FOTG210_TX0BYTE);
12105 -
12106 - value &= ~(TX0BYTE_EP1 | TX0BYTE_EP2 | TX0BYTE_EP3
12107 - | TX0BYTE_EP4);
12108 - iowrite32(value, fotg210->reg + FOTG210_TX0BYTE);
12109 -}
12110 -
12111 -static void fotg210_clear_rx0byte(struct fotg210_udc *fotg210)
12112 -{
12113 - u32 value = ioread32(fotg210->reg + FOTG210_RX0BYTE);
12114 -
12115 - value &= ~(RX0BYTE_EP1 | RX0BYTE_EP2 | RX0BYTE_EP3
12116 - | RX0BYTE_EP4);
12117 - iowrite32(value, fotg210->reg + FOTG210_RX0BYTE);
12118 -}
12119 -
12120 -/* read 8-byte setup packet only */
12121 -static void fotg210_rdsetupp(struct fotg210_udc *fotg210,
12122 - u8 *buffer)
12123 -{
12124 - int i = 0;
12125 - u8 *tmp = buffer;
12126 - u32 data;
12127 - u32 length = 8;
12128 -
12129 - iowrite32(DMATFNR_ACC_CXF, fotg210->reg + FOTG210_DMATFNR);
12130 -
12131 - for (i = (length >> 2); i > 0; i--) {
12132 - data = ioread32(fotg210->reg + FOTG210_CXPORT);
12133 - *tmp = data & 0xFF;
12134 - *(tmp + 1) = (data >> 8) & 0xFF;
12135 - *(tmp + 2) = (data >> 16) & 0xFF;
12136 - *(tmp + 3) = (data >> 24) & 0xFF;
12137 - tmp = tmp + 4;
12138 - }
12139 -
12140 - switch (length % 4) {
12141 - case 1:
12142 - data = ioread32(fotg210->reg + FOTG210_CXPORT);
12143 - *tmp = data & 0xFF;
12144 - break;
12145 - case 2:
12146 - data = ioread32(fotg210->reg + FOTG210_CXPORT);
12147 - *tmp = data & 0xFF;
12148 - *(tmp + 1) = (data >> 8) & 0xFF;
12149 - break;
12150 - case 3:
12151 - data = ioread32(fotg210->reg + FOTG210_CXPORT);
12152 - *tmp = data & 0xFF;
12153 - *(tmp + 1) = (data >> 8) & 0xFF;
12154 - *(tmp + 2) = (data >> 16) & 0xFF;
12155 - break;
12156 - default:
12157 - break;
12158 - }
12159 -
12160 - iowrite32(DMATFNR_DISDMA, fotg210->reg + FOTG210_DMATFNR);
12161 -}
12162 -
12163 -static void fotg210_set_configuration(struct fotg210_udc *fotg210)
12164 -{
12165 - u32 value = ioread32(fotg210->reg + FOTG210_DAR);
12166 -
12167 - value |= DAR_AFT_CONF;
12168 - iowrite32(value, fotg210->reg + FOTG210_DAR);
12169 -}
12170 -
12171 -static void fotg210_set_dev_addr(struct fotg210_udc *fotg210, u32 addr)
12172 -{
12173 - u32 value = ioread32(fotg210->reg + FOTG210_DAR);
12174 -
12175 - value |= (addr & 0x7F);
12176 - iowrite32(value, fotg210->reg + FOTG210_DAR);
12177 -}
12178 -
12179 -static void fotg210_set_cxstall(struct fotg210_udc *fotg210)
12180 -{
12181 - u32 value = ioread32(fotg210->reg + FOTG210_DCFESR);
12182 -
12183 - value |= DCFESR_CX_STL;
12184 - iowrite32(value, fotg210->reg + FOTG210_DCFESR);
12185 -}
12186 -
12187 -static void fotg210_request_error(struct fotg210_udc *fotg210)
12188 -{
12189 - fotg210_set_cxstall(fotg210);
12190 - pr_err("request error!!\n");
12191 -}
12192 -
12193 -static void fotg210_set_address(struct fotg210_udc *fotg210,
12194 - struct usb_ctrlrequest *ctrl)
12195 -{
12196 - if (le16_to_cpu(ctrl->wValue) >= 0x0100) {
12197 - fotg210_request_error(fotg210);
12198 - } else {
12199 - fotg210_set_dev_addr(fotg210, le16_to_cpu(ctrl->wValue));
12200 - fotg210_set_cxdone(fotg210);
12201 - }
12202 -}
12203 -
12204 -static void fotg210_set_feature(struct fotg210_udc *fotg210,
12205 - struct usb_ctrlrequest *ctrl)
12206 -{
12207 - switch (ctrl->bRequestType & USB_RECIP_MASK) {
12208 - case USB_RECIP_DEVICE:
12209 - fotg210_set_cxdone(fotg210);
12210 - break;
12211 - case USB_RECIP_INTERFACE:
12212 - fotg210_set_cxdone(fotg210);
12213 - break;
12214 - case USB_RECIP_ENDPOINT: {
12215 - u8 epnum;
12216 - epnum = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK;
12217 - if (epnum)
12218 - fotg210_set_epnstall(fotg210->ep[epnum]);
12219 - else
12220 - fotg210_set_cxstall(fotg210);
12221 - fotg210_set_cxdone(fotg210);
12222 - }
12223 - break;
12224 - default:
12225 - fotg210_request_error(fotg210);
12226 - break;
12227 - }
12228 -}
12229 -
12230 -static void fotg210_clear_feature(struct fotg210_udc *fotg210,
12231 - struct usb_ctrlrequest *ctrl)
12232 -{
12233 - struct fotg210_ep *ep =
12234 - fotg210->ep[ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK];
12235 -
12236 - switch (ctrl->bRequestType & USB_RECIP_MASK) {
12237 - case USB_RECIP_DEVICE:
12238 - fotg210_set_cxdone(fotg210);
12239 - break;
12240 - case USB_RECIP_INTERFACE:
12241 - fotg210_set_cxdone(fotg210);
12242 - break;
12243 - case USB_RECIP_ENDPOINT:
12244 - if (ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK) {
12245 - if (ep->wedged) {
12246 - fotg210_set_cxdone(fotg210);
12247 - break;
12248 - }
12249 - if (ep->stall)
12250 - fotg210_set_halt_and_wedge(&ep->ep, 0, 0);
12251 - }
12252 - fotg210_set_cxdone(fotg210);
12253 - break;
12254 - default:
12255 - fotg210_request_error(fotg210);
12256 - break;
12257 - }
12258 -}
12259 -
12260 -static int fotg210_is_epnstall(struct fotg210_ep *ep)
12261 -{
12262 - struct fotg210_udc *fotg210 = ep->fotg210;
12263 - u32 value;
12264 - void __iomem *reg;
12265 -
12266 - reg = (ep->dir_in) ?
12267 - fotg210->reg + FOTG210_INEPMPSR(ep->epnum) :
12268 - fotg210->reg + FOTG210_OUTEPMPSR(ep->epnum);
12269 - value = ioread32(reg);
12270 - return value & INOUTEPMPSR_STL_EP ? 1 : 0;
12271 -}
12272 -
12273 -/* For EP0 requests triggered by this driver (currently GET_STATUS response) */
12274 -static void fotg210_ep0_complete(struct usb_ep *_ep, struct usb_request *req)
12275 -{
12276 - struct fotg210_ep *ep;
12277 - struct fotg210_udc *fotg210;
12278 -
12279 - ep = container_of(_ep, struct fotg210_ep, ep);
12280 - fotg210 = ep->fotg210;
12281 -
12282 - if (req->status || req->actual != req->length) {
12283 - dev_warn(&fotg210->gadget.dev, "EP0 request failed: %d\n", req->status);
12284 - }
12285 -}
12286 -
12287 -static void fotg210_get_status(struct fotg210_udc *fotg210,
12288 - struct usb_ctrlrequest *ctrl)
12289 -{
12290 - u8 epnum;
12291 -
12292 - switch (ctrl->bRequestType & USB_RECIP_MASK) {
12293 - case USB_RECIP_DEVICE:
12294 - fotg210->ep0_data = cpu_to_le16(1 << USB_DEVICE_SELF_POWERED);
12295 - break;
12296 - case USB_RECIP_INTERFACE:
12297 - fotg210->ep0_data = cpu_to_le16(0);
12298 - break;
12299 - case USB_RECIP_ENDPOINT:
12300 - epnum = ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK;
12301 - if (epnum)
12302 - fotg210->ep0_data =
12303 - cpu_to_le16(fotg210_is_epnstall(fotg210->ep[epnum])
12304 - << USB_ENDPOINT_HALT);
12305 - else
12306 - fotg210_request_error(fotg210);
12307 - break;
12308 -
12309 - default:
12310 - fotg210_request_error(fotg210);
12311 - return; /* exit */
12312 - }
12313 -
12314 - fotg210->ep0_req->buf = &fotg210->ep0_data;
12315 - fotg210->ep0_req->length = 2;
12316 -
12317 - spin_unlock(&fotg210->lock);
12318 - fotg210_ep_queue(fotg210->gadget.ep0, fotg210->ep0_req, GFP_ATOMIC);
12319 - spin_lock(&fotg210->lock);
12320 -}
12321 -
12322 -static int fotg210_setup_packet(struct fotg210_udc *fotg210,
12323 - struct usb_ctrlrequest *ctrl)
12324 -{
12325 - u8 *p = (u8 *)ctrl;
12326 - u8 ret = 0;
12327 -
12328 - fotg210_rdsetupp(fotg210, p);
12329 -
12330 - fotg210->ep[0]->dir_in = ctrl->bRequestType & USB_DIR_IN;
12331 -
12332 - if (fotg210->gadget.speed == USB_SPEED_UNKNOWN) {
12333 - u32 value = ioread32(fotg210->reg + FOTG210_DMCR);
12334 - fotg210->gadget.speed = value & DMCR_HS_EN ?
12335 - USB_SPEED_HIGH : USB_SPEED_FULL;
12336 - }
12337 -
12338 - /* check request */
12339 - if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
12340 - switch (ctrl->bRequest) {
12341 - case USB_REQ_GET_STATUS:
12342 - fotg210_get_status(fotg210, ctrl);
12343 - break;
12344 - case USB_REQ_CLEAR_FEATURE:
12345 - fotg210_clear_feature(fotg210, ctrl);
12346 - break;
12347 - case USB_REQ_SET_FEATURE:
12348 - fotg210_set_feature(fotg210, ctrl);
12349 - break;
12350 - case USB_REQ_SET_ADDRESS:
12351 - fotg210_set_address(fotg210, ctrl);
12352 - break;
12353 - case USB_REQ_SET_CONFIGURATION:
12354 - fotg210_set_configuration(fotg210);
12355 - ret = 1;
12356 - break;
12357 - default:
12358 - ret = 1;
12359 - break;
12360 - }
12361 - } else {
12362 - ret = 1;
12363 - }
12364 -
12365 - return ret;
12366 -}
12367 -
12368 -static void fotg210_ep0out(struct fotg210_udc *fotg210)
12369 -{
12370 - struct fotg210_ep *ep = fotg210->ep[0];
12371 -
12372 - if (!list_empty(&ep->queue) && !ep->dir_in) {
12373 - struct fotg210_request *req;
12374 -
12375 - req = list_first_entry(&ep->queue,
12376 - struct fotg210_request, queue);
12377 -
12378 - if (req->req.length)
12379 - fotg210_start_dma(ep, req);
12380 -
12381 - if ((req->req.length - req->req.actual) < ep->ep.maxpacket)
12382 - fotg210_done(ep, req, 0);
12383 - } else {
12384 - pr_err("%s : empty queue\n", __func__);
12385 - }
12386 -}
12387 -
12388 -static void fotg210_ep0in(struct fotg210_udc *fotg210)
12389 -{
12390 - struct fotg210_ep *ep = fotg210->ep[0];
12391 -
12392 - if ((!list_empty(&ep->queue)) && (ep->dir_in)) {
12393 - struct fotg210_request *req;
12394 -
12395 - req = list_entry(ep->queue.next,
12396 - struct fotg210_request, queue);
12397 -
12398 - if (req->req.length)
12399 - fotg210_start_dma(ep, req);
12400 -
12401 - if (req->req.actual == req->req.length)
12402 - fotg210_done(ep, req, 0);
12403 - } else {
12404 - fotg210_set_cxdone(fotg210);
12405 - }
12406 -}
12407 -
12408 -static void fotg210_clear_comabt_int(struct fotg210_udc *fotg210)
12409 -{
12410 - u32 value = ioread32(fotg210->reg + FOTG210_DISGR0);
12411 -
12412 - value &= ~DISGR0_CX_COMABT_INT;
12413 - iowrite32(value, fotg210->reg + FOTG210_DISGR0);
12414 -}
12415 -
12416 -static void fotg210_in_fifo_handler(struct fotg210_ep *ep)
12417 -{
12418 - struct fotg210_request *req = list_entry(ep->queue.next,
12419 - struct fotg210_request, queue);
12420 -
12421 - if (req->req.length)
12422 - fotg210_start_dma(ep, req);
12423 - fotg210_done(ep, req, 0);
12424 -}
12425 -
12426 -static void fotg210_out_fifo_handler(struct fotg210_ep *ep)
12427 -{
12428 - struct fotg210_request *req = list_entry(ep->queue.next,
12429 - struct fotg210_request, queue);
12430 - int disgr1 = ioread32(ep->fotg210->reg + FOTG210_DISGR1);
12431 -
12432 - fotg210_start_dma(ep, req);
12433 -
12434 - /* Complete the request when it's full or a short packet arrived.
12435 - * Like other drivers, short_not_ok isn't handled.
12436 - */
12437 -
12438 - if (req->req.length == req->req.actual ||
12439 - (disgr1 & DISGR1_SPK_INT(ep->epnum - 1)))
12440 - fotg210_done(ep, req, 0);
12441 -}
12442 -
12443 -static irqreturn_t fotg210_irq(int irq, void *_fotg210)
12444 -{
12445 - struct fotg210_udc *fotg210 = _fotg210;
12446 - u32 int_grp = ioread32(fotg210->reg + FOTG210_DIGR);
12447 - u32 int_msk = ioread32(fotg210->reg + FOTG210_DMIGR);
12448 -
12449 - int_grp &= ~int_msk;
12450 -
12451 - spin_lock(&fotg210->lock);
12452 -
12453 - if (int_grp & DIGR_INT_G2) {
12454 - void __iomem *reg = fotg210->reg + FOTG210_DISGR2;
12455 - u32 int_grp2 = ioread32(reg);
12456 - u32 int_msk2 = ioread32(fotg210->reg + FOTG210_DMISGR2);
12457 - u32 value;
12458 -
12459 - int_grp2 &= ~int_msk2;
12460 -
12461 - if (int_grp2 & DISGR2_USBRST_INT) {
12462 - usb_gadget_udc_reset(&fotg210->gadget,
12463 - fotg210->driver);
12464 - value = ioread32(reg);
12465 - value &= ~DISGR2_USBRST_INT;
12466 - iowrite32(value, reg);
12467 - pr_info("fotg210 udc reset\n");
12468 - }
12469 - if (int_grp2 & DISGR2_SUSP_INT) {
12470 - value = ioread32(reg);
12471 - value &= ~DISGR2_SUSP_INT;
12472 - iowrite32(value, reg);
12473 - pr_info("fotg210 udc suspend\n");
12474 - }
12475 - if (int_grp2 & DISGR2_RESM_INT) {
12476 - value = ioread32(reg);
12477 - value &= ~DISGR2_RESM_INT;
12478 - iowrite32(value, reg);
12479 - pr_info("fotg210 udc resume\n");
12480 - }
12481 - if (int_grp2 & DISGR2_ISO_SEQ_ERR_INT) {
12482 - value = ioread32(reg);
12483 - value &= ~DISGR2_ISO_SEQ_ERR_INT;
12484 - iowrite32(value, reg);
12485 - pr_info("fotg210 iso sequence error\n");
12486 - }
12487 - if (int_grp2 & DISGR2_ISO_SEQ_ABORT_INT) {
12488 - value = ioread32(reg);
12489 - value &= ~DISGR2_ISO_SEQ_ABORT_INT;
12490 - iowrite32(value, reg);
12491 - pr_info("fotg210 iso sequence abort\n");
12492 - }
12493 - if (int_grp2 & DISGR2_TX0BYTE_INT) {
12494 - fotg210_clear_tx0byte(fotg210);
12495 - value = ioread32(reg);
12496 - value &= ~DISGR2_TX0BYTE_INT;
12497 - iowrite32(value, reg);
12498 - pr_info("fotg210 transferred 0 byte\n");
12499 - }
12500 - if (int_grp2 & DISGR2_RX0BYTE_INT) {
12501 - fotg210_clear_rx0byte(fotg210);
12502 - value = ioread32(reg);
12503 - value &= ~DISGR2_RX0BYTE_INT;
12504 - iowrite32(value, reg);
12505 - pr_info("fotg210 received 0 byte\n");
12506 - }
12507 - if (int_grp2 & DISGR2_DMA_ERROR) {
12508 - value = ioread32(reg);
12509 - value &= ~DISGR2_DMA_ERROR;
12510 - iowrite32(value, reg);
12511 - }
12512 - }
12513 -
12514 - if (int_grp & DIGR_INT_G0) {
12515 - void __iomem *reg = fotg210->reg + FOTG210_DISGR0;
12516 - u32 int_grp0 = ioread32(reg);
12517 - u32 int_msk0 = ioread32(fotg210->reg + FOTG210_DMISGR0);
12518 - struct usb_ctrlrequest ctrl;
12519 -
12520 - int_grp0 &= ~int_msk0;
12521 -
12522 - /* the highest priority in this source register */
12523 - if (int_grp0 & DISGR0_CX_COMABT_INT) {
12524 - fotg210_clear_comabt_int(fotg210);
12525 - pr_info("fotg210 CX command abort\n");
12526 - }
12527 -
12528 - if (int_grp0 & DISGR0_CX_SETUP_INT) {
12529 - if (fotg210_setup_packet(fotg210, &ctrl)) {
12530 - spin_unlock(&fotg210->lock);
12531 - if (fotg210->driver->setup(&fotg210->gadget,
12532 - &ctrl) < 0)
12533 - fotg210_set_cxstall(fotg210);
12534 - spin_lock(&fotg210->lock);
12535 - }
12536 - }
12537 - if (int_grp0 & DISGR0_CX_COMEND_INT)
12538 - pr_info("fotg210 cmd end\n");
12539 -
12540 - if (int_grp0 & DISGR0_CX_IN_INT)
12541 - fotg210_ep0in(fotg210);
12542 -
12543 - if (int_grp0 & DISGR0_CX_OUT_INT)
12544 - fotg210_ep0out(fotg210);
12545 -
12546 - if (int_grp0 & DISGR0_CX_COMFAIL_INT) {
12547 - fotg210_set_cxstall(fotg210);
12548 - pr_info("fotg210 ep0 fail\n");
12549 - }
12550 - }
12551 -
12552 - if (int_grp & DIGR_INT_G1) {
12553 - void __iomem *reg = fotg210->reg + FOTG210_DISGR1;
12554 - u32 int_grp1 = ioread32(reg);
12555 - u32 int_msk1 = ioread32(fotg210->reg + FOTG210_DMISGR1);
12556 - int fifo;
12557 -
12558 - int_grp1 &= ~int_msk1;
12559 -
12560 - for (fifo = 0; fifo < FOTG210_MAX_FIFO_NUM; fifo++) {
12561 - if (int_grp1 & DISGR1_IN_INT(fifo))
12562 - fotg210_in_fifo_handler(fotg210->ep[fifo + 1]);
12563 -
12564 - if ((int_grp1 & DISGR1_OUT_INT(fifo)) ||
12565 - (int_grp1 & DISGR1_SPK_INT(fifo)))
12566 - fotg210_out_fifo_handler(fotg210->ep[fifo + 1]);
12567 - }
12568 - }
12569 -
12570 - spin_unlock(&fotg210->lock);
12571 -
12572 - return IRQ_HANDLED;
12573 -}
12574 -
12575 -static void fotg210_disable_unplug(struct fotg210_udc *fotg210)
12576 -{
12577 - u32 reg = ioread32(fotg210->reg + FOTG210_PHYTMSR);
12578 -
12579 - reg &= ~PHYTMSR_UNPLUG;
12580 - iowrite32(reg, fotg210->reg + FOTG210_PHYTMSR);
12581 -}
12582 -
12583 -static int fotg210_udc_start(struct usb_gadget *g,
12584 - struct usb_gadget_driver *driver)
12585 -{
12586 - struct fotg210_udc *fotg210 = gadget_to_fotg210(g);
12587 - u32 value;
12588 -
12589 - /* hook up the driver */
12590 - fotg210->driver = driver;
12591 -
12592 - /* enable device global interrupt */
12593 - value = ioread32(fotg210->reg + FOTG210_DMCR);
12594 - value |= DMCR_GLINT_EN;
12595 - iowrite32(value, fotg210->reg + FOTG210_DMCR);
12596 -
12597 - return 0;
12598 -}
12599 -
12600 -static void fotg210_init(struct fotg210_udc *fotg210)
12601 -{
12602 - u32 value;
12603 -
12604 - /* disable global interrupt and set int polarity to active high */
12605 - iowrite32(GMIR_MHC_INT | GMIR_MOTG_INT | GMIR_INT_POLARITY,
12606 - fotg210->reg + FOTG210_GMIR);
12607 -
12608 - /* disable device global interrupt */
12609 - value = ioread32(fotg210->reg + FOTG210_DMCR);
12610 - value &= ~DMCR_GLINT_EN;
12611 - iowrite32(value, fotg210->reg + FOTG210_DMCR);
12612 -
12613 - /* enable only grp2 irqs we handle */
12614 - iowrite32(~(DISGR2_DMA_ERROR | DISGR2_RX0BYTE_INT | DISGR2_TX0BYTE_INT
12615 - | DISGR2_ISO_SEQ_ABORT_INT | DISGR2_ISO_SEQ_ERR_INT
12616 - | DISGR2_RESM_INT | DISGR2_SUSP_INT | DISGR2_USBRST_INT),
12617 - fotg210->reg + FOTG210_DMISGR2);
12618 -
12619 - /* disable all fifo interrupt */
12620 - iowrite32(~(u32)0, fotg210->reg + FOTG210_DMISGR1);
12621 -
12622 - /* disable cmd end */
12623 - value = ioread32(fotg210->reg + FOTG210_DMISGR0);
12624 - value |= DMISGR0_MCX_COMEND;
12625 - iowrite32(value, fotg210->reg + FOTG210_DMISGR0);
12626 -}
12627 -
12628 -static int fotg210_udc_stop(struct usb_gadget *g)
12629 -{
12630 - struct fotg210_udc *fotg210 = gadget_to_fotg210(g);
12631 - unsigned long flags;
12632 -
12633 - spin_lock_irqsave(&fotg210->lock, flags);
12634 -
12635 - fotg210_init(fotg210);
12636 - fotg210->driver = NULL;
12637 -
12638 - spin_unlock_irqrestore(&fotg210->lock, flags);
12639 -
12640 - return 0;
12641 -}
12642 -
12643 -static const struct usb_gadget_ops fotg210_gadget_ops = {
12644 - .udc_start = fotg210_udc_start,
12645 - .udc_stop = fotg210_udc_stop,
12646 -};
12647 -
12648 -static int fotg210_udc_remove(struct platform_device *pdev)
12649 -{
12650 - struct fotg210_udc *fotg210 = platform_get_drvdata(pdev);
12651 - int i;
12652 -
12653 - usb_del_gadget_udc(&fotg210->gadget);
12654 - iounmap(fotg210->reg);
12655 - free_irq(platform_get_irq(pdev, 0), fotg210);
12656 -
12657 - fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
12658 - for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
12659 - kfree(fotg210->ep[i]);
12660 - kfree(fotg210);
12661 -
12662 - return 0;
12663 -}
12664 -
12665 -static int fotg210_udc_probe(struct platform_device *pdev)
12666 -{
12667 - struct resource *res, *ires;
12668 - struct fotg210_udc *fotg210 = NULL;
12669 - struct fotg210_ep *_ep[FOTG210_MAX_NUM_EP];
12670 - int ret = 0;
12671 - int i;
12672 -
12673 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
12674 - if (!res) {
12675 - pr_err("platform_get_resource error.\n");
12676 - return -ENODEV;
12677 - }
12678 -
12679 - ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
12680 - if (!ires) {
12681 - pr_err("platform_get_resource IORESOURCE_IRQ error.\n");
12682 - return -ENODEV;
12683 - }
12684 -
12685 - ret = -ENOMEM;
12686 -
12687 - /* initialize udc */
12688 - fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL);
12689 - if (fotg210 == NULL)
12690 - goto err;
12691 -
12692 - for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
12693 - _ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL);
12694 - if (_ep[i] == NULL)
12695 - goto err_alloc;
12696 - fotg210->ep[i] = _ep[i];
12697 - }
12698 -
12699 - fotg210->reg = ioremap(res->start, resource_size(res));
12700 - if (fotg210->reg == NULL) {
12701 - pr_err("ioremap error.\n");
12702 - goto err_alloc;
12703 - }
12704 -
12705 - spin_lock_init(&fotg210->lock);
12706 -
12707 - platform_set_drvdata(pdev, fotg210);
12708 -
12709 - fotg210->gadget.ops = &fotg210_gadget_ops;
12710 -
12711 - fotg210->gadget.max_speed = USB_SPEED_HIGH;
12712 - fotg210->gadget.dev.parent = &pdev->dev;
12713 - fotg210->gadget.dev.dma_mask = pdev->dev.dma_mask;
12714 - fotg210->gadget.name = udc_name;
12715 -
12716 - INIT_LIST_HEAD(&fotg210->gadget.ep_list);
12717 -
12718 - for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
12719 - struct fotg210_ep *ep = fotg210->ep[i];
12720 -
12721 - if (i) {
12722 - INIT_LIST_HEAD(&fotg210->ep[i]->ep.ep_list);
12723 - list_add_tail(&fotg210->ep[i]->ep.ep_list,
12724 - &fotg210->gadget.ep_list);
12725 - }
12726 - ep->fotg210 = fotg210;
12727 - INIT_LIST_HEAD(&ep->queue);
12728 - ep->ep.name = fotg210_ep_name[i];
12729 - ep->ep.ops = &fotg210_ep_ops;
12730 - usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
12731 -
12732 - if (i == 0) {
12733 - ep->ep.caps.type_control = true;
12734 - } else {
12735 - ep->ep.caps.type_iso = true;
12736 - ep->ep.caps.type_bulk = true;
12737 - ep->ep.caps.type_int = true;
12738 - }
12739 -
12740 - ep->ep.caps.dir_in = true;
12741 - ep->ep.caps.dir_out = true;
12742 - }
12743 - usb_ep_set_maxpacket_limit(&fotg210->ep[0]->ep, 0x40);
12744 - fotg210->gadget.ep0 = &fotg210->ep[0]->ep;
12745 - INIT_LIST_HEAD(&fotg210->gadget.ep0->ep_list);
12746 -
12747 - fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep,
12748 - GFP_KERNEL);
12749 - if (fotg210->ep0_req == NULL)
12750 - goto err_map;
12751 -
12752 - fotg210->ep0_req->complete = fotg210_ep0_complete;
12753 -
12754 - fotg210_init(fotg210);
12755 -
12756 - fotg210_disable_unplug(fotg210);
12757 -
12758 - ret = request_irq(ires->start, fotg210_irq, IRQF_SHARED,
12759 - udc_name, fotg210);
12760 - if (ret < 0) {
12761 - pr_err("request_irq error (%d)\n", ret);
12762 - goto err_req;
12763 - }
12764 -
12765 - ret = usb_add_gadget_udc(&pdev->dev, &fotg210->gadget);
12766 - if (ret)
12767 - goto err_add_udc;
12768 -
12769 - dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
12770 -
12771 - return 0;
12772 -
12773 -err_add_udc:
12774 - free_irq(ires->start, fotg210);
12775 -
12776 -err_req:
12777 - fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
12778 -
12779 -err_map:
12780 - iounmap(fotg210->reg);
12781 -
12782 -err_alloc:
12783 - for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
12784 - kfree(fotg210->ep[i]);
12785 - kfree(fotg210);
12786 -
12787 -err:
12788 - return ret;
12789 -}
12790 -
12791 -static struct platform_driver fotg210_driver = {
12792 - .driver = {
12793 - .name = udc_name,
12794 - },
12795 - .probe = fotg210_udc_probe,
12796 - .remove = fotg210_udc_remove,
12797 -};
12798 -
12799 -module_platform_driver(fotg210_driver);
12800 -
12801 -MODULE_AUTHOR("Yuan-Hsin Chen, Feng-Hsin Chiang <john453@faraday-tech.com>");
12802 -MODULE_LICENSE("GPL");
12803 -MODULE_DESCRIPTION(DRIVER_DESC);
12804 --- /dev/null
12805 +++ b/drivers/usb/fotg210/fotg210-udc.c
12806 @@ -0,0 +1,1239 @@
12807 +// SPDX-License-Identifier: GPL-2.0
12808 +/*
12809 + * FOTG210 UDC Driver supports Bulk transfer so far
12810 + *
12811 + * Copyright (C) 2013 Faraday Technology Corporation
12812 + *
12813 + * Author : Yuan-Hsin Chen <yhchen@faraday-tech.com>
12814 + */
12815 +
12816 +#include <linux/dma-mapping.h>
12817 +#include <linux/err.h>
12818 +#include <linux/interrupt.h>
12819 +#include <linux/io.h>
12820 +#include <linux/module.h>
12821 +#include <linux/platform_device.h>
12822 +#include <linux/usb/ch9.h>
12823 +#include <linux/usb/gadget.h>
12824 +
12825 +#include "fotg210-udc.h"
12826 +
12827 +#define DRIVER_DESC "FOTG210 USB Device Controller Driver"
12828 +#define DRIVER_VERSION "30-April-2013"
12829 +
12830 +static const char udc_name[] = "fotg210_udc";
12831 +static const char * const fotg210_ep_name[] = {
12832 + "ep0", "ep1", "ep2", "ep3", "ep4"};
12833 +
12834 +static void fotg210_disable_fifo_int(struct fotg210_ep *ep)
12835 +{
12836 + u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR1);
12837 +
12838 + if (ep->dir_in)
12839 + value |= DMISGR1_MF_IN_INT(ep->epnum - 1);
12840 + else
12841 + value |= DMISGR1_MF_OUTSPK_INT(ep->epnum - 1);
12842 + iowrite32(value, ep->fotg210->reg + FOTG210_DMISGR1);
12843 +}
12844 +
12845 +static void fotg210_enable_fifo_int(struct fotg210_ep *ep)
12846 +{
12847 + u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR1);
12848 +
12849 + if (ep->dir_in)
12850 + value &= ~DMISGR1_MF_IN_INT(ep->epnum - 1);
12851 + else
12852 + value &= ~DMISGR1_MF_OUTSPK_INT(ep->epnum - 1);
12853 + iowrite32(value, ep->fotg210->reg + FOTG210_DMISGR1);
12854 +}
12855 +
12856 +static void fotg210_set_cxdone(struct fotg210_udc *fotg210)
12857 +{
12858 + u32 value = ioread32(fotg210->reg + FOTG210_DCFESR);
12859 +
12860 + value |= DCFESR_CX_DONE;
12861 + iowrite32(value, fotg210->reg + FOTG210_DCFESR);
12862 +}
12863 +
12864 +static void fotg210_done(struct fotg210_ep *ep, struct fotg210_request *req,
12865 + int status)
12866 +{
12867 + list_del_init(&req->queue);
12868 +
12869 + /* don't modify queue heads during completion callback */
12870 + if (ep->fotg210->gadget.speed == USB_SPEED_UNKNOWN)
12871 + req->req.status = -ESHUTDOWN;
12872 + else
12873 + req->req.status = status;
12874 +
12875 + spin_unlock(&ep->fotg210->lock);
12876 + usb_gadget_giveback_request(&ep->ep, &req->req);
12877 + spin_lock(&ep->fotg210->lock);
12878 +
12879 + if (ep->epnum) {
12880 + if (list_empty(&ep->queue))
12881 + fotg210_disable_fifo_int(ep);
12882 + } else {
12883 + fotg210_set_cxdone(ep->fotg210);
12884 + }
12885 +}
12886 +
12887 +static void fotg210_fifo_ep_mapping(struct fotg210_ep *ep, u32 epnum,
12888 + u32 dir_in)
12889 +{
12890 + struct fotg210_udc *fotg210 = ep->fotg210;
12891 + u32 val;
12892 +
12893 + /* Driver should map an ep to a fifo and then map the fifo
12894 + * to the ep. What a brain-damaged design!
12895 + */
12896 +
12897 + /* map a fifo to an ep */
12898 + val = ioread32(fotg210->reg + FOTG210_EPMAP);
12899 + val &= ~EPMAP_FIFONOMSK(epnum, dir_in);
12900 + val |= EPMAP_FIFONO(epnum, dir_in);
12901 + iowrite32(val, fotg210->reg + FOTG210_EPMAP);
12902 +
12903 + /* map the ep to the fifo */
12904 + val = ioread32(fotg210->reg + FOTG210_FIFOMAP);
12905 + val &= ~FIFOMAP_EPNOMSK(epnum);
12906 + val |= FIFOMAP_EPNO(epnum);
12907 + iowrite32(val, fotg210->reg + FOTG210_FIFOMAP);
12908 +
12909 + /* enable fifo */
12910 + val = ioread32(fotg210->reg + FOTG210_FIFOCF);
12911 + val |= FIFOCF_FIFO_EN(epnum - 1);
12912 + iowrite32(val, fotg210->reg + FOTG210_FIFOCF);
12913 +}
12914 +
12915 +static void fotg210_set_fifo_dir(struct fotg210_ep *ep, u32 epnum, u32 dir_in)
12916 +{
12917 + struct fotg210_udc *fotg210 = ep->fotg210;
12918 + u32 val;
12919 +
12920 + val = ioread32(fotg210->reg + FOTG210_FIFOMAP);
12921 + val |= (dir_in ? FIFOMAP_DIRIN(epnum - 1) : FIFOMAP_DIROUT(epnum - 1));
12922 + iowrite32(val, fotg210->reg + FOTG210_FIFOMAP);
12923 +}
12924 +
12925 +static void fotg210_set_tfrtype(struct fotg210_ep *ep, u32 epnum, u32 type)
12926 +{
12927 + struct fotg210_udc *fotg210 = ep->fotg210;
12928 + u32 val;
12929 +
12930 + val = ioread32(fotg210->reg + FOTG210_FIFOCF);
12931 + val |= FIFOCF_TYPE(type, epnum - 1);
12932 + iowrite32(val, fotg210->reg + FOTG210_FIFOCF);
12933 +}
12934 +
12935 +static void fotg210_set_mps(struct fotg210_ep *ep, u32 epnum, u32 mps,
12936 + u32 dir_in)
12937 +{
12938 + struct fotg210_udc *fotg210 = ep->fotg210;
12939 + u32 val;
12940 + u32 offset = dir_in ? FOTG210_INEPMPSR(epnum) :
12941 + FOTG210_OUTEPMPSR(epnum);
12942 +
12943 + val = ioread32(fotg210->reg + offset);
12944 + val |= INOUTEPMPSR_MPS(mps);
12945 + iowrite32(val, fotg210->reg + offset);
12946 +}
12947 +
12948 +static int fotg210_config_ep(struct fotg210_ep *ep,
12949 + const struct usb_endpoint_descriptor *desc)
12950 +{
12951 + struct fotg210_udc *fotg210 = ep->fotg210;
12952 +
12953 + fotg210_set_fifo_dir(ep, ep->epnum, ep->dir_in);
12954 + fotg210_set_tfrtype(ep, ep->epnum, ep->type);
12955 + fotg210_set_mps(ep, ep->epnum, ep->ep.maxpacket, ep->dir_in);
12956 + fotg210_fifo_ep_mapping(ep, ep->epnum, ep->dir_in);
12957 +
12958 + fotg210->ep[ep->epnum] = ep;
12959 +
12960 + return 0;
12961 +}
12962 +
12963 +static int fotg210_ep_enable(struct usb_ep *_ep,
12964 + const struct usb_endpoint_descriptor *desc)
12965 +{
12966 + struct fotg210_ep *ep;
12967 +
12968 + ep = container_of(_ep, struct fotg210_ep, ep);
12969 +
12970 + ep->desc = desc;
12971 + ep->epnum = usb_endpoint_num(desc);
12972 + ep->type = usb_endpoint_type(desc);
12973 + ep->dir_in = usb_endpoint_dir_in(desc);
12974 + ep->ep.maxpacket = usb_endpoint_maxp(desc);
12975 +
12976 + return fotg210_config_ep(ep, desc);
12977 +}
12978 +
12979 +static void fotg210_reset_tseq(struct fotg210_udc *fotg210, u8 epnum)
12980 +{
12981 + struct fotg210_ep *ep = fotg210->ep[epnum];
12982 + u32 value;
12983 + void __iomem *reg;
12984 +
12985 + reg = (ep->dir_in) ?
12986 + fotg210->reg + FOTG210_INEPMPSR(epnum) :
12987 + fotg210->reg + FOTG210_OUTEPMPSR(epnum);
12988 +
12989 + /* Note: Driver needs to set and clear INOUTEPMPSR_RESET_TSEQ
12990 + * bit. Controller wouldn't clear this bit. WTF!!!
12991 + */
12992 +
12993 + value = ioread32(reg);
12994 + value |= INOUTEPMPSR_RESET_TSEQ;
12995 + iowrite32(value, reg);
12996 +
12997 + value = ioread32(reg);
12998 + value &= ~INOUTEPMPSR_RESET_TSEQ;
12999 + iowrite32(value, reg);
13000 +}
13001 +
13002 +static int fotg210_ep_release(struct fotg210_ep *ep)
13003 +{
13004 + if (!ep->epnum)
13005 + return 0;
13006 + ep->epnum = 0;
13007 + ep->stall = 0;
13008 + ep->wedged = 0;
13009 +
13010 + fotg210_reset_tseq(ep->fotg210, ep->epnum);
13011 +
13012 + return 0;
13013 +}
13014 +
13015 +static int fotg210_ep_disable(struct usb_ep *_ep)
13016 +{
13017 + struct fotg210_ep *ep;
13018 + struct fotg210_request *req;
13019 + unsigned long flags;
13020 +
13021 + BUG_ON(!_ep);
13022 +
13023 + ep = container_of(_ep, struct fotg210_ep, ep);
13024 +
13025 + while (!list_empty(&ep->queue)) {
13026 + req = list_entry(ep->queue.next,
13027 + struct fotg210_request, queue);
13028 + spin_lock_irqsave(&ep->fotg210->lock, flags);
13029 + fotg210_done(ep, req, -ECONNRESET);
13030 + spin_unlock_irqrestore(&ep->fotg210->lock, flags);
13031 + }
13032 +
13033 + return fotg210_ep_release(ep);
13034 +}
13035 +
13036 +static struct usb_request *fotg210_ep_alloc_request(struct usb_ep *_ep,
13037 + gfp_t gfp_flags)
13038 +{
13039 + struct fotg210_request *req;
13040 +
13041 + req = kzalloc(sizeof(struct fotg210_request), gfp_flags);
13042 + if (!req)
13043 + return NULL;
13044 +
13045 + INIT_LIST_HEAD(&req->queue);
13046 +
13047 + return &req->req;
13048 +}
13049 +
13050 +static void fotg210_ep_free_request(struct usb_ep *_ep,
13051 + struct usb_request *_req)
13052 +{
13053 + struct fotg210_request *req;
13054 +
13055 + req = container_of(_req, struct fotg210_request, req);
13056 + kfree(req);
13057 +}
13058 +
13059 +static void fotg210_enable_dma(struct fotg210_ep *ep,
13060 + dma_addr_t d, u32 len)
13061 +{
13062 + u32 value;
13063 + struct fotg210_udc *fotg210 = ep->fotg210;
13064 +
13065 + /* set transfer length and direction */
13066 + value = ioread32(fotg210->reg + FOTG210_DMACPSR1);
13067 + value &= ~(DMACPSR1_DMA_LEN(0xFFFF) | DMACPSR1_DMA_TYPE(1));
13068 + value |= DMACPSR1_DMA_LEN(len) | DMACPSR1_DMA_TYPE(ep->dir_in);
13069 + iowrite32(value, fotg210->reg + FOTG210_DMACPSR1);
13070 +
13071 + /* set device DMA target FIFO number */
13072 + value = ioread32(fotg210->reg + FOTG210_DMATFNR);
13073 + if (ep->epnum)
13074 + value |= DMATFNR_ACC_FN(ep->epnum - 1);
13075 + else
13076 + value |= DMATFNR_ACC_CXF;
13077 + iowrite32(value, fotg210->reg + FOTG210_DMATFNR);
13078 +
13079 + /* set DMA memory address */
13080 + iowrite32(d, fotg210->reg + FOTG210_DMACPSR2);
13081 +
13082 + /* enable MDMA_EROR and MDMA_CMPLT interrupt */
13083 + value = ioread32(fotg210->reg + FOTG210_DMISGR2);
13084 + value &= ~(DMISGR2_MDMA_CMPLT | DMISGR2_MDMA_ERROR);
13085 + iowrite32(value, fotg210->reg + FOTG210_DMISGR2);
13086 +
13087 + /* start DMA */
13088 + value = ioread32(fotg210->reg + FOTG210_DMACPSR1);
13089 + value |= DMACPSR1_DMA_START;
13090 + iowrite32(value, fotg210->reg + FOTG210_DMACPSR1);
13091 +}
13092 +
13093 +static void fotg210_disable_dma(struct fotg210_ep *ep)
13094 +{
13095 + iowrite32(DMATFNR_DISDMA, ep->fotg210->reg + FOTG210_DMATFNR);
13096 +}
13097 +
13098 +static void fotg210_wait_dma_done(struct fotg210_ep *ep)
13099 +{
13100 + u32 value;
13101 +
13102 + do {
13103 + value = ioread32(ep->fotg210->reg + FOTG210_DISGR2);
13104 + if ((value & DISGR2_USBRST_INT) ||
13105 + (value & DISGR2_DMA_ERROR))
13106 + goto dma_reset;
13107 + } while (!(value & DISGR2_DMA_CMPLT));
13108 +
13109 + value &= ~DISGR2_DMA_CMPLT;
13110 + iowrite32(value, ep->fotg210->reg + FOTG210_DISGR2);
13111 + return;
13112 +
13113 +dma_reset:
13114 + value = ioread32(ep->fotg210->reg + FOTG210_DMACPSR1);
13115 + value |= DMACPSR1_DMA_ABORT;
13116 + iowrite32(value, ep->fotg210->reg + FOTG210_DMACPSR1);
13117 +
13118 + /* reset fifo */
13119 + if (ep->epnum) {
13120 + value = ioread32(ep->fotg210->reg +
13121 + FOTG210_FIBCR(ep->epnum - 1));
13122 + value |= FIBCR_FFRST;
13123 + iowrite32(value, ep->fotg210->reg +
13124 + FOTG210_FIBCR(ep->epnum - 1));
13125 + } else {
13126 + value = ioread32(ep->fotg210->reg + FOTG210_DCFESR);
13127 + value |= DCFESR_CX_CLR;
13128 + iowrite32(value, ep->fotg210->reg + FOTG210_DCFESR);
13129 + }
13130 +}
13131 +
13132 +static void fotg210_start_dma(struct fotg210_ep *ep,
13133 + struct fotg210_request *req)
13134 +{
13135 + struct device *dev = &ep->fotg210->gadget.dev;
13136 + dma_addr_t d;
13137 + u8 *buffer;
13138 + u32 length;
13139 +
13140 + if (ep->epnum) {
13141 + if (ep->dir_in) {
13142 + buffer = req->req.buf;
13143 + length = req->req.length;
13144 + } else {
13145 + buffer = req->req.buf + req->req.actual;
13146 + length = ioread32(ep->fotg210->reg +
13147 + FOTG210_FIBCR(ep->epnum - 1)) & FIBCR_BCFX;
13148 + if (length > req->req.length - req->req.actual)
13149 + length = req->req.length - req->req.actual;
13150 + }
13151 + } else {
13152 + buffer = req->req.buf + req->req.actual;
13153 + if (req->req.length - req->req.actual > ep->ep.maxpacket)
13154 + length = ep->ep.maxpacket;
13155 + else
13156 + length = req->req.length - req->req.actual;
13157 + }
13158 +
13159 + d = dma_map_single(dev, buffer, length,
13160 + ep->dir_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
13161 +
13162 + if (dma_mapping_error(dev, d)) {
13163 + pr_err("dma_mapping_error\n");
13164 + return;
13165 + }
13166 +
13167 + fotg210_enable_dma(ep, d, length);
13168 +
13169 + /* check if dma is done */
13170 + fotg210_wait_dma_done(ep);
13171 +
13172 + fotg210_disable_dma(ep);
13173 +
13174 + /* update actual transfer length */
13175 + req->req.actual += length;
13176 +
13177 + dma_unmap_single(dev, d, length, DMA_TO_DEVICE);
13178 +}
13179 +
13180 +static void fotg210_ep0_queue(struct fotg210_ep *ep,
13181 + struct fotg210_request *req)
13182 +{
13183 + if (!req->req.length) {
13184 + fotg210_done(ep, req, 0);
13185 + return;
13186 + }
13187 + if (ep->dir_in) { /* if IN */
13188 + fotg210_start_dma(ep, req);
13189 + if (req->req.length == req->req.actual)
13190 + fotg210_done(ep, req, 0);
13191 + } else { /* OUT */
13192 + u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR0);
13193 +
13194 + value &= ~DMISGR0_MCX_OUT_INT;
13195 + iowrite32(value, ep->fotg210->reg + FOTG210_DMISGR0);
13196 + }
13197 +}
13198 +
13199 +static int fotg210_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
13200 + gfp_t gfp_flags)
13201 +{
13202 + struct fotg210_ep *ep;
13203 + struct fotg210_request *req;
13204 + unsigned long flags;
13205 + int request = 0;
13206 +
13207 + ep = container_of(_ep, struct fotg210_ep, ep);
13208 + req = container_of(_req, struct fotg210_request, req);
13209 +
13210 + if (ep->fotg210->gadget.speed == USB_SPEED_UNKNOWN)
13211 + return -ESHUTDOWN;
13212 +
13213 + spin_lock_irqsave(&ep->fotg210->lock, flags);
13214 +
13215 + if (list_empty(&ep->queue))
13216 + request = 1;
13217 +
13218 + list_add_tail(&req->queue, &ep->queue);
13219 +
13220 + req->req.actual = 0;
13221 + req->req.status = -EINPROGRESS;
13222 +
13223 + if (!ep->epnum) /* ep0 */
13224 + fotg210_ep0_queue(ep, req);
13225 + else if (request && !ep->stall)
13226 + fotg210_enable_fifo_int(ep);
13227 +
13228 + spin_unlock_irqrestore(&ep->fotg210->lock, flags);
13229 +
13230 + return 0;
13231 +}
13232 +
13233 +static int fotg210_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
13234 +{
13235 + struct fotg210_ep *ep;
13236 + struct fotg210_request *req;
13237 + unsigned long flags;
13238 +
13239 + ep = container_of(_ep, struct fotg210_ep, ep);
13240 + req = container_of(_req, struct fotg210_request, req);
13241 +
13242 + spin_lock_irqsave(&ep->fotg210->lock, flags);
13243 + if (!list_empty(&ep->queue))
13244 + fotg210_done(ep, req, -ECONNRESET);
13245 + spin_unlock_irqrestore(&ep->fotg210->lock, flags);
13246 +
13247 + return 0;
13248 +}
13249 +
13250 +static void fotg210_set_epnstall(struct fotg210_ep *ep)
13251 +{
13252 + struct fotg210_udc *fotg210 = ep->fotg210;
13253 + u32 value;
13254 + void __iomem *reg;
13255 +
13256 + /* check if IN FIFO is empty before stall */
13257 + if (ep->dir_in) {
13258 + do {
13259 + value = ioread32(fotg210->reg + FOTG210_DCFESR);
13260 + } while (!(value & DCFESR_FIFO_EMPTY(ep->epnum - 1)));
13261 + }
13262 +
13263 + reg = (ep->dir_in) ?
13264 + fotg210->reg + FOTG210_INEPMPSR(ep->epnum) :
13265 + fotg210->reg + FOTG210_OUTEPMPSR(ep->epnum);
13266 + value = ioread32(reg);
13267 + value |= INOUTEPMPSR_STL_EP;
13268 + iowrite32(value, reg);
13269 +}
13270 +
13271 +static void fotg210_clear_epnstall(struct fotg210_ep *ep)
13272 +{
13273 + struct fotg210_udc *fotg210 = ep->fotg210;
13274 + u32 value;
13275 + void __iomem *reg;
13276 +
13277 + reg = (ep->dir_in) ?
13278 + fotg210->reg + FOTG210_INEPMPSR(ep->epnum) :
13279 + fotg210->reg + FOTG210_OUTEPMPSR(ep->epnum);
13280 + value = ioread32(reg);
13281 + value &= ~INOUTEPMPSR_STL_EP;
13282 + iowrite32(value, reg);
13283 +}
13284 +
13285 +static int fotg210_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedge)
13286 +{
13287 + struct fotg210_ep *ep;
13288 + struct fotg210_udc *fotg210;
13289 + unsigned long flags;
13290 +
13291 + ep = container_of(_ep, struct fotg210_ep, ep);
13292 +
13293 + fotg210 = ep->fotg210;
13294 +
13295 + spin_lock_irqsave(&ep->fotg210->lock, flags);
13296 +
13297 + if (value) {
13298 + fotg210_set_epnstall(ep);
13299 + ep->stall = 1;
13300 + if (wedge)
13301 + ep->wedged = 1;
13302 + } else {
13303 + fotg210_reset_tseq(fotg210, ep->epnum);
13304 + fotg210_clear_epnstall(ep);
13305 + ep->stall = 0;
13306 + ep->wedged = 0;
13307 + if (!list_empty(&ep->queue))
13308 + fotg210_enable_fifo_int(ep);
13309 + }
13310 +
13311 + spin_unlock_irqrestore(&ep->fotg210->lock, flags);
13312 + return 0;
13313 +}
13314 +
13315 +static int fotg210_ep_set_halt(struct usb_ep *_ep, int value)
13316 +{
13317 + return fotg210_set_halt_and_wedge(_ep, value, 0);
13318 +}
13319 +
13320 +static int fotg210_ep_set_wedge(struct usb_ep *_ep)
13321 +{
13322 + return fotg210_set_halt_and_wedge(_ep, 1, 1);
13323 +}
13324 +
13325 +static void fotg210_ep_fifo_flush(struct usb_ep *_ep)
13326 +{
13327 +}
13328 +
13329 +static const struct usb_ep_ops fotg210_ep_ops = {
13330 + .enable = fotg210_ep_enable,
13331 + .disable = fotg210_ep_disable,
13332 +
13333 + .alloc_request = fotg210_ep_alloc_request,
13334 + .free_request = fotg210_ep_free_request,
13335 +
13336 + .queue = fotg210_ep_queue,
13337 + .dequeue = fotg210_ep_dequeue,
13338 +
13339 + .set_halt = fotg210_ep_set_halt,
13340 + .fifo_flush = fotg210_ep_fifo_flush,
13341 + .set_wedge = fotg210_ep_set_wedge,
13342 +};
13343 +
13344 +static void fotg210_clear_tx0byte(struct fotg210_udc *fotg210)
13345 +{
13346 + u32 value = ioread32(fotg210->reg + FOTG210_TX0BYTE);
13347 +
13348 + value &= ~(TX0BYTE_EP1 | TX0BYTE_EP2 | TX0BYTE_EP3
13349 + | TX0BYTE_EP4);
13350 + iowrite32(value, fotg210->reg + FOTG210_TX0BYTE);
13351 +}
13352 +
13353 +static void fotg210_clear_rx0byte(struct fotg210_udc *fotg210)
13354 +{
13355 + u32 value = ioread32(fotg210->reg + FOTG210_RX0BYTE);
13356 +
13357 + value &= ~(RX0BYTE_EP1 | RX0BYTE_EP2 | RX0BYTE_EP3
13358 + | RX0BYTE_EP4);
13359 + iowrite32(value, fotg210->reg + FOTG210_RX0BYTE);
13360 +}
13361 +
13362 +/* read 8-byte setup packet only */
13363 +static void fotg210_rdsetupp(struct fotg210_udc *fotg210,
13364 + u8 *buffer)
13365 +{
13366 + int i = 0;
13367 + u8 *tmp = buffer;
13368 + u32 data;
13369 + u32 length = 8;
13370 +
13371 + iowrite32(DMATFNR_ACC_CXF, fotg210->reg + FOTG210_DMATFNR);
13372 +
13373 + for (i = (length >> 2); i > 0; i--) {
13374 + data = ioread32(fotg210->reg + FOTG210_CXPORT);
13375 + *tmp = data & 0xFF;
13376 + *(tmp + 1) = (data >> 8) & 0xFF;
13377 + *(tmp + 2) = (data >> 16) & 0xFF;
13378 + *(tmp + 3) = (data >> 24) & 0xFF;
13379 + tmp = tmp + 4;
13380 + }
13381 +
13382 + switch (length % 4) {
13383 + case 1:
13384 + data = ioread32(fotg210->reg + FOTG210_CXPORT);
13385 + *tmp = data & 0xFF;
13386 + break;
13387 + case 2:
13388 + data = ioread32(fotg210->reg + FOTG210_CXPORT);
13389 + *tmp = data & 0xFF;
13390 + *(tmp + 1) = (data >> 8) & 0xFF;
13391 + break;
13392 + case 3:
13393 + data = ioread32(fotg210->reg + FOTG210_CXPORT);
13394 + *tmp = data & 0xFF;
13395 + *(tmp + 1) = (data >> 8) & 0xFF;
13396 + *(tmp + 2) = (data >> 16) & 0xFF;
13397 + break;
13398 + default:
13399 + break;
13400 + }
13401 +
13402 + iowrite32(DMATFNR_DISDMA, fotg210->reg + FOTG210_DMATFNR);
13403 +}
13404 +
13405 +static void fotg210_set_configuration(struct fotg210_udc *fotg210)
13406 +{
13407 + u32 value = ioread32(fotg210->reg + FOTG210_DAR);
13408 +
13409 + value |= DAR_AFT_CONF;
13410 + iowrite32(value, fotg210->reg + FOTG210_DAR);
13411 +}
13412 +
13413 +static void fotg210_set_dev_addr(struct fotg210_udc *fotg210, u32 addr)
13414 +{
13415 + u32 value = ioread32(fotg210->reg + FOTG210_DAR);
13416 +
13417 + value |= (addr & 0x7F);
13418 + iowrite32(value, fotg210->reg + FOTG210_DAR);
13419 +}
13420 +
13421 +static void fotg210_set_cxstall(struct fotg210_udc *fotg210)
13422 +{
13423 + u32 value = ioread32(fotg210->reg + FOTG210_DCFESR);
13424 +
13425 + value |= DCFESR_CX_STL;
13426 + iowrite32(value, fotg210->reg + FOTG210_DCFESR);
13427 +}
13428 +
13429 +static void fotg210_request_error(struct fotg210_udc *fotg210)
13430 +{
13431 + fotg210_set_cxstall(fotg210);
13432 + pr_err("request error!!\n");
13433 +}
13434 +
13435 +static void fotg210_set_address(struct fotg210_udc *fotg210,
13436 + struct usb_ctrlrequest *ctrl)
13437 +{
13438 + if (le16_to_cpu(ctrl->wValue) >= 0x0100) {
13439 + fotg210_request_error(fotg210);
13440 + } else {
13441 + fotg210_set_dev_addr(fotg210, le16_to_cpu(ctrl->wValue));
13442 + fotg210_set_cxdone(fotg210);
13443 + }
13444 +}
13445 +
13446 +static void fotg210_set_feature(struct fotg210_udc *fotg210,
13447 + struct usb_ctrlrequest *ctrl)
13448 +{
13449 + switch (ctrl->bRequestType & USB_RECIP_MASK) {
13450 + case USB_RECIP_DEVICE:
13451 + fotg210_set_cxdone(fotg210);
13452 + break;
13453 + case USB_RECIP_INTERFACE:
13454 + fotg210_set_cxdone(fotg210);
13455 + break;
13456 + case USB_RECIP_ENDPOINT: {
13457 + u8 epnum;
13458 + epnum = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK;
13459 + if (epnum)
13460 + fotg210_set_epnstall(fotg210->ep[epnum]);
13461 + else
13462 + fotg210_set_cxstall(fotg210);
13463 + fotg210_set_cxdone(fotg210);
13464 + }
13465 + break;
13466 + default:
13467 + fotg210_request_error(fotg210);
13468 + break;
13469 + }
13470 +}
13471 +
13472 +static void fotg210_clear_feature(struct fotg210_udc *fotg210,
13473 + struct usb_ctrlrequest *ctrl)
13474 +{
13475 + struct fotg210_ep *ep =
13476 + fotg210->ep[ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK];
13477 +
13478 + switch (ctrl->bRequestType & USB_RECIP_MASK) {
13479 + case USB_RECIP_DEVICE:
13480 + fotg210_set_cxdone(fotg210);
13481 + break;
13482 + case USB_RECIP_INTERFACE:
13483 + fotg210_set_cxdone(fotg210);
13484 + break;
13485 + case USB_RECIP_ENDPOINT:
13486 + if (ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK) {
13487 + if (ep->wedged) {
13488 + fotg210_set_cxdone(fotg210);
13489 + break;
13490 + }
13491 + if (ep->stall)
13492 + fotg210_set_halt_and_wedge(&ep->ep, 0, 0);
13493 + }
13494 + fotg210_set_cxdone(fotg210);
13495 + break;
13496 + default:
13497 + fotg210_request_error(fotg210);
13498 + break;
13499 + }
13500 +}
13501 +
13502 +static int fotg210_is_epnstall(struct fotg210_ep *ep)
13503 +{
13504 + struct fotg210_udc *fotg210 = ep->fotg210;
13505 + u32 value;
13506 + void __iomem *reg;
13507 +
13508 + reg = (ep->dir_in) ?
13509 + fotg210->reg + FOTG210_INEPMPSR(ep->epnum) :
13510 + fotg210->reg + FOTG210_OUTEPMPSR(ep->epnum);
13511 + value = ioread32(reg);
13512 + return value & INOUTEPMPSR_STL_EP ? 1 : 0;
13513 +}
13514 +
13515 +/* For EP0 requests triggered by this driver (currently GET_STATUS response) */
13516 +static void fotg210_ep0_complete(struct usb_ep *_ep, struct usb_request *req)
13517 +{
13518 + struct fotg210_ep *ep;
13519 + struct fotg210_udc *fotg210;
13520 +
13521 + ep = container_of(_ep, struct fotg210_ep, ep);
13522 + fotg210 = ep->fotg210;
13523 +
13524 + if (req->status || req->actual != req->length) {
13525 + dev_warn(&fotg210->gadget.dev, "EP0 request failed: %d\n", req->status);
13526 + }
13527 +}
13528 +
13529 +static void fotg210_get_status(struct fotg210_udc *fotg210,
13530 + struct usb_ctrlrequest *ctrl)
13531 +{
13532 + u8 epnum;
13533 +
13534 + switch (ctrl->bRequestType & USB_RECIP_MASK) {
13535 + case USB_RECIP_DEVICE:
13536 + fotg210->ep0_data = cpu_to_le16(1 << USB_DEVICE_SELF_POWERED);
13537 + break;
13538 + case USB_RECIP_INTERFACE:
13539 + fotg210->ep0_data = cpu_to_le16(0);
13540 + break;
13541 + case USB_RECIP_ENDPOINT:
13542 + epnum = ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK;
13543 + if (epnum)
13544 + fotg210->ep0_data =
13545 + cpu_to_le16(fotg210_is_epnstall(fotg210->ep[epnum])
13546 + << USB_ENDPOINT_HALT);
13547 + else
13548 + fotg210_request_error(fotg210);
13549 + break;
13550 +
13551 + default:
13552 + fotg210_request_error(fotg210);
13553 + return; /* exit */
13554 + }
13555 +
13556 + fotg210->ep0_req->buf = &fotg210->ep0_data;
13557 + fotg210->ep0_req->length = 2;
13558 +
13559 + spin_unlock(&fotg210->lock);
13560 + fotg210_ep_queue(fotg210->gadget.ep0, fotg210->ep0_req, GFP_ATOMIC);
13561 + spin_lock(&fotg210->lock);
13562 +}
13563 +
13564 +static int fotg210_setup_packet(struct fotg210_udc *fotg210,
13565 + struct usb_ctrlrequest *ctrl)
13566 +{
13567 + u8 *p = (u8 *)ctrl;
13568 + u8 ret = 0;
13569 +
13570 + fotg210_rdsetupp(fotg210, p);
13571 +
13572 + fotg210->ep[0]->dir_in = ctrl->bRequestType & USB_DIR_IN;
13573 +
13574 + if (fotg210->gadget.speed == USB_SPEED_UNKNOWN) {
13575 + u32 value = ioread32(fotg210->reg + FOTG210_DMCR);
13576 + fotg210->gadget.speed = value & DMCR_HS_EN ?
13577 + USB_SPEED_HIGH : USB_SPEED_FULL;
13578 + }
13579 +
13580 + /* check request */
13581 + if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
13582 + switch (ctrl->bRequest) {
13583 + case USB_REQ_GET_STATUS:
13584 + fotg210_get_status(fotg210, ctrl);
13585 + break;
13586 + case USB_REQ_CLEAR_FEATURE:
13587 + fotg210_clear_feature(fotg210, ctrl);
13588 + break;
13589 + case USB_REQ_SET_FEATURE:
13590 + fotg210_set_feature(fotg210, ctrl);
13591 + break;
13592 + case USB_REQ_SET_ADDRESS:
13593 + fotg210_set_address(fotg210, ctrl);
13594 + break;
13595 + case USB_REQ_SET_CONFIGURATION:
13596 + fotg210_set_configuration(fotg210);
13597 + ret = 1;
13598 + break;
13599 + default:
13600 + ret = 1;
13601 + break;
13602 + }
13603 + } else {
13604 + ret = 1;
13605 + }
13606 +
13607 + return ret;
13608 +}
13609 +
13610 +static void fotg210_ep0out(struct fotg210_udc *fotg210)
13611 +{
13612 + struct fotg210_ep *ep = fotg210->ep[0];
13613 +
13614 + if (!list_empty(&ep->queue) && !ep->dir_in) {
13615 + struct fotg210_request *req;
13616 +
13617 + req = list_first_entry(&ep->queue,
13618 + struct fotg210_request, queue);
13619 +
13620 + if (req->req.length)
13621 + fotg210_start_dma(ep, req);
13622 +
13623 + if ((req->req.length - req->req.actual) < ep->ep.maxpacket)
13624 + fotg210_done(ep, req, 0);
13625 + } else {
13626 + pr_err("%s : empty queue\n", __func__);
13627 + }
13628 +}
13629 +
13630 +static void fotg210_ep0in(struct fotg210_udc *fotg210)
13631 +{
13632 + struct fotg210_ep *ep = fotg210->ep[0];
13633 +
13634 + if ((!list_empty(&ep->queue)) && (ep->dir_in)) {
13635 + struct fotg210_request *req;
13636 +
13637 + req = list_entry(ep->queue.next,
13638 + struct fotg210_request, queue);
13639 +
13640 + if (req->req.length)
13641 + fotg210_start_dma(ep, req);
13642 +
13643 + if (req->req.actual == req->req.length)
13644 + fotg210_done(ep, req, 0);
13645 + } else {
13646 + fotg210_set_cxdone(fotg210);
13647 + }
13648 +}
13649 +
13650 +static void fotg210_clear_comabt_int(struct fotg210_udc *fotg210)
13651 +{
13652 + u32 value = ioread32(fotg210->reg + FOTG210_DISGR0);
13653 +
13654 + value &= ~DISGR0_CX_COMABT_INT;
13655 + iowrite32(value, fotg210->reg + FOTG210_DISGR0);
13656 +}
13657 +
13658 +static void fotg210_in_fifo_handler(struct fotg210_ep *ep)
13659 +{
13660 + struct fotg210_request *req = list_entry(ep->queue.next,
13661 + struct fotg210_request, queue);
13662 +
13663 + if (req->req.length)
13664 + fotg210_start_dma(ep, req);
13665 + fotg210_done(ep, req, 0);
13666 +}
13667 +
13668 +static void fotg210_out_fifo_handler(struct fotg210_ep *ep)
13669 +{
13670 + struct fotg210_request *req = list_entry(ep->queue.next,
13671 + struct fotg210_request, queue);
13672 + int disgr1 = ioread32(ep->fotg210->reg + FOTG210_DISGR1);
13673 +
13674 + fotg210_start_dma(ep, req);
13675 +
13676 + /* Complete the request when it's full or a short packet arrived.
13677 + * Like other drivers, short_not_ok isn't handled.
13678 + */
13679 +
13680 + if (req->req.length == req->req.actual ||
13681 + (disgr1 & DISGR1_SPK_INT(ep->epnum - 1)))
13682 + fotg210_done(ep, req, 0);
13683 +}
13684 +
13685 +static irqreturn_t fotg210_irq(int irq, void *_fotg210)
13686 +{
13687 + struct fotg210_udc *fotg210 = _fotg210;
13688 + u32 int_grp = ioread32(fotg210->reg + FOTG210_DIGR);
13689 + u32 int_msk = ioread32(fotg210->reg + FOTG210_DMIGR);
13690 +
13691 + int_grp &= ~int_msk;
13692 +
13693 + spin_lock(&fotg210->lock);
13694 +
13695 + if (int_grp & DIGR_INT_G2) {
13696 + void __iomem *reg = fotg210->reg + FOTG210_DISGR2;
13697 + u32 int_grp2 = ioread32(reg);
13698 + u32 int_msk2 = ioread32(fotg210->reg + FOTG210_DMISGR2);
13699 + u32 value;
13700 +
13701 + int_grp2 &= ~int_msk2;
13702 +
13703 + if (int_grp2 & DISGR2_USBRST_INT) {
13704 + usb_gadget_udc_reset(&fotg210->gadget,
13705 + fotg210->driver);
13706 + value = ioread32(reg);
13707 + value &= ~DISGR2_USBRST_INT;
13708 + iowrite32(value, reg);
13709 + pr_info("fotg210 udc reset\n");
13710 + }
13711 + if (int_grp2 & DISGR2_SUSP_INT) {
13712 + value = ioread32(reg);
13713 + value &= ~DISGR2_SUSP_INT;
13714 + iowrite32(value, reg);
13715 + pr_info("fotg210 udc suspend\n");
13716 + }
13717 + if (int_grp2 & DISGR2_RESM_INT) {
13718 + value = ioread32(reg);
13719 + value &= ~DISGR2_RESM_INT;
13720 + iowrite32(value, reg);
13721 + pr_info("fotg210 udc resume\n");
13722 + }
13723 + if (int_grp2 & DISGR2_ISO_SEQ_ERR_INT) {
13724 + value = ioread32(reg);
13725 + value &= ~DISGR2_ISO_SEQ_ERR_INT;
13726 + iowrite32(value, reg);
13727 + pr_info("fotg210 iso sequence error\n");
13728 + }
13729 + if (int_grp2 & DISGR2_ISO_SEQ_ABORT_INT) {
13730 + value = ioread32(reg);
13731 + value &= ~DISGR2_ISO_SEQ_ABORT_INT;
13732 + iowrite32(value, reg);
13733 + pr_info("fotg210 iso sequence abort\n");
13734 + }
13735 + if (int_grp2 & DISGR2_TX0BYTE_INT) {
13736 + fotg210_clear_tx0byte(fotg210);
13737 + value = ioread32(reg);
13738 + value &= ~DISGR2_TX0BYTE_INT;
13739 + iowrite32(value, reg);
13740 + pr_info("fotg210 transferred 0 byte\n");
13741 + }
13742 + if (int_grp2 & DISGR2_RX0BYTE_INT) {
13743 + fotg210_clear_rx0byte(fotg210);
13744 + value = ioread32(reg);
13745 + value &= ~DISGR2_RX0BYTE_INT;
13746 + iowrite32(value, reg);
13747 + pr_info("fotg210 received 0 byte\n");
13748 + }
13749 + if (int_grp2 & DISGR2_DMA_ERROR) {
13750 + value = ioread32(reg);
13751 + value &= ~DISGR2_DMA_ERROR;
13752 + iowrite32(value, reg);
13753 + }
13754 + }
13755 +
13756 + if (int_grp & DIGR_INT_G0) {
13757 + void __iomem *reg = fotg210->reg + FOTG210_DISGR0;
13758 + u32 int_grp0 = ioread32(reg);
13759 + u32 int_msk0 = ioread32(fotg210->reg + FOTG210_DMISGR0);
13760 + struct usb_ctrlrequest ctrl;
13761 +
13762 + int_grp0 &= ~int_msk0;
13763 +
13764 + /* the highest priority in this source register */
13765 + if (int_grp0 & DISGR0_CX_COMABT_INT) {
13766 + fotg210_clear_comabt_int(fotg210);
13767 + pr_info("fotg210 CX command abort\n");
13768 + }
13769 +
13770 + if (int_grp0 & DISGR0_CX_SETUP_INT) {
13771 + if (fotg210_setup_packet(fotg210, &ctrl)) {
13772 + spin_unlock(&fotg210->lock);
13773 + if (fotg210->driver->setup(&fotg210->gadget,
13774 + &ctrl) < 0)
13775 + fotg210_set_cxstall(fotg210);
13776 + spin_lock(&fotg210->lock);
13777 + }
13778 + }
13779 + if (int_grp0 & DISGR0_CX_COMEND_INT)
13780 + pr_info("fotg210 cmd end\n");
13781 +
13782 + if (int_grp0 & DISGR0_CX_IN_INT)
13783 + fotg210_ep0in(fotg210);
13784 +
13785 + if (int_grp0 & DISGR0_CX_OUT_INT)
13786 + fotg210_ep0out(fotg210);
13787 +
13788 + if (int_grp0 & DISGR0_CX_COMFAIL_INT) {
13789 + fotg210_set_cxstall(fotg210);
13790 + pr_info("fotg210 ep0 fail\n");
13791 + }
13792 + }
13793 +
13794 + if (int_grp & DIGR_INT_G1) {
13795 + void __iomem *reg = fotg210->reg + FOTG210_DISGR1;
13796 + u32 int_grp1 = ioread32(reg);
13797 + u32 int_msk1 = ioread32(fotg210->reg + FOTG210_DMISGR1);
13798 + int fifo;
13799 +
13800 + int_grp1 &= ~int_msk1;
13801 +
13802 + for (fifo = 0; fifo < FOTG210_MAX_FIFO_NUM; fifo++) {
13803 + if (int_grp1 & DISGR1_IN_INT(fifo))
13804 + fotg210_in_fifo_handler(fotg210->ep[fifo + 1]);
13805 +
13806 + if ((int_grp1 & DISGR1_OUT_INT(fifo)) ||
13807 + (int_grp1 & DISGR1_SPK_INT(fifo)))
13808 + fotg210_out_fifo_handler(fotg210->ep[fifo + 1]);
13809 + }
13810 + }
13811 +
13812 + spin_unlock(&fotg210->lock);
13813 +
13814 + return IRQ_HANDLED;
13815 +}
13816 +
13817 +static void fotg210_disable_unplug(struct fotg210_udc *fotg210)
13818 +{
13819 + u32 reg = ioread32(fotg210->reg + FOTG210_PHYTMSR);
13820 +
13821 + reg &= ~PHYTMSR_UNPLUG;
13822 + iowrite32(reg, fotg210->reg + FOTG210_PHYTMSR);
13823 +}
13824 +
13825 +static int fotg210_udc_start(struct usb_gadget *g,
13826 + struct usb_gadget_driver *driver)
13827 +{
13828 + struct fotg210_udc *fotg210 = gadget_to_fotg210(g);
13829 + u32 value;
13830 +
13831 + /* hook up the driver */
13832 + fotg210->driver = driver;
13833 +
13834 + /* enable device global interrupt */
13835 + value = ioread32(fotg210->reg + FOTG210_DMCR);
13836 + value |= DMCR_GLINT_EN;
13837 + iowrite32(value, fotg210->reg + FOTG210_DMCR);
13838 +
13839 + return 0;
13840 +}
13841 +
13842 +static void fotg210_init(struct fotg210_udc *fotg210)
13843 +{
13844 + u32 value;
13845 +
13846 + /* disable global interrupt and set int polarity to active high */
13847 + iowrite32(GMIR_MHC_INT | GMIR_MOTG_INT | GMIR_INT_POLARITY,
13848 + fotg210->reg + FOTG210_GMIR);
13849 +
13850 + /* disable device global interrupt */
13851 + value = ioread32(fotg210->reg + FOTG210_DMCR);
13852 + value &= ~DMCR_GLINT_EN;
13853 + iowrite32(value, fotg210->reg + FOTG210_DMCR);
13854 +
13855 + /* enable only grp2 irqs we handle */
13856 + iowrite32(~(DISGR2_DMA_ERROR | DISGR2_RX0BYTE_INT | DISGR2_TX0BYTE_INT
13857 + | DISGR2_ISO_SEQ_ABORT_INT | DISGR2_ISO_SEQ_ERR_INT
13858 + | DISGR2_RESM_INT | DISGR2_SUSP_INT | DISGR2_USBRST_INT),
13859 + fotg210->reg + FOTG210_DMISGR2);
13860 +
13861 + /* disable all fifo interrupt */
13862 + iowrite32(~(u32)0, fotg210->reg + FOTG210_DMISGR1);
13863 +
13864 + /* disable cmd end */
13865 + value = ioread32(fotg210->reg + FOTG210_DMISGR0);
13866 + value |= DMISGR0_MCX_COMEND;
13867 + iowrite32(value, fotg210->reg + FOTG210_DMISGR0);
13868 +}
13869 +
13870 +static int fotg210_udc_stop(struct usb_gadget *g)
13871 +{
13872 + struct fotg210_udc *fotg210 = gadget_to_fotg210(g);
13873 + unsigned long flags;
13874 +
13875 + spin_lock_irqsave(&fotg210->lock, flags);
13876 +
13877 + fotg210_init(fotg210);
13878 + fotg210->driver = NULL;
13879 +
13880 + spin_unlock_irqrestore(&fotg210->lock, flags);
13881 +
13882 + return 0;
13883 +}
13884 +
13885 +static const struct usb_gadget_ops fotg210_gadget_ops = {
13886 + .udc_start = fotg210_udc_start,
13887 + .udc_stop = fotg210_udc_stop,
13888 +};
13889 +
13890 +static int fotg210_udc_remove(struct platform_device *pdev)
13891 +{
13892 + struct fotg210_udc *fotg210 = platform_get_drvdata(pdev);
13893 + int i;
13894 +
13895 + usb_del_gadget_udc(&fotg210->gadget);
13896 + iounmap(fotg210->reg);
13897 + free_irq(platform_get_irq(pdev, 0), fotg210);
13898 +
13899 + fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
13900 + for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
13901 + kfree(fotg210->ep[i]);
13902 + kfree(fotg210);
13903 +
13904 + return 0;
13905 +}
13906 +
13907 +static int fotg210_udc_probe(struct platform_device *pdev)
13908 +{
13909 + struct resource *res, *ires;
13910 + struct fotg210_udc *fotg210 = NULL;
13911 + struct fotg210_ep *_ep[FOTG210_MAX_NUM_EP];
13912 + int ret = 0;
13913 + int i;
13914 +
13915 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
13916 + if (!res) {
13917 + pr_err("platform_get_resource error.\n");
13918 + return -ENODEV;
13919 + }
13920 +
13921 + ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
13922 + if (!ires) {
13923 + pr_err("platform_get_resource IORESOURCE_IRQ error.\n");
13924 + return -ENODEV;
13925 + }
13926 +
13927 + ret = -ENOMEM;
13928 +
13929 + /* initialize udc */
13930 + fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL);
13931 + if (fotg210 == NULL)
13932 + goto err;
13933 +
13934 + for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
13935 + _ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL);
13936 + if (_ep[i] == NULL)
13937 + goto err_alloc;
13938 + fotg210->ep[i] = _ep[i];
13939 + }
13940 +
13941 + fotg210->reg = ioremap(res->start, resource_size(res));
13942 + if (fotg210->reg == NULL) {
13943 + pr_err("ioremap error.\n");
13944 + goto err_alloc;
13945 + }
13946 +
13947 + spin_lock_init(&fotg210->lock);
13948 +
13949 + platform_set_drvdata(pdev, fotg210);
13950 +
13951 + fotg210->gadget.ops = &fotg210_gadget_ops;
13952 +
13953 + fotg210->gadget.max_speed = USB_SPEED_HIGH;
13954 + fotg210->gadget.dev.parent = &pdev->dev;
13955 + fotg210->gadget.dev.dma_mask = pdev->dev.dma_mask;
13956 + fotg210->gadget.name = udc_name;
13957 +
13958 + INIT_LIST_HEAD(&fotg210->gadget.ep_list);
13959 +
13960 + for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
13961 + struct fotg210_ep *ep = fotg210->ep[i];
13962 +
13963 + if (i) {
13964 + INIT_LIST_HEAD(&fotg210->ep[i]->ep.ep_list);
13965 + list_add_tail(&fotg210->ep[i]->ep.ep_list,
13966 + &fotg210->gadget.ep_list);
13967 + }
13968 + ep->fotg210 = fotg210;
13969 + INIT_LIST_HEAD(&ep->queue);
13970 + ep->ep.name = fotg210_ep_name[i];
13971 + ep->ep.ops = &fotg210_ep_ops;
13972 + usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
13973 +
13974 + if (i == 0) {
13975 + ep->ep.caps.type_control = true;
13976 + } else {
13977 + ep->ep.caps.type_iso = true;
13978 + ep->ep.caps.type_bulk = true;
13979 + ep->ep.caps.type_int = true;
13980 + }
13981 +
13982 + ep->ep.caps.dir_in = true;
13983 + ep->ep.caps.dir_out = true;
13984 + }
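+	/* ep0 is capped at 64 bytes, the maximum packet size for a USB 2.0 high-speed control endpoint */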
13985 + usb_ep_set_maxpacket_limit(&fotg210->ep[0]->ep, 0x40);
13986 + fotg210->gadget.ep0 = &fotg210->ep[0]->ep;
13987 + INIT_LIST_HEAD(&fotg210->gadget.ep0->ep_list);
13988 +
13989 + fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep,
13990 + GFP_KERNEL);
13991 + if (fotg210->ep0_req == NULL)
13992 + goto err_map;
13993 +
13994 + fotg210->ep0_req->complete = fotg210_ep0_complete;
13995 +
13996 + fotg210_init(fotg210);
13997 +
13998 + fotg210_disable_unplug(fotg210);
13999 +
14000 + ret = request_irq(ires->start, fotg210_irq, IRQF_SHARED,
14001 + udc_name, fotg210);
14002 + if (ret < 0) {
14003 + pr_err("request_irq error (%d)\n", ret);
14004 + goto err_req;
14005 + }
14006 +
14007 + ret = usb_add_gadget_udc(&pdev->dev, &fotg210->gadget);
14008 + if (ret)
14009 + goto err_add_udc;
14010 +
14011 + dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
14012 +
14013 + return 0;
14014 +
14015 +err_add_udc:
14016 + free_irq(ires->start, fotg210);
14017 +
14018 +err_req:
14019 + fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
14020 +
14021 +err_map:
14022 + iounmap(fotg210->reg);
14023 +
14024 +err_alloc:
14025 + for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
14026 + kfree(fotg210->ep[i]);
14027 + kfree(fotg210);
14028 +
14029 +err:
14030 + return ret;
14031 +}
14032 +
14033 +static struct platform_driver fotg210_driver = {
14034 + .driver = {
14035 + .name = udc_name,
14036 + },
14037 + .probe = fotg210_udc_probe,
14038 + .remove = fotg210_udc_remove,
14039 +};
14040 +
14041 +module_platform_driver(fotg210_driver);
14042 +
14043 +MODULE_AUTHOR("Yuan-Hsin Chen, Feng-Hsin Chiang <john453@faraday-tech.com>");
14044 +MODULE_LICENSE("GPL");
14045 +MODULE_DESCRIPTION(DRIVER_DESC);
14046 --- a/drivers/usb/gadget/udc/Kconfig
14047 +++ b/drivers/usb/gadget/udc/Kconfig
14048 @@ -108,17 +108,6 @@ config USB_FUSB300
14049 help
14050 Faraday usb device controller FUSB300 driver
14051
14052 -config USB_FOTG210_UDC
14053 - depends on HAS_DMA
14054 - tristate "Faraday FOTG210 USB Peripheral Controller"
14055 - help
14056 - Faraday USB2.0 OTG controller which can be configured as
14057 - high speed or full speed USB device. This driver supppors
14058 - Bulk Transfer so far.
14059 -
14060 - Say "y" to link the driver statically, or "m" to build a
14061 - dynamically linked module called "fotg210_udc".
14062 -
14063 config USB_GR_UDC
14064 tristate "Aeroflex Gaisler GRUSBDC USB Peripheral Controller Driver"
14065 depends on HAS_DMA
14066 --- a/drivers/usb/gadget/udc/Makefile
14067 +++ b/drivers/usb/gadget/udc/Makefile
14068 @@ -34,7 +34,6 @@ obj-$(CONFIG_USB_EG20T) += pch_udc.o
14069 obj-$(CONFIG_USB_MV_UDC) += mv_udc.o
14070 mv_udc-y := mv_udc_core.o
14071 obj-$(CONFIG_USB_FUSB300) += fusb300_udc.o
14072 -obj-$(CONFIG_USB_FOTG210_UDC) += fotg210-udc.o
14073 obj-$(CONFIG_USB_MV_U3D) += mv_u3d_core.o
14074 obj-$(CONFIG_USB_GR_UDC) += gr_udc.o
14075 obj-$(CONFIG_USB_GADGET_XILINX) += udc-xilinx.o
14076 --- a/drivers/usb/host/Kconfig
14077 +++ b/drivers/usb/host/Kconfig
14078 @@ -389,17 +389,6 @@ config USB_ISP1362_HCD
14079 To compile this driver as a module, choose M here: the
14080 module will be called isp1362-hcd.
14081
14082 -config USB_FOTG210_HCD
14083 - tristate "FOTG210 HCD support"
14084 - depends on USB && HAS_DMA && HAS_IOMEM
14085 - help
14086 - Faraday FOTG210 is an OTG controller which can be configured as
14087 - an USB2.0 host. It is designed to meet USB2.0 EHCI specification
14088 - with minor modification.
14089 -
14090 - To compile this driver as a module, choose M here: the
14091 - module will be called fotg210-hcd.
14092 -
14093 config USB_MAX3421_HCD
14094 tristate "MAX3421 HCD (USB-over-SPI) support"
14095 depends on USB && SPI
14096 --- a/drivers/usb/host/Makefile
14097 +++ b/drivers/usb/host/Makefile
14098 @@ -84,6 +84,5 @@ obj-$(CONFIG_USB_EHCI_FSL) += ehci-fsl.o
14099 obj-$(CONFIG_USB_EHCI_MV) += ehci-mv.o
14100 obj-$(CONFIG_USB_HCD_BCMA) += bcma-hcd.o
14101 obj-$(CONFIG_USB_HCD_SSB) += ssb-hcd.o
14102 -obj-$(CONFIG_USB_FOTG210_HCD) += fotg210-hcd.o
14103 obj-$(CONFIG_USB_MAX3421_HCD) += max3421-hcd.o
14104 obj-$(CONFIG_USB_XEN_HCD) += xen-hcd.o
14105 --- /dev/null
14106 +++ b/drivers/usb/fotg210/fotg210-hcd.h
14107 @@ -0,0 +1,688 @@
14108 +/* SPDX-License-Identifier: GPL-2.0 */
14109 +#ifndef __LINUX_FOTG210_H
14110 +#define __LINUX_FOTG210_H
14111 +
14112 +#include <linux/usb/ehci-dbgp.h>
14113 +
14114 +/* definitions used for the EHCI driver */
14115 +
14116 +/*
14117 + * __hc32 and __hc16 are "Host Controller" types, they may be equivalent to
14118 + * __leXX (normally) or __beXX (given FOTG210_BIG_ENDIAN_DESC), depending on
14119 + * the host controller implementation.
14120 + *
14121 + * To facilitate the strongest possible byte-order checking from "sparse"
14122 + * and so on, we use __leXX unless that's not practical.
14123 + */
14124 +#define __hc32 __le32
14125 +#define __hc16 __le16
14126 +
14127 +/* statistics can be kept for tuning/monitoring */
14128 +struct fotg210_stats {
14129 + /* irq usage */
14130 + unsigned long normal;
14131 + unsigned long error;
14132 + unsigned long iaa;
14133 + unsigned long lost_iaa;
14134 +
14135 + /* termination of urbs from core */
14136 + unsigned long complete;
14137 + unsigned long unlink;
14138 +};
14139 +
14140 +/* fotg210_hcd->lock guards shared data against other CPUs:
14141 + * fotg210_hcd: async, unlink, periodic (and shadow), ...
14142 + * usb_host_endpoint: hcpriv
14143 + * fotg210_qh: qh_next, qtd_list
14144 + * fotg210_qtd: qtd_list
14145 + *
14146 + * Also, hold this lock when talking to HC registers or
14147 + * when updating hw_* fields in shared qh/qtd/... structures.
14148 + */
14149 +
14150 +#define FOTG210_MAX_ROOT_PORTS 1 /* see HCS_N_PORTS */
14151 +
14152 +/*
14153 + * fotg210_rh_state values of FOTG210_RH_RUNNING or above mean that the
14154 + * controller may be doing DMA. Lower values mean there's no DMA.
14155 + */
14156 +enum fotg210_rh_state {
14157 + FOTG210_RH_HALTED,
14158 + FOTG210_RH_SUSPENDED,
14159 + FOTG210_RH_RUNNING,
14160 + FOTG210_RH_STOPPING
14161 +};
14162 +
14163 +/*
14164 + * Timer events, ordered by increasing delay length.
14165 + * Always update event_delays_ns[] and event_handlers[] (defined in
14166 + * ehci-timer.c) in parallel with this list.
14167 + */
14168 +enum fotg210_hrtimer_event {
14169 + FOTG210_HRTIMER_POLL_ASS, /* Poll for async schedule off */
14170 + FOTG210_HRTIMER_POLL_PSS, /* Poll for periodic schedule off */
14171 + FOTG210_HRTIMER_POLL_DEAD, /* Wait for dead controller to stop */
14172 + FOTG210_HRTIMER_UNLINK_INTR, /* Wait for interrupt QH unlink */
14173 + FOTG210_HRTIMER_FREE_ITDS, /* Wait for unused iTDs and siTDs */
14174 + FOTG210_HRTIMER_ASYNC_UNLINKS, /* Unlink empty async QHs */
14175 + FOTG210_HRTIMER_IAA_WATCHDOG, /* Handle lost IAA interrupts */
14176 + FOTG210_HRTIMER_DISABLE_PERIODIC, /* Wait to disable periodic sched */
14177 + FOTG210_HRTIMER_DISABLE_ASYNC, /* Wait to disable async sched */
14178 + FOTG210_HRTIMER_IO_WATCHDOG, /* Check for missing IRQs */
14179 + FOTG210_HRTIMER_NUM_EVENTS /* Must come last */
14180 +};
14181 +#define FOTG210_HRTIMER_NO_EVENT 99
14182 +
14183 +struct fotg210_hcd { /* one per controller */
14184 + /* timing support */
14185 + enum fotg210_hrtimer_event next_hrtimer_event;
14186 + unsigned enabled_hrtimer_events;
14187 + ktime_t hr_timeouts[FOTG210_HRTIMER_NUM_EVENTS];
14188 + struct hrtimer hrtimer;
14189 +
14190 + int PSS_poll_count;
14191 + int ASS_poll_count;
14192 + int died_poll_count;
14193 +
14194 + /* glue to PCI and HCD framework */
14195 + struct fotg210_caps __iomem *caps;
14196 + struct fotg210_regs __iomem *regs;
14197 + struct ehci_dbg_port __iomem *debug;
14198 +
14199 + __u32 hcs_params; /* cached register copy */
14200 + spinlock_t lock;
14201 + enum fotg210_rh_state rh_state;
14202 +
14203 + /* general schedule support */
14204 + bool scanning:1;
14205 + bool need_rescan:1;
14206 + bool intr_unlinking:1;
14207 + bool async_unlinking:1;
14208 + bool shutdown:1;
14209 + struct fotg210_qh *qh_scan_next;
14210 +
14211 + /* async schedule support */
14212 + struct fotg210_qh *async;
14213 + struct fotg210_qh *dummy; /* For AMD quirk use */
14214 + struct fotg210_qh *async_unlink;
14215 + struct fotg210_qh *async_unlink_last;
14216 + struct fotg210_qh *async_iaa;
14217 + unsigned async_unlink_cycle;
14218 + unsigned async_count; /* async activity count */
14219 +
14220 + /* periodic schedule support */
14221 +#define DEFAULT_I_TDPS 1024 /* some HCs can do less */
14222 + unsigned periodic_size;
14223 + __hc32 *periodic; /* hw periodic table */
14224 + dma_addr_t periodic_dma;
14225 + struct list_head intr_qh_list;
14226 + unsigned i_thresh; /* uframes HC might cache */
14227 +
14228 + union fotg210_shadow *pshadow; /* mirror hw periodic table */
14229 + struct fotg210_qh *intr_unlink;
14230 + struct fotg210_qh *intr_unlink_last;
14231 + unsigned intr_unlink_cycle;
14232 + unsigned now_frame; /* frame from HC hardware */
14233 + unsigned next_frame; /* scan periodic, start here */
14234 + unsigned intr_count; /* intr activity count */
14235 + unsigned isoc_count; /* isoc activity count */
14236 + unsigned periodic_count; /* periodic activity count */
14237 + /* max periodic time per uframe */
14238 + unsigned uframe_periodic_max;
14239 +
14240 +
14241 + /* list of itds completed while now_frame was still active */
14242 + struct list_head cached_itd_list;
14243 + struct fotg210_itd *last_itd_to_free;
14244 +
14245 + /* per root hub port */
14246 + unsigned long reset_done[FOTG210_MAX_ROOT_PORTS];
14247 +
14248 + /* bit vectors (one bit per port)
14249 + * which ports were already suspended at the start of a bus suspend
14250 + */
14251 + unsigned long bus_suspended;
14252 +
14253 +	/* which ports are dedicated to the companion controller */
14254 + unsigned long companion_ports;
14255 +
14256 + /* which ports are owned by the companion during a bus suspend */
14257 + unsigned long owned_ports;
14258 +
14259 + /* which ports have the change-suspend feature turned on */
14260 + unsigned long port_c_suspend;
14261 +
14262 + /* which ports are suspended */
14263 + unsigned long suspended_ports;
14264 +
14265 + /* which ports have started to resume */
14266 + unsigned long resuming_ports;
14267 +
14268 + /* per-HC memory pools (could be per-bus, but ...) */
14269 + struct dma_pool *qh_pool; /* qh per active urb */
14270 + struct dma_pool *qtd_pool; /* one or more per qh */
14271 + struct dma_pool *itd_pool; /* itd per iso urb */
14272 +
14273 + unsigned random_frame;
14274 + unsigned long next_statechange;
14275 + ktime_t last_periodic_enable;
14276 + u32 command;
14277 +
14278 + /* SILICON QUIRKS */
14279 + unsigned need_io_watchdog:1;
14280 + unsigned fs_i_thresh:1; /* Intel iso scheduling */
14281 +
14282 + u8 sbrn; /* packed release number */
14283 +
14284 + /* irq statistics */
14285 +#ifdef FOTG210_STATS
14286 + struct fotg210_stats stats;
14287 +# define INCR(x) ((x)++)
14288 +#else
14289 +# define INCR(x) do {} while (0)
14290 +#endif
14291 +
14292 + /* silicon clock */
14293 + struct clk *pclk;
14294 +};
14295 +
14296 +/* convert between an HCD pointer and the corresponding FOTG210_HCD */
14297 +static inline struct fotg210_hcd *hcd_to_fotg210(struct usb_hcd *hcd)
14298 +{
14299 + return (struct fotg210_hcd *)(hcd->hcd_priv);
14300 +}
14301 +static inline struct usb_hcd *fotg210_to_hcd(struct fotg210_hcd *fotg210)
14302 +{
14303 + return container_of((void *) fotg210, struct usb_hcd, hcd_priv);
14304 +}
14305 +
14306 +/*-------------------------------------------------------------------------*/
14307 +
14308 +/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */
14309 +
14310 +/* Section 2.2 Host Controller Capability Registers */
14311 +struct fotg210_caps {
14312 + /* these fields are specified as 8 and 16 bit registers,
14313 + * but some hosts can't perform 8 or 16 bit PCI accesses.
14314 + * some hosts treat caplength and hciversion as parts of a 32-bit
14315 + * register, others treat them as two separate registers, this
14316 + * affects the memory map for big endian controllers.
14317 + */
14318 + u32 hc_capbase;
14319 +#define HC_LENGTH(fotg210, p) (0x00ff&((p) >> /* bits 7:0 / offset 00h */ \
14320 + (fotg210_big_endian_capbase(fotg210) ? 24 : 0)))
14321 +#define HC_VERSION(fotg210, p) (0xffff&((p) >> /* bits 31:16 / offset 02h */ \
14322 + (fotg210_big_endian_capbase(fotg210) ? 0 : 16)))
14323 + u32 hcs_params; /* HCSPARAMS - offset 0x4 */
14324 +#define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */
14325 +
14326 + u32 hcc_params; /* HCCPARAMS - offset 0x8 */
14327 +#define HCC_CANPARK(p) ((p)&(1 << 2)) /* true: can park on async qh */
14328 +#define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1)) /* true: periodic_size changes*/
14329 + u8 portroute[8]; /* nibbles for routing - offset 0xC */
14330 +};
14331 +
14332 +
14333 +/* Section 2.3 Host Controller Operational Registers */
14334 +struct fotg210_regs {
14335 +
14336 + /* USBCMD: offset 0x00 */
14337 + u32 command;
14338 +
14339 +/* EHCI 1.1 addendum */
14340 +/* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */
14341 +#define CMD_PARK (1<<11) /* enable "park" on async qh */
14342 +#define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */
14343 +#define CMD_IAAD (1<<6) /* "doorbell" interrupt async advance */
14344 +#define CMD_ASE (1<<5) /* async schedule enable */
14345 +#define CMD_PSE (1<<4) /* periodic schedule enable */
14346 +/* 3:2 is periodic frame list size */
14347 +#define CMD_RESET (1<<1) /* reset HC not bus */
14348 +#define CMD_RUN (1<<0) /* start/stop HC */
14349 +
14350 + /* USBSTS: offset 0x04 */
14351 + u32 status;
14352 +#define STS_ASS (1<<15) /* Async Schedule Status */
14353 +#define STS_PSS (1<<14) /* Periodic Schedule Status */
14354 +#define STS_RECL (1<<13) /* Reclamation */
14355 +#define STS_HALT (1<<12) /* Not running (any reason) */
14356 +/* some bits reserved */
14357 + /* these STS_* flags are also intr_enable bits (USBINTR) */
14358 +#define STS_IAA (1<<5) /* Interrupted on async advance */
14359 +#define STS_FATAL (1<<4) /* such as some PCI access errors */
14360 +#define STS_FLR (1<<3) /* frame list rolled over */
14361 +#define STS_PCD (1<<2) /* port change detect */
14362 +#define STS_ERR (1<<1) /* "error" completion (overflow, ...) */
14363 +#define STS_INT (1<<0) /* "normal" completion (short, ...) */
14364 +
14365 + /* USBINTR: offset 0x08 */
14366 + u32 intr_enable;
14367 +
14368 + /* FRINDEX: offset 0x0C */
14369 + u32 frame_index; /* current microframe number */
14370 + /* CTRLDSSEGMENT: offset 0x10 */
14371 + u32 segment; /* address bits 63:32 if needed */
14372 + /* PERIODICLISTBASE: offset 0x14 */
14373 + u32 frame_list; /* points to periodic list */
14374 + /* ASYNCLISTADDR: offset 0x18 */
14375 + u32 async_next; /* address of next async queue head */
14376 +
14377 + u32 reserved1;
14378 + /* PORTSC: offset 0x20 */
14379 + u32 port_status;
14380 +/* 31:23 reserved */
14381 +#define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */
14382 +#define PORT_RESET (1<<8) /* reset port */
14383 +#define PORT_SUSPEND (1<<7) /* suspend port */
14384 +#define PORT_RESUME (1<<6) /* resume it */
14385 +#define PORT_PEC (1<<3) /* port enable change */
14386 +#define PORT_PE (1<<2) /* port enable */
14387 +#define PORT_CSC (1<<1) /* connect status change */
14388 +#define PORT_CONNECT (1<<0) /* device connected */
14389 +#define PORT_RWC_BITS (PORT_CSC | PORT_PEC)
14390 + u32 reserved2[19];
14391 +
14392 +	/* OTGCSR: offset 0x70 */
14393 + u32 otgcsr;
14394 +#define OTGCSR_HOST_SPD_TYP (3 << 22)
14395 +#define OTGCSR_A_BUS_DROP (1 << 5)
14396 +#define OTGCSR_A_BUS_REQ (1 << 4)
14397 +
14398 + /* OTGISR: offset 0x74 */
14399 + u32 otgisr;
14400 +#define OTGISR_OVC (1 << 10)
14401 +
14402 + u32 reserved3[15];
14403 +
14404 + /* GMIR: offset 0xB4 */
14405 + u32 gmir;
14406 +#define GMIR_INT_POLARITY (1 << 3) /*Active High*/
14407 +#define GMIR_MHC_INT (1 << 2)
14408 +#define GMIR_MOTG_INT (1 << 1)
14409 +#define GMIR_MDEV_INT (1 << 0)
14410 +};
14411 +
14412 +/*-------------------------------------------------------------------------*/
14413 +
14414 +#define QTD_NEXT(fotg210, dma) cpu_to_hc32(fotg210, (u32)dma)
14415 +
14416 +/*
14417 + * EHCI Specification 0.95 Section 3.5
14418 + * QTD: describe data transfer components (buffer, direction, ...)
14419 + * See Fig 3-6 "Queue Element Transfer Descriptor Block Diagram".
14420 + *
14421 + * These are associated only with "QH" (Queue Head) structures,
14422 + * used with control, bulk, and interrupt transfers.
14423 + */
14424 +struct fotg210_qtd {
14425 + /* first part defined by EHCI spec */
14426 + __hc32 hw_next; /* see EHCI 3.5.1 */
14427 + __hc32 hw_alt_next; /* see EHCI 3.5.2 */
14428 + __hc32 hw_token; /* see EHCI 3.5.3 */
14429 +#define QTD_TOGGLE (1 << 31) /* data toggle */
14430 +#define QTD_LENGTH(tok) (((tok)>>16) & 0x7fff)
14431 +#define QTD_IOC (1 << 15) /* interrupt on complete */
14432 +#define QTD_CERR(tok) (((tok)>>10) & 0x3)
14433 +#define QTD_PID(tok) (((tok)>>8) & 0x3)
14434 +#define QTD_STS_ACTIVE (1 << 7) /* HC may execute this */
14435 +#define QTD_STS_HALT (1 << 6) /* halted on error */
14436 +#define QTD_STS_DBE (1 << 5) /* data buffer error (in HC) */
14437 +#define QTD_STS_BABBLE (1 << 4) /* device was babbling (qtd halted) */
14438 +#define QTD_STS_XACT (1 << 3) /* device gave illegal response */
14439 +#define QTD_STS_MMF (1 << 2) /* incomplete split transaction */
14440 +#define QTD_STS_STS (1 << 1) /* split transaction state */
14441 +#define QTD_STS_PING (1 << 0) /* issue PING? */
14442 +
14443 +#define ACTIVE_BIT(fotg210) cpu_to_hc32(fotg210, QTD_STS_ACTIVE)
14444 +#define HALT_BIT(fotg210) cpu_to_hc32(fotg210, QTD_STS_HALT)
14445 +#define STATUS_BIT(fotg210) cpu_to_hc32(fotg210, QTD_STS_STS)
14446 +
14447 + __hc32 hw_buf[5]; /* see EHCI 3.5.4 */
14448 + __hc32 hw_buf_hi[5]; /* Appendix B */
14449 +
14450 + /* the rest is HCD-private */
14451 + dma_addr_t qtd_dma; /* qtd address */
14452 + struct list_head qtd_list; /* sw qtd list */
14453 + struct urb *urb; /* qtd's urb */
14454 + size_t length; /* length of buffer */
14455 +} __aligned(32);
14456 +
14457 +/* mask NakCnt+T in qh->hw_alt_next */
14458 +#define QTD_MASK(fotg210) cpu_to_hc32(fotg210, ~0x1f)
14459 +
14460 +#define IS_SHORT_READ(token) (QTD_LENGTH(token) != 0 && QTD_PID(token) == 1)
14461 +
14462 +/*-------------------------------------------------------------------------*/
14463 +
14464 +/* type tag from {qh,itd,fstn}->hw_next */
14465 +#define Q_NEXT_TYPE(fotg210, dma) ((dma) & cpu_to_hc32(fotg210, 3 << 1))
14466 +
14467 +/*
14468 + * Now the following defines are not converted using the
14469 + * cpu_to_le32() macro anymore, since we have to support
14470 + * "dynamic" switching between be and le support, so that the driver
14471 + * can be used on one system with SoC EHCI controller using big-endian
14472 + * descriptors as well as a normal little-endian PCI EHCI controller.
14473 + */
14474 +/* values for that type tag */
14475 +#define Q_TYPE_ITD (0 << 1)
14476 +#define Q_TYPE_QH (1 << 1)
14477 +#define Q_TYPE_SITD (2 << 1)
14478 +#define Q_TYPE_FSTN (3 << 1)
14479 +
14480 +/* next async queue entry, or pointer to interrupt/periodic QH */
14481 +#define QH_NEXT(fotg210, dma) \
14482 + (cpu_to_hc32(fotg210, (((u32)dma)&~0x01f)|Q_TYPE_QH))
14483 +
14484 +/* for periodic/async schedules and qtd lists, mark end of list */
14485 +#define FOTG210_LIST_END(fotg210) \
14486 + cpu_to_hc32(fotg210, 1) /* "null pointer" to hw */
14487 +
14488 +/*
14489 + * Entries in periodic shadow table are pointers to one of four kinds
14490 + * of data structure. That's dictated by the hardware; a type tag is
14491 + * encoded in the low bits of the hardware's periodic schedule. Use
14492 + * Q_NEXT_TYPE to get the tag.
14493 + *
14494 + * For entries in the async schedule, the type tag always says "qh".
14495 + */
14496 +union fotg210_shadow {
14497 + struct fotg210_qh *qh; /* Q_TYPE_QH */
14498 + struct fotg210_itd *itd; /* Q_TYPE_ITD */
14499 + struct fotg210_fstn *fstn; /* Q_TYPE_FSTN */
14500 + __hc32 *hw_next; /* (all types) */
14501 + void *ptr;
14502 +};
14503 +
14504 +/*-------------------------------------------------------------------------*/
14505 +
14506 +/*
14507 + * EHCI Specification 0.95 Section 3.6
14508 + * QH: describes control/bulk/interrupt endpoints
14509 + * See Fig 3-7 "Queue Head Structure Layout".
14510 + *
14511 + * These appear in both the async and (for interrupt) periodic schedules.
14512 + */
14513 +
14514 +/* first part defined by EHCI spec */
14515 +struct fotg210_qh_hw {
14516 + __hc32 hw_next; /* see EHCI 3.6.1 */
14517 + __hc32 hw_info1; /* see EHCI 3.6.2 */
14518 +#define QH_CONTROL_EP (1 << 27) /* FS/LS control endpoint */
14519 +#define QH_HEAD (1 << 15) /* Head of async reclamation list */
14520 +#define QH_TOGGLE_CTL (1 << 14) /* Data toggle control */
14521 +#define QH_HIGH_SPEED (2 << 12) /* Endpoint speed */
14522 +#define QH_LOW_SPEED (1 << 12)
14523 +#define QH_FULL_SPEED (0 << 12)
14524 +#define QH_INACTIVATE (1 << 7) /* Inactivate on next transaction */
14525 + __hc32 hw_info2; /* see EHCI 3.6.2 */
14526 +#define QH_SMASK 0x000000ff
14527 +#define QH_CMASK 0x0000ff00
14528 +#define QH_HUBADDR 0x007f0000
14529 +#define QH_HUBPORT 0x3f800000
14530 +#define QH_MULT 0xc0000000
14531 + __hc32 hw_current; /* qtd list - see EHCI 3.6.4 */
14532 +
14533 + /* qtd overlay (hardware parts of a struct fotg210_qtd) */
14534 + __hc32 hw_qtd_next;
14535 + __hc32 hw_alt_next;
14536 + __hc32 hw_token;
14537 + __hc32 hw_buf[5];
14538 + __hc32 hw_buf_hi[5];
14539 +} __aligned(32);
14540 +
14541 +struct fotg210_qh {
14542 + struct fotg210_qh_hw *hw; /* Must come first */
14543 + /* the rest is HCD-private */
14544 + dma_addr_t qh_dma; /* address of qh */
14545 + union fotg210_shadow qh_next; /* ptr to qh; or periodic */
14546 + struct list_head qtd_list; /* sw qtd list */
14547 + struct list_head intr_node; /* list of intr QHs */
14548 + struct fotg210_qtd *dummy;
14549 + struct fotg210_qh *unlink_next; /* next on unlink list */
14550 +
14551 + unsigned unlink_cycle;
14552 +
14553 + u8 needs_rescan; /* Dequeue during giveback */
14554 + u8 qh_state;
14555 +#define QH_STATE_LINKED 1 /* HC sees this */
14556 +#define QH_STATE_UNLINK 2 /* HC may still see this */
14557 +#define QH_STATE_IDLE 3 /* HC doesn't see this */
14558 +#define QH_STATE_UNLINK_WAIT 4 /* LINKED and on unlink q */
14559 +#define QH_STATE_COMPLETING 5 /* don't touch token.HALT */
14560 +
14561 + u8 xacterrs; /* XactErr retry counter */
14562 +#define QH_XACTERR_MAX 32 /* XactErr retry limit */
14563 +
14564 + /* periodic schedule info */
14565 + u8 usecs; /* intr bandwidth */
14566 + u8 gap_uf; /* uframes split/csplit gap */
14567 + u8 c_usecs; /* ... split completion bw */
14568 + u16 tt_usecs; /* tt downstream bandwidth */
14569 + unsigned short period; /* polling interval */
14570 + unsigned short start; /* where polling starts */
14571 +#define NO_FRAME ((unsigned short)~0) /* pick new start */
14572 +
14573 + struct usb_device *dev; /* access to TT */
14574 + unsigned is_out:1; /* bulk or intr OUT */
14575 + unsigned clearing_tt:1; /* Clear-TT-Buf in progress */
14576 +};
14577 +
14578 +/*-------------------------------------------------------------------------*/
14579 +
14580 +/* description of one iso transaction (up to 3 KB data if highspeed) */
14581 +struct fotg210_iso_packet {
14582 + /* These will be copied to iTD when scheduling */
14583 + u64 bufp; /* itd->hw_bufp{,_hi}[pg] |= */
14584 + __hc32 transaction; /* itd->hw_transaction[i] |= */
14585 + u8 cross; /* buf crosses pages */
14586 + /* for full speed OUT splits */
14587 + u32 buf1;
14588 +};
14589 +
14590 +/* temporary schedule data for packets from iso urbs (both speeds)
14591 + * each packet is one logical usb transaction to the device (not TT),
14592 + * beginning at stream->next_uframe
14593 + */
14594 +struct fotg210_iso_sched {
14595 + struct list_head td_list;
14596 + unsigned span;
14597 + struct fotg210_iso_packet packet[];
14598 +};
14599 +
14600 +/*
14601 + * fotg210_iso_stream - groups all (s)itds for this endpoint.
14602 + * acts like a qh would, if EHCI had them for ISO.
14603 + */
14604 +struct fotg210_iso_stream {
14605 +	/* first field matches fotg210_qh, but is NULL */
14606 + struct fotg210_qh_hw *hw;
14607 +
14608 + u8 bEndpointAddress;
14609 + u8 highspeed;
14610 + struct list_head td_list; /* queued itds */
14611 + struct list_head free_list; /* list of unused itds */
14612 + struct usb_device *udev;
14613 + struct usb_host_endpoint *ep;
14614 +
14615 + /* output of (re)scheduling */
14616 + int next_uframe;
14617 + __hc32 splits;
14618 +
14619 + /* the rest is derived from the endpoint descriptor,
14620 + * trusting urb->interval == f(epdesc->bInterval) and
14621 + * including the extra info for hw_bufp[0..2]
14622 + */
14623 + u8 usecs, c_usecs;
14624 + u16 interval;
14625 + u16 tt_usecs;
14626 + u16 maxp;
14627 + u16 raw_mask;
14628 + unsigned bandwidth;
14629 +
14630 + /* This is used to initialize iTD's hw_bufp fields */
14631 + __hc32 buf0;
14632 + __hc32 buf1;
14633 + __hc32 buf2;
14634 +
14635 + /* this is used to initialize sITD's tt info */
14636 + __hc32 address;
14637 +};
14638 +
14639 +/*-------------------------------------------------------------------------*/
14640 +
14641 +/*
14642 + * EHCI Specification 0.95 Section 3.3
14643 + * Fig 3-4 "Isochronous Transaction Descriptor (iTD)"
14644 + *
14645 + * Schedule records for high speed iso xfers
14646 + */
14647 +struct fotg210_itd {
14648 + /* first part defined by EHCI spec */
14649 + __hc32 hw_next; /* see EHCI 3.3.1 */
14650 + __hc32 hw_transaction[8]; /* see EHCI 3.3.2 */
14651 +#define FOTG210_ISOC_ACTIVE (1<<31) /* activate transfer this slot */
14652 +#define FOTG210_ISOC_BUF_ERR (1<<30) /* Data buffer error */
14653 +#define FOTG210_ISOC_BABBLE (1<<29) /* babble detected */
14654 +#define FOTG210_ISOC_XACTERR (1<<28) /* XactErr - transaction error */
14655 +#define FOTG210_ITD_LENGTH(tok) (((tok)>>16) & 0x0fff)
14656 +#define FOTG210_ITD_IOC (1 << 15) /* interrupt on complete */
14657 +
14658 +#define ITD_ACTIVE(fotg210) cpu_to_hc32(fotg210, FOTG210_ISOC_ACTIVE)
14659 +
14660 + __hc32 hw_bufp[7]; /* see EHCI 3.3.3 */
14661 + __hc32 hw_bufp_hi[7]; /* Appendix B */
14662 +
14663 + /* the rest is HCD-private */
14664 + dma_addr_t itd_dma; /* for this itd */
14665 + union fotg210_shadow itd_next; /* ptr to periodic q entry */
14666 +
14667 + struct urb *urb;
14668 + struct fotg210_iso_stream *stream; /* endpoint's queue */
14669 + struct list_head itd_list; /* list of stream's itds */
14670 +
14671 + /* any/all hw_transactions here may be used by that urb */
14672 + unsigned frame; /* where scheduled */
14673 + unsigned pg;
14674 + unsigned index[8]; /* in urb->iso_frame_desc */
14675 +} __aligned(32);
14676 +
14677 +/*-------------------------------------------------------------------------*/
14678 +
14679 +/*
14680 + * EHCI Specification 0.96 Section 3.7
14681 + * Periodic Frame Span Traversal Node (FSTN)
14682 + *
14683 + * Manages split interrupt transactions (using TT) that span frame boundaries
14684 + * into uframes 0/1; see 4.12.2.2. In those uframes, a "save place" FSTN
14685 + * makes the HC jump (back) to a QH to scan for fs/ls QH completions until
14686 + * it hits a "restore" FSTN; then it returns to finish other uframe 0/1 work.
14687 + */
14688 +struct fotg210_fstn {
14689 + __hc32 hw_next; /* any periodic q entry */
14690 + __hc32 hw_prev; /* qh or FOTG210_LIST_END */
14691 +
14692 + /* the rest is HCD-private */
14693 + dma_addr_t fstn_dma;
14694 + union fotg210_shadow fstn_next; /* ptr to periodic q entry */
14695 +} __aligned(32);
14696 +
14697 +/*-------------------------------------------------------------------------*/
14698 +
14699 +/* Prepare the PORTSC wakeup flags during controller suspend/resume */
14700 +
14701 +#define fotg210_prepare_ports_for_controller_suspend(fotg210, do_wakeup) \
14702 + fotg210_adjust_port_wakeup_flags(fotg210, true, do_wakeup)
14703 +
14704 +#define fotg210_prepare_ports_for_controller_resume(fotg210) \
14705 + fotg210_adjust_port_wakeup_flags(fotg210, false, false)
14706 +
14707 +/*-------------------------------------------------------------------------*/
14708 +
14709 +/*
14710 + * Some EHCI controllers have a Transaction Translator built into the
14711 + * root hub. This is a non-standard feature. Each controller will need
14712 + * to add code to the following inline functions, and call them as
14713 + * needed (mostly in root hub code).
14714 + */
14715 +
14716 +static inline unsigned int
14717 +fotg210_get_speed(struct fotg210_hcd *fotg210, unsigned int portsc)
14718 +{
14719 + return (readl(&fotg210->regs->otgcsr)
14720 + & OTGCSR_HOST_SPD_TYP) >> 22;
14721 +}
14722 +
14723 +/* Returns the speed of a device attached to a port on the root hub. */
14724 +static inline unsigned int
14725 +fotg210_port_speed(struct fotg210_hcd *fotg210, unsigned int portsc)
14726 +{
14727 + switch (fotg210_get_speed(fotg210, portsc)) {
14728 + case 0:
14729 + return 0;
14730 + case 1:
14731 + return USB_PORT_STAT_LOW_SPEED;
14732 + case 2:
14733 + default:
14734 + return USB_PORT_STAT_HIGH_SPEED;
14735 + }
14736 +}
14737 +
14738 +/*-------------------------------------------------------------------------*/
14739 +
14740 +#define fotg210_has_fsl_portno_bug(e) (0)
14741 +
14742 +/*
14743 + * While most USB host controllers implement their registers in
14744 + * little-endian format, a minority (celleb companion chip) implement
14745 + * them in big endian format.
14746 + *
14747 + * This attempts to support either format at compile time without a
14748 + * runtime penalty, or both formats with the additional overhead
14749 + * of checking a flag bit.
14750 + *
14751 + */
14752 +
14753 +#define fotg210_big_endian_mmio(e) 0
14754 +#define fotg210_big_endian_capbase(e) 0
14755 +
14756 +static inline unsigned int fotg210_readl(const struct fotg210_hcd *fotg210,
14757 + __u32 __iomem *regs)
14758 +{
14759 + return readl(regs);
14760 +}
14761 +
14762 +static inline void fotg210_writel(const struct fotg210_hcd *fotg210,
14763 + const unsigned int val, __u32 __iomem *regs)
14764 +{
14765 + writel(val, regs);
14766 +}
14767 +
14768 +/* cpu to fotg210 */
14769 +static inline __hc32 cpu_to_hc32(const struct fotg210_hcd *fotg210, const u32 x)
14770 +{
14771 + return cpu_to_le32(x);
14772 +}
14773 +
14774 +/* fotg210 to cpu */
14775 +static inline u32 hc32_to_cpu(const struct fotg210_hcd *fotg210, const __hc32 x)
14776 +{
14777 + return le32_to_cpu(x);
14778 +}
14779 +
14780 +static inline u32 hc32_to_cpup(const struct fotg210_hcd *fotg210,
14781 + const __hc32 *x)
14782 +{
14783 + return le32_to_cpup(x);
14784 +}
14785 +
14786 +/*-------------------------------------------------------------------------*/
14787 +
14788 +static inline unsigned fotg210_read_frame_index(struct fotg210_hcd *fotg210)
14789 +{
14790 + return fotg210_readl(fotg210, &fotg210->regs->frame_index);
14791 +}
14792 +
14793 +/*-------------------------------------------------------------------------*/
14794 +
14795 +#endif /* __LINUX_FOTG210_H */
14796 --- /dev/null
14797 +++ b/drivers/usb/fotg210/fotg210-udc.h
14798 @@ -0,0 +1,249 @@
14799 +// SPDX-License-Identifier: GPL-2.0+
14800 +/*
14801 + * Faraday FOTG210 USB OTG controller
14802 + *
14803 + * Copyright (C) 2013 Faraday Technology Corporation
14804 + * Author: Yuan-Hsin Chen <yhchen@faraday-tech.com>
14805 + */
14806 +
14807 +#include <linux/kernel.h>
14808 +
14809 +#define FOTG210_MAX_NUM_EP 5 /* ep0...ep4 */
14810 +#define FOTG210_MAX_FIFO_NUM	4	/* fifo0...fifo3 */
14811 +
14812 +/* Global Mask of HC/OTG/DEV interrupt Register(0xC4) */
14813 +#define FOTG210_GMIR 0xC4
14814 +#define GMIR_INT_POLARITY 0x8 /*Active High*/
14815 +#define GMIR_MHC_INT 0x4
14816 +#define GMIR_MOTG_INT 0x2
14817 +#define GMIR_MDEV_INT 0x1
14818 +
14819 +/* Device Main Control Register(0x100) */
14820 +#define FOTG210_DMCR 0x100
14821 +#define DMCR_HS_EN (1 << 6)
14822 +#define DMCR_CHIP_EN (1 << 5)
14823 +#define DMCR_SFRST (1 << 4)
14824 +#define DMCR_GOSUSP (1 << 3)
14825 +#define DMCR_GLINT_EN (1 << 2)
14826 +#define DMCR_HALF_SPEED (1 << 1)
14827 +#define DMCR_CAP_RMWAKUP (1 << 0)
14828 +
14829 +/* Device Address Register(0x104) */
14830 +#define FOTG210_DAR 0x104
14831 +#define DAR_AFT_CONF (1 << 7)
14832 +
14833 +/* Device Test Register(0x108) */
14834 +#define FOTG210_DTR 0x108
14835 +#define DTR_TST_CLRFF (1 << 0)
14836 +
14837 +/* PHY Test Mode Selector register(0x114) */
14838 +#define FOTG210_PHYTMSR 0x114
14839 +#define PHYTMSR_TST_PKT (1 << 4)
14840 +#define PHYTMSR_TST_SE0NAK (1 << 3)
14841 +#define PHYTMSR_TST_KSTA (1 << 2)
14842 +#define PHYTMSR_TST_JSTA (1 << 1)
14843 +#define PHYTMSR_UNPLUG (1 << 0)
14844 +
14845 +/* Cx configuration and FIFO Empty Status register(0x120) */
14846 +#define FOTG210_DCFESR 0x120
14847 +#define DCFESR_FIFO_EMPTY(fifo) (1 << 8 << (fifo))
14848 +#define DCFESR_CX_EMP (1 << 5)
14849 +#define DCFESR_CX_CLR (1 << 3)
14850 +#define DCFESR_CX_STL (1 << 2)
14851 +#define DCFESR_TST_PKDONE (1 << 1)
14852 +#define DCFESR_CX_DONE (1 << 0)
14853 +
14854 +/* Device IDLE Counter Register(0x124) */
14855 +#define FOTG210_DICR 0x124
14856 +
14857 +/* Device Mask of Interrupt Group Register (0x130) */
14858 +#define FOTG210_DMIGR 0x130
14859 +#define DMIGR_MINT_G0 (1 << 0)
14860 +
14861 +/* Device Mask of Interrupt Source Group 0(0x134) */
14862 +#define FOTG210_DMISGR0 0x134
14863 +#define DMISGR0_MCX_COMEND (1 << 3)
14864 +#define DMISGR0_MCX_OUT_INT (1 << 2)
14865 +#define DMISGR0_MCX_IN_INT (1 << 1)
14866 +#define DMISGR0_MCX_SETUP_INT (1 << 0)
14867 +
14868 +/* Device Mask of Interrupt Source Group 1 Register(0x138)*/
14869 +#define FOTG210_DMISGR1 0x138
14870 +#define DMISGR1_MF3_IN_INT (1 << 19)
14871 +#define DMISGR1_MF2_IN_INT (1 << 18)
14872 +#define DMISGR1_MF1_IN_INT (1 << 17)
14873 +#define DMISGR1_MF0_IN_INT (1 << 16)
14874 +#define DMISGR1_MF_IN_INT(fifo) (1 << (16 + (fifo)))
14875 +#define DMISGR1_MF3_SPK_INT (1 << 7)
14876 +#define DMISGR1_MF3_OUT_INT (1 << 6)
14877 +#define DMISGR1_MF2_SPK_INT (1 << 5)
14878 +#define DMISGR1_MF2_OUT_INT (1 << 4)
14879 +#define DMISGR1_MF1_SPK_INT (1 << 3)
14880 +#define DMISGR1_MF1_OUT_INT (1 << 2)
14881 +#define DMISGR1_MF0_SPK_INT (1 << 1)
14882 +#define DMISGR1_MF0_OUT_INT (1 << 0)
14883 +#define DMISGR1_MF_OUTSPK_INT(fifo) (0x3 << (fifo) * 2)
14884 +
14885 +/* Device Mask of Interrupt Source Group 2 Register (0x13C) */
14886 +#define FOTG210_DMISGR2 0x13C
14887 +#define DMISGR2_MDMA_ERROR (1 << 8)
14888 +#define DMISGR2_MDMA_CMPLT (1 << 7)
14889 +
14890 +/* Device Interrupt group Register (0x140) */
14891 +#define FOTG210_DIGR 0x140
14892 +#define DIGR_INT_G2 (1 << 2)
14893 +#define DIGR_INT_G1 (1 << 1)
14894 +#define DIGR_INT_G0 (1 << 0)
14895 +
14896 +/* Device Interrupt Source Group 0 Register (0x144) */
14897 +#define FOTG210_DISGR0 0x144
14898 +#define DISGR0_CX_COMABT_INT (1 << 5)
14899 +#define DISGR0_CX_COMFAIL_INT (1 << 4)
14900 +#define DISGR0_CX_COMEND_INT (1 << 3)
14901 +#define DISGR0_CX_OUT_INT (1 << 2)
14902 +#define DISGR0_CX_IN_INT (1 << 1)
14903 +#define DISGR0_CX_SETUP_INT (1 << 0)
14904 +
14905 +/* Device Interrupt Source Group 1 Register (0x148) */
14906 +#define FOTG210_DISGR1 0x148
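+/* per-FIFO bits: OUT and short-packet interrupts for FIFO n occupy bits 2n and 2n+1, IN interrupts bit 16 + n */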
14907 +#define DISGR1_OUT_INT(fifo) (1 << ((fifo) * 2))
14908 +#define DISGR1_SPK_INT(fifo) (1 << 1 << ((fifo) * 2))
14909 +#define DISGR1_IN_INT(fifo) (1 << 16 << (fifo))
14910 +
14911 +/* Device Interrupt Source Group 2 Register (0x14C) */
14912 +#define FOTG210_DISGR2 0x14C
14913 +#define DISGR2_DMA_ERROR (1 << 8)
14914 +#define DISGR2_DMA_CMPLT (1 << 7)
14915 +#define DISGR2_RX0BYTE_INT (1 << 6)
14916 +#define DISGR2_TX0BYTE_INT (1 << 5)
14917 +#define DISGR2_ISO_SEQ_ABORT_INT (1 << 4)
14918 +#define DISGR2_ISO_SEQ_ERR_INT (1 << 3)
14919 +#define DISGR2_RESM_INT (1 << 2)
14920 +#define DISGR2_SUSP_INT (1 << 1)
14921 +#define DISGR2_USBRST_INT (1 << 0)
14922 +
14923 +/* Device Receive Zero-Length Data Packet Register (0x150)*/
14924 +#define FOTG210_RX0BYTE 0x150
14925 +#define RX0BYTE_EP8 (1 << 7)
14926 +#define RX0BYTE_EP7 (1 << 6)
14927 +#define RX0BYTE_EP6 (1 << 5)
14928 +#define RX0BYTE_EP5 (1 << 4)
14929 +#define RX0BYTE_EP4 (1 << 3)
14930 +#define RX0BYTE_EP3 (1 << 2)
14931 +#define RX0BYTE_EP2 (1 << 1)
14932 +#define RX0BYTE_EP1 (1 << 0)
14933 +
14934 +/* Device Transfer Zero-Length Data Packet Register (0x154)*/
14935 +#define FOTG210_TX0BYTE 0x154
14936 +#define TX0BYTE_EP8 (1 << 7)
14937 +#define TX0BYTE_EP7 (1 << 6)
14938 +#define TX0BYTE_EP6 (1 << 5)
14939 +#define TX0BYTE_EP5 (1 << 4)
14940 +#define TX0BYTE_EP4 (1 << 3)
14941 +#define TX0BYTE_EP3 (1 << 2)
14942 +#define TX0BYTE_EP2 (1 << 1)
14943 +#define TX0BYTE_EP1 (1 << 0)
14944 +
14945 +/* Device IN Endpoint x MaxPacketSize Register(0x160+4*(x-1)) */
14946 +#define FOTG210_INEPMPSR(ep) (0x160 + 4 * ((ep) - 1))
14947 +#define INOUTEPMPSR_MPS(mps) ((mps) & 0x2FF)
14948 +#define INOUTEPMPSR_STL_EP (1 << 11)
14949 +#define INOUTEPMPSR_RESET_TSEQ (1 << 12)
14950 +
14951 +/* Device OUT Endpoint x MaxPacketSize Register(0x180+4*(x-1)) */
14952 +#define FOTG210_OUTEPMPSR(ep) (0x180 + 4 * ((ep) - 1))
14953 +
14954 +/* Device Endpoint 1~4 Map Register (0x1A0) */
14955 +#define FOTG210_EPMAP 0x1A0
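+/* one byte per endpoint, one nibble per direction; the helpers below assign FIFO (ep - 1) to endpoint ep */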
14956 +#define EPMAP_FIFONO(ep, dir) \
14957 + ((((ep) - 1) << ((ep) - 1) * 8) << ((dir) ? 0 : 4))
14958 +#define EPMAP_FIFONOMSK(ep, dir) \
14959 + ((3 << ((ep) - 1) * 8) << ((dir) ? 0 : 4))
14960 +
14961 +/* Device FIFO Map Register (0x1A8) */
14962 +#define FOTG210_FIFOMAP 0x1A8
14963 +#define FIFOMAP_DIROUT(fifo) (0x0 << 4 << (fifo) * 8)
14964 +#define FIFOMAP_DIRIN(fifo) (0x1 << 4 << (fifo) * 8)
14965 +#define FIFOMAP_BIDIR(fifo) (0x2 << 4 << (fifo) * 8)
14966 +#define FIFOMAP_NA(fifo) (0x3 << 4 << (fifo) * 8)
14967 +#define FIFOMAP_EPNO(ep) ((ep) << ((ep) - 1) * 8)
14968 +#define FIFOMAP_EPNOMSK(ep) (0xF << ((ep) - 1) * 8)
14969 +
14970 +/* Device FIFO Configuration Register (0x1AC) */
14971 +#define FOTG210_FIFOCF 0x1AC
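+/* one configuration byte per FIFO: bits 1:0 transfer type, bits 3:2 block count (single/double/triple), bit 4 block size (512/1024), bit 5 enable */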
14972 +#define FIFOCF_TYPE(type, fifo) ((type) << (fifo) * 8)
14973 +#define FIFOCF_BLK_SIN(fifo) (0x0 << (fifo) * 8 << 2)
14974 +#define FIFOCF_BLK_DUB(fifo) (0x1 << (fifo) * 8 << 2)
14975 +#define FIFOCF_BLK_TRI(fifo) (0x2 << (fifo) * 8 << 2)
14976 +#define FIFOCF_BLKSZ_512(fifo) (0x0 << (fifo) * 8 << 4)
14977 +#define FIFOCF_BLKSZ_1024(fifo) (0x1 << (fifo) * 8 << 4)
14978 +#define FIFOCF_FIFO_EN(fifo) (0x1 << (fifo) * 8 << 5)
14979 +
14980 +/* Device FIFO n Instruction and Byte Count Register (0x1B0+4*n) */
14981 +#define FOTG210_FIBCR(fifo) (0x1B0 + (fifo) * 4)
14982 +#define FIBCR_BCFX 0x7FF
14983 +#define FIBCR_FFRST (1 << 12)
14984 +
14985 +/* Device DMA Target FIFO Number Register (0x1C0) */
14986 +#define FOTG210_DMATFNR 0x1C0
14987 +#define DMATFNR_ACC_CXF (1 << 4)
14988 +#define DMATFNR_ACC_F3 (1 << 3)
14989 +#define DMATFNR_ACC_F2 (1 << 2)
14990 +#define DMATFNR_ACC_F1 (1 << 1)
14991 +#define DMATFNR_ACC_F0 (1 << 0)
14992 +#define DMATFNR_ACC_FN(fifo) (1 << (fifo))
14993 +#define DMATFNR_DISDMA 0
14994 +
14995 +/* Device DMA Controller Parameter setting 1 Register (0x1C8) */
14996 +#define FOTG210_DMACPSR1 0x1C8
14997 +#define DMACPSR1_DMA_LEN(len) (((len) & 0xFFFF) << 8)
14998 +#define DMACPSR1_DMA_ABORT (1 << 3)
14999 +#define DMACPSR1_DMA_TYPE(dir_in) (((dir_in) ? 1 : 0) << 1)
15000 +#define DMACPSR1_DMA_START (1 << 0)
15001 +
15002 +/* Device DMA Controller Parameter setting 2 Register (0x1CC) */
15003 +#define FOTG210_DMACPSR2 0x1CC
15004 +
15005 +/* Device DMA Controller Parameter setting 3 Register (0x1D0) */
15006 +#define FOTG210_CXPORT 0x1D0
15007 +
15008 +struct fotg210_request {
15009 + struct usb_request req;
15010 + struct list_head queue;
15011 +};
15012 +
15013 +struct fotg210_ep {
15014 + struct usb_ep ep;
15015 + struct fotg210_udc *fotg210;
15016 +
15017 + struct list_head queue;
15018 + unsigned stall:1;
15019 + unsigned wedged:1;
15020 + unsigned use_dma:1;
15021 +
15022 + unsigned char epnum;
15023 + unsigned char type;
15024 + unsigned char dir_in;
15025 + unsigned int maxp;
15026 + const struct usb_endpoint_descriptor *desc;
15027 +};
15028 +
15029 +struct fotg210_udc {
15030 + spinlock_t lock; /* protect the struct */
15031 + void __iomem *reg;
15032 +
15033 + unsigned long irq_trigger;
15034 +
15035 + struct usb_gadget gadget;
15036 + struct usb_gadget_driver *driver;
15037 +
15038 + struct fotg210_ep *ep[FOTG210_MAX_NUM_EP];
15039 +
15040 + struct usb_request *ep0_req; /* for internal request */
15041 + __le16 ep0_data;
15042 + u8 ep0_dir; /* 0/0x80 out/in */
15043 +
15044 + u8 reenum; /* if re-enumeration */
15045 +};
15046 +
15047 +#define gadget_to_fotg210(g) container_of((g), struct fotg210_udc, gadget)
15048 --- a/drivers/usb/gadget/udc/fotg210.h
15049 +++ /dev/null
15050 @@ -1,249 +0,0 @@
15051 -// SPDX-License-Identifier: GPL-2.0+
15052 -/*
15053 - * Faraday FOTG210 USB OTG controller
15054 - *
15055 - * Copyright (C) 2013 Faraday Technology Corporation
15056 - * Author: Yuan-Hsin Chen <yhchen@faraday-tech.com>
15057 - */
15058 -
15059 -#include <linux/kernel.h>
15060 -
15061 -#define FOTG210_MAX_NUM_EP 5 /* ep0...ep4 */
15062 -#define FOTG210_MAX_FIFO_NUM 4 /* fifo0...fifo4 */
15063 -
15064 -/* Global Mask of HC/OTG/DEV interrupt Register(0xC4) */
15065 -#define FOTG210_GMIR 0xC4
15066 -#define GMIR_INT_POLARITY 0x8 /*Active High*/
15067 -#define GMIR_MHC_INT 0x4
15068 -#define GMIR_MOTG_INT 0x2
15069 -#define GMIR_MDEV_INT 0x1
15070 -
15071 -/* Device Main Control Register(0x100) */
15072 -#define FOTG210_DMCR 0x100
15073 -#define DMCR_HS_EN (1 << 6)
15074 -#define DMCR_CHIP_EN (1 << 5)
15075 -#define DMCR_SFRST (1 << 4)
15076 -#define DMCR_GOSUSP (1 << 3)
15077 -#define DMCR_GLINT_EN (1 << 2)
15078 -#define DMCR_HALF_SPEED (1 << 1)
15079 -#define DMCR_CAP_RMWAKUP (1 << 0)
15080 -
15081 -/* Device Address Register(0x104) */
15082 -#define FOTG210_DAR 0x104
15083 -#define DAR_AFT_CONF (1 << 7)
15084 -
15085 -/* Device Test Register(0x108) */
15086 -#define FOTG210_DTR 0x108
15087 -#define DTR_TST_CLRFF (1 << 0)
15088 -
15089 -/* PHY Test Mode Selector register(0x114) */
15090 -#define FOTG210_PHYTMSR 0x114
15091 -#define PHYTMSR_TST_PKT (1 << 4)
15092 -#define PHYTMSR_TST_SE0NAK (1 << 3)
15093 -#define PHYTMSR_TST_KSTA (1 << 2)
15094 -#define PHYTMSR_TST_JSTA (1 << 1)
15095 -#define PHYTMSR_UNPLUG (1 << 0)
15096 -
15097 -/* Cx configuration and FIFO Empty Status register(0x120) */
15098 -#define FOTG210_DCFESR 0x120
15099 -#define DCFESR_FIFO_EMPTY(fifo) (1 << 8 << (fifo))
15100 -#define DCFESR_CX_EMP (1 << 5)
15101 -#define DCFESR_CX_CLR (1 << 3)
15102 -#define DCFESR_CX_STL (1 << 2)
15103 -#define DCFESR_TST_PKDONE (1 << 1)
15104 -#define DCFESR_CX_DONE (1 << 0)
15105 -
15106 -/* Device IDLE Counter Register(0x124) */
15107 -#define FOTG210_DICR 0x124
15108 -
15109 -/* Device Mask of Interrupt Group Register (0x130) */
15110 -#define FOTG210_DMIGR 0x130
15111 -#define DMIGR_MINT_G0 (1 << 0)
15112 -
15113 -/* Device Mask of Interrupt Source Group 0(0x134) */
15114 -#define FOTG210_DMISGR0 0x134
15115 -#define DMISGR0_MCX_COMEND (1 << 3)
15116 -#define DMISGR0_MCX_OUT_INT (1 << 2)
15117 -#define DMISGR0_MCX_IN_INT (1 << 1)
15118 -#define DMISGR0_MCX_SETUP_INT (1 << 0)
15119 -
15120 -/* Device Mask of Interrupt Source Group 1 Register(0x138)*/
15121 -#define FOTG210_DMISGR1 0x138
15122 -#define DMISGR1_MF3_IN_INT (1 << 19)
15123 -#define DMISGR1_MF2_IN_INT (1 << 18)
15124 -#define DMISGR1_MF1_IN_INT (1 << 17)
15125 -#define DMISGR1_MF0_IN_INT (1 << 16)
15126 -#define DMISGR1_MF_IN_INT(fifo) (1 << (16 + (fifo)))
15127 -#define DMISGR1_MF3_SPK_INT (1 << 7)
15128 -#define DMISGR1_MF3_OUT_INT (1 << 6)
15129 -#define DMISGR1_MF2_SPK_INT (1 << 5)
15130 -#define DMISGR1_MF2_OUT_INT (1 << 4)
15131 -#define DMISGR1_MF1_SPK_INT (1 << 3)
15132 -#define DMISGR1_MF1_OUT_INT (1 << 2)
15133 -#define DMISGR1_MF0_SPK_INT (1 << 1)
15134 -#define DMISGR1_MF0_OUT_INT (1 << 0)
15135 -#define DMISGR1_MF_OUTSPK_INT(fifo) (0x3 << (fifo) * 2)
15136 -
15137 -/* Device Mask of Interrupt Source Group 2 Register (0x13C) */
15138 -#define FOTG210_DMISGR2 0x13C
15139 -#define DMISGR2_MDMA_ERROR (1 << 8)
15140 -#define DMISGR2_MDMA_CMPLT (1 << 7)
15141 -
15142 -/* Device Interrupt group Register (0x140) */
15143 -#define FOTG210_DIGR 0x140
15144 -#define DIGR_INT_G2 (1 << 2)
15145 -#define DIGR_INT_G1 (1 << 1)
15146 -#define DIGR_INT_G0 (1 << 0)
15147 -
15148 -/* Device Interrupt Source Group 0 Register (0x144) */
15149 -#define FOTG210_DISGR0 0x144
15150 -#define DISGR0_CX_COMABT_INT (1 << 5)
15151 -#define DISGR0_CX_COMFAIL_INT (1 << 4)
15152 -#define DISGR0_CX_COMEND_INT (1 << 3)
15153 -#define DISGR0_CX_OUT_INT (1 << 2)
15154 -#define DISGR0_CX_IN_INT (1 << 1)
15155 -#define DISGR0_CX_SETUP_INT (1 << 0)
15156 -
15157 -/* Device Interrupt Source Group 1 Register (0x148) */
15158 -#define FOTG210_DISGR1 0x148
15159 -#define DISGR1_OUT_INT(fifo) (1 << ((fifo) * 2))
15160 -#define DISGR1_SPK_INT(fifo) (1 << 1 << ((fifo) * 2))
15161 -#define DISGR1_IN_INT(fifo) (1 << 16 << (fifo))
15162 -
15163 -/* Device Interrupt Source Group 2 Register (0x14C) */
15164 -#define FOTG210_DISGR2 0x14C
15165 -#define DISGR2_DMA_ERROR (1 << 8)
15166 -#define DISGR2_DMA_CMPLT (1 << 7)
15167 -#define DISGR2_RX0BYTE_INT (1 << 6)
15168 -#define DISGR2_TX0BYTE_INT (1 << 5)
15169 -#define DISGR2_ISO_SEQ_ABORT_INT (1 << 4)
15170 -#define DISGR2_ISO_SEQ_ERR_INT (1 << 3)
15171 -#define DISGR2_RESM_INT (1 << 2)
15172 -#define DISGR2_SUSP_INT (1 << 1)
15173 -#define DISGR2_USBRST_INT (1 << 0)
15174 -
15175 -/* Device Receive Zero-Length Data Packet Register (0x150)*/
15176 -#define FOTG210_RX0BYTE 0x150
15177 -#define RX0BYTE_EP8 (1 << 7)
15178 -#define RX0BYTE_EP7 (1 << 6)
15179 -#define RX0BYTE_EP6 (1 << 5)
15180 -#define RX0BYTE_EP5 (1 << 4)
15181 -#define RX0BYTE_EP4 (1 << 3)
15182 -#define RX0BYTE_EP3 (1 << 2)
15183 -#define RX0BYTE_EP2 (1 << 1)
15184 -#define RX0BYTE_EP1 (1 << 0)
15185 -
15186 -/* Device Transfer Zero-Length Data Packet Register (0x154)*/
15187 -#define FOTG210_TX0BYTE 0x154
15188 -#define TX0BYTE_EP8 (1 << 7)
15189 -#define TX0BYTE_EP7 (1 << 6)
15190 -#define TX0BYTE_EP6 (1 << 5)
15191 -#define TX0BYTE_EP5 (1 << 4)
15192 -#define TX0BYTE_EP4 (1 << 3)
15193 -#define TX0BYTE_EP3 (1 << 2)
15194 -#define TX0BYTE_EP2 (1 << 1)
15195 -#define TX0BYTE_EP1 (1 << 0)
15196 -
15197 -/* Device IN Endpoint x MaxPacketSize Register(0x160+4*(x-1)) */
15198 -#define FOTG210_INEPMPSR(ep) (0x160 + 4 * ((ep) - 1))
15199 -#define INOUTEPMPSR_MPS(mps) ((mps) & 0x2FF)
15200 -#define INOUTEPMPSR_STL_EP (1 << 11)
15201 -#define INOUTEPMPSR_RESET_TSEQ (1 << 12)
15202 -
15203 -/* Device OUT Endpoint x MaxPacketSize Register(0x180+4*(x-1)) */
15204 -#define FOTG210_OUTEPMPSR(ep) (0x180 + 4 * ((ep) - 1))
15205 -
15206 -/* Device Endpoint 1~4 Map Register (0x1A0) */
15207 -#define FOTG210_EPMAP 0x1A0
15208 -#define EPMAP_FIFONO(ep, dir) \
15209 - ((((ep) - 1) << ((ep) - 1) * 8) << ((dir) ? 0 : 4))
15210 -#define EPMAP_FIFONOMSK(ep, dir) \
15211 - ((3 << ((ep) - 1) * 8) << ((dir) ? 0 : 4))
15212 -
15213 -/* Device FIFO Map Register (0x1A8) */
15214 -#define FOTG210_FIFOMAP 0x1A8
15215 -#define FIFOMAP_DIROUT(fifo) (0x0 << 4 << (fifo) * 8)
15216 -#define FIFOMAP_DIRIN(fifo) (0x1 << 4 << (fifo) * 8)
15217 -#define FIFOMAP_BIDIR(fifo) (0x2 << 4 << (fifo) * 8)
15218 -#define FIFOMAP_NA(fifo) (0x3 << 4 << (fifo) * 8)
15219 -#define FIFOMAP_EPNO(ep) ((ep) << ((ep) - 1) * 8)
15220 -#define FIFOMAP_EPNOMSK(ep) (0xF << ((ep) - 1) * 8)
15221 -
15222 -/* Device FIFO Confuguration Register (0x1AC) */
15223 -#define FOTG210_FIFOCF 0x1AC
15224 -#define FIFOCF_TYPE(type, fifo) ((type) << (fifo) * 8)
15225 -#define FIFOCF_BLK_SIN(fifo) (0x0 << (fifo) * 8 << 2)
15226 -#define FIFOCF_BLK_DUB(fifo) (0x1 << (fifo) * 8 << 2)
15227 -#define FIFOCF_BLK_TRI(fifo) (0x2 << (fifo) * 8 << 2)
15228 -#define FIFOCF_BLKSZ_512(fifo) (0x0 << (fifo) * 8 << 4)
15229 -#define FIFOCF_BLKSZ_1024(fifo) (0x1 << (fifo) * 8 << 4)
15230 -#define FIFOCF_FIFO_EN(fifo) (0x1 << (fifo) * 8 << 5)
15231 -
15232 -/* Device FIFO n Instruction and Byte Count Register (0x1B0+4*n) */
15233 -#define FOTG210_FIBCR(fifo) (0x1B0 + (fifo) * 4)
15234 -#define FIBCR_BCFX 0x7FF
15235 -#define FIBCR_FFRST (1 << 12)
15236 -
15237 -/* Device DMA Target FIFO Number Register (0x1C0) */
15238 -#define FOTG210_DMATFNR 0x1C0
15239 -#define DMATFNR_ACC_CXF (1 << 4)
15240 -#define DMATFNR_ACC_F3 (1 << 3)
15241 -#define DMATFNR_ACC_F2 (1 << 2)
15242 -#define DMATFNR_ACC_F1 (1 << 1)
15243 -#define DMATFNR_ACC_F0 (1 << 0)
15244 -#define DMATFNR_ACC_FN(fifo) (1 << (fifo))
15245 -#define DMATFNR_DISDMA 0
15246 -
15247 -/* Device DMA Controller Parameter setting 1 Register (0x1C8) */
15248 -#define FOTG210_DMACPSR1 0x1C8
15249 -#define DMACPSR1_DMA_LEN(len) (((len) & 0xFFFF) << 8)
15250 -#define DMACPSR1_DMA_ABORT (1 << 3)
15251 -#define DMACPSR1_DMA_TYPE(dir_in) (((dir_in) ? 1 : 0) << 1)
15252 -#define DMACPSR1_DMA_START (1 << 0)
15253 -
15254 -/* Device DMA Controller Parameter setting 2 Register (0x1CC) */
15255 -#define FOTG210_DMACPSR2 0x1CC
15256 -
15257 -/* Device DMA Controller Parameter setting 3 Register (0x1CC) */
15258 -#define FOTG210_CXPORT 0x1D0
15259 -
15260 -struct fotg210_request {
15261 - struct usb_request req;
15262 - struct list_head queue;
15263 -};
15264 -
15265 -struct fotg210_ep {
15266 - struct usb_ep ep;
15267 - struct fotg210_udc *fotg210;
15268 -
15269 - struct list_head queue;
15270 - unsigned stall:1;
15271 - unsigned wedged:1;
15272 - unsigned use_dma:1;
15273 -
15274 - unsigned char epnum;
15275 - unsigned char type;
15276 - unsigned char dir_in;
15277 - unsigned int maxp;
15278 - const struct usb_endpoint_descriptor *desc;
15279 -};
15280 -
15281 -struct fotg210_udc {
15282 - spinlock_t lock; /* protect the struct */
15283 - void __iomem *reg;
15284 -
15285 - unsigned long irq_trigger;
15286 -
15287 - struct usb_gadget gadget;
15288 - struct usb_gadget_driver *driver;
15289 -
15290 - struct fotg210_ep *ep[FOTG210_MAX_NUM_EP];
15291 -
15292 - struct usb_request *ep0_req; /* for internal request */
15293 - __le16 ep0_data;
15294 - u8 ep0_dir; /* 0/0x80 out/in */
15295 -
15296 - u8 reenum; /* if re-enumeration */
15297 -};
15298 -
15299 -#define gadget_to_fotg210(g) container_of((g), struct fotg210_udc, gadget)
15300 --- a/drivers/usb/host/fotg210.h
15301 +++ /dev/null
15302 @@ -1,688 +0,0 @@
15303 -/* SPDX-License-Identifier: GPL-2.0 */
15304 -#ifndef __LINUX_FOTG210_H
15305 -#define __LINUX_FOTG210_H
15306 -
15307 -#include <linux/usb/ehci-dbgp.h>
15308 -
15309 -/* definitions used for the EHCI driver */
15310 -
15311 -/*
15312 - * __hc32 and __hc16 are "Host Controller" types, they may be equivalent to
15313 - * __leXX (normally) or __beXX (given FOTG210_BIG_ENDIAN_DESC), depending on
15314 - * the host controller implementation.
15315 - *
15316 - * To facilitate the strongest possible byte-order checking from "sparse"
15317 - * and so on, we use __leXX unless that's not practical.
15318 - */
15319 -#define __hc32 __le32
15320 -#define __hc16 __le16
15321 -
15322 -/* statistics can be kept for tuning/monitoring */
15323 -struct fotg210_stats {
15324 - /* irq usage */
15325 - unsigned long normal;
15326 - unsigned long error;
15327 - unsigned long iaa;
15328 - unsigned long lost_iaa;
15329 -
15330 - /* termination of urbs from core */
15331 - unsigned long complete;
15332 - unsigned long unlink;
15333 -};
15334 -
15335 -/* fotg210_hcd->lock guards shared data against other CPUs:
15336 - * fotg210_hcd: async, unlink, periodic (and shadow), ...
15337 - * usb_host_endpoint: hcpriv
15338 - * fotg210_qh: qh_next, qtd_list
15339 - * fotg210_qtd: qtd_list
15340 - *
15341 - * Also, hold this lock when talking to HC registers or
15342 - * when updating hw_* fields in shared qh/qtd/... structures.
15343 - */
15344 -
15345 -#define FOTG210_MAX_ROOT_PORTS 1 /* see HCS_N_PORTS */
15346 -
15347 -/*
15348 - * fotg210_rh_state values of FOTG210_RH_RUNNING or above mean that the
15349 - * controller may be doing DMA. Lower values mean there's no DMA.
15350 - */
15351 -enum fotg210_rh_state {
15352 - FOTG210_RH_HALTED,
15353 - FOTG210_RH_SUSPENDED,
15354 - FOTG210_RH_RUNNING,
15355 - FOTG210_RH_STOPPING
15356 -};
15357 -
15358 -/*
15359 - * Timer events, ordered by increasing delay length.
15360 - * Always update event_delays_ns[] and event_handlers[] (defined in
15361 - * ehci-timer.c) in parallel with this list.
15362 - */
15363 -enum fotg210_hrtimer_event {
15364 - FOTG210_HRTIMER_POLL_ASS, /* Poll for async schedule off */
15365 - FOTG210_HRTIMER_POLL_PSS, /* Poll for periodic schedule off */
15366 - FOTG210_HRTIMER_POLL_DEAD, /* Wait for dead controller to stop */
15367 - FOTG210_HRTIMER_UNLINK_INTR, /* Wait for interrupt QH unlink */
15368 - FOTG210_HRTIMER_FREE_ITDS, /* Wait for unused iTDs and siTDs */
15369 - FOTG210_HRTIMER_ASYNC_UNLINKS, /* Unlink empty async QHs */
15370 - FOTG210_HRTIMER_IAA_WATCHDOG, /* Handle lost IAA interrupts */
15371 - FOTG210_HRTIMER_DISABLE_PERIODIC, /* Wait to disable periodic sched */
15372 - FOTG210_HRTIMER_DISABLE_ASYNC, /* Wait to disable async sched */
15373 - FOTG210_HRTIMER_IO_WATCHDOG, /* Check for missing IRQs */
15374 - FOTG210_HRTIMER_NUM_EVENTS /* Must come last */
15375 -};
15376 -#define FOTG210_HRTIMER_NO_EVENT 99
15377 -
15378 -struct fotg210_hcd { /* one per controller */
15379 - /* timing support */
15380 - enum fotg210_hrtimer_event next_hrtimer_event;
15381 - unsigned enabled_hrtimer_events;
15382 - ktime_t hr_timeouts[FOTG210_HRTIMER_NUM_EVENTS];
15383 - struct hrtimer hrtimer;
15384 -
15385 - int PSS_poll_count;
15386 - int ASS_poll_count;
15387 - int died_poll_count;
15388 -
15389 - /* glue to PCI and HCD framework */
15390 - struct fotg210_caps __iomem *caps;
15391 - struct fotg210_regs __iomem *regs;
15392 - struct ehci_dbg_port __iomem *debug;
15393 -
15394 - __u32 hcs_params; /* cached register copy */
15395 - spinlock_t lock;
15396 - enum fotg210_rh_state rh_state;
15397 -
15398 - /* general schedule support */
15399 - bool scanning:1;
15400 - bool need_rescan:1;
15401 - bool intr_unlinking:1;
15402 - bool async_unlinking:1;
15403 - bool shutdown:1;
15404 - struct fotg210_qh *qh_scan_next;
15405 -
15406 - /* async schedule support */
15407 - struct fotg210_qh *async;
15408 - struct fotg210_qh *dummy; /* For AMD quirk use */
15409 - struct fotg210_qh *async_unlink;
15410 - struct fotg210_qh *async_unlink_last;
15411 - struct fotg210_qh *async_iaa;
15412 - unsigned async_unlink_cycle;
15413 - unsigned async_count; /* async activity count */
15414 -
15415 - /* periodic schedule support */
15416 -#define DEFAULT_I_TDPS 1024 /* some HCs can do less */
15417 - unsigned periodic_size;
15418 - __hc32 *periodic; /* hw periodic table */
15419 - dma_addr_t periodic_dma;
15420 - struct list_head intr_qh_list;
15421 - unsigned i_thresh; /* uframes HC might cache */
15422 -
15423 - union fotg210_shadow *pshadow; /* mirror hw periodic table */
15424 - struct fotg210_qh *intr_unlink;
15425 - struct fotg210_qh *intr_unlink_last;
15426 - unsigned intr_unlink_cycle;
15427 - unsigned now_frame; /* frame from HC hardware */
15428 - unsigned next_frame; /* scan periodic, start here */
15429 - unsigned intr_count; /* intr activity count */
15430 - unsigned isoc_count; /* isoc activity count */
15431 - unsigned periodic_count; /* periodic activity count */
15432 - /* max periodic time per uframe */
15433 - unsigned uframe_periodic_max;
15434 -
15435 -
15436 - /* list of itds completed while now_frame was still active */
15437 - struct list_head cached_itd_list;
15438 - struct fotg210_itd *last_itd_to_free;
15439 -
15440 - /* per root hub port */
15441 - unsigned long reset_done[FOTG210_MAX_ROOT_PORTS];
15442 -
15443 - /* bit vectors (one bit per port)
15444 - * which ports were already suspended at the start of a bus suspend
15445 - */
15446 - unsigned long bus_suspended;
15447 -
15448 -	/* which ports are dedicated to the companion controller */
15449 - unsigned long companion_ports;
15450 -
15451 - /* which ports are owned by the companion during a bus suspend */
15452 - unsigned long owned_ports;
15453 -
15454 - /* which ports have the change-suspend feature turned on */
15455 - unsigned long port_c_suspend;
15456 -
15457 - /* which ports are suspended */
15458 - unsigned long suspended_ports;
15459 -
15460 - /* which ports have started to resume */
15461 - unsigned long resuming_ports;
15462 -
15463 - /* per-HC memory pools (could be per-bus, but ...) */
15464 - struct dma_pool *qh_pool; /* qh per active urb */
15465 - struct dma_pool *qtd_pool; /* one or more per qh */
15466 - struct dma_pool *itd_pool; /* itd per iso urb */
15467 -
15468 - unsigned random_frame;
15469 - unsigned long next_statechange;
15470 - ktime_t last_periodic_enable;
15471 - u32 command;
15472 -
15473 - /* SILICON QUIRKS */
15474 - unsigned need_io_watchdog:1;
15475 - unsigned fs_i_thresh:1; /* Intel iso scheduling */
15476 -
15477 - u8 sbrn; /* packed release number */
15478 -
15479 - /* irq statistics */
15480 -#ifdef FOTG210_STATS
15481 - struct fotg210_stats stats;
15482 -# define INCR(x) ((x)++)
15483 -#else
15484 -# define INCR(x) do {} while (0)
15485 -#endif
15486 -
15487 - /* silicon clock */
15488 - struct clk *pclk;
15489 -};
15490 -
15491 -/* convert between an HCD pointer and the corresponding FOTG210_HCD */
15492 -static inline struct fotg210_hcd *hcd_to_fotg210(struct usb_hcd *hcd)
15493 -{
15494 - return (struct fotg210_hcd *)(hcd->hcd_priv);
15495 -}
15496 -static inline struct usb_hcd *fotg210_to_hcd(struct fotg210_hcd *fotg210)
15497 -{
15498 - return container_of((void *) fotg210, struct usb_hcd, hcd_priv);
15499 -}
15500 -
15501 -/*-------------------------------------------------------------------------*/
15502 -
15503 -/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */
15504 -
15505 -/* Section 2.2 Host Controller Capability Registers */
15506 -struct fotg210_caps {
15507 - /* these fields are specified as 8 and 16 bit registers,
15508 - * but some hosts can't perform 8 or 16 bit PCI accesses.
15509 - * some hosts treat caplength and hciversion as parts of a 32-bit
15510 - * register, others treat them as two separate registers, this
15511 - * affects the memory map for big endian controllers.
15512 - */
15513 - u32 hc_capbase;
15514 -#define HC_LENGTH(fotg210, p) (0x00ff&((p) >> /* bits 7:0 / offset 00h */ \
15515 - (fotg210_big_endian_capbase(fotg210) ? 24 : 0)))
15516 -#define HC_VERSION(fotg210, p) (0xffff&((p) >> /* bits 31:16 / offset 02h */ \
15517 - (fotg210_big_endian_capbase(fotg210) ? 0 : 16)))
15518 - u32 hcs_params; /* HCSPARAMS - offset 0x4 */
15519 -#define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */
15520 -
15521 - u32 hcc_params; /* HCCPARAMS - offset 0x8 */
15522 -#define HCC_CANPARK(p) ((p)&(1 << 2)) /* true: can park on async qh */
15523 -#define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1)) /* true: periodic_size changes*/
15524 - u8 portroute[8]; /* nibbles for routing - offset 0xC */
15525 -};
15526 -
15527 -
15528 -/* Section 2.3 Host Controller Operational Registers */
15529 -struct fotg210_regs {
15530 -
15531 - /* USBCMD: offset 0x00 */
15532 - u32 command;
15533 -
15534 -/* EHCI 1.1 addendum */
15535 -/* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */
15536 -#define CMD_PARK (1<<11) /* enable "park" on async qh */
15537 -#define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */
15538 -#define CMD_IAAD (1<<6) /* "doorbell" interrupt async advance */
15539 -#define CMD_ASE (1<<5) /* async schedule enable */
15540 -#define CMD_PSE (1<<4) /* periodic schedule enable */
15541 -/* 3:2 is periodic frame list size */
15542 -#define CMD_RESET (1<<1) /* reset HC not bus */
15543 -#define CMD_RUN (1<<0) /* start/stop HC */
15544 -
15545 - /* USBSTS: offset 0x04 */
15546 - u32 status;
15547 -#define STS_ASS (1<<15) /* Async Schedule Status */
15548 -#define STS_PSS (1<<14) /* Periodic Schedule Status */
15549 -#define STS_RECL (1<<13) /* Reclamation */
15550 -#define STS_HALT (1<<12) /* Not running (any reason) */
15551 -/* some bits reserved */
15552 - /* these STS_* flags are also intr_enable bits (USBINTR) */
15553 -#define STS_IAA (1<<5) /* Interrupted on async advance */
15554 -#define STS_FATAL (1<<4) /* such as some PCI access errors */
15555 -#define STS_FLR (1<<3) /* frame list rolled over */
15556 -#define STS_PCD (1<<2) /* port change detect */
15557 -#define STS_ERR (1<<1) /* "error" completion (overflow, ...) */
15558 -#define STS_INT (1<<0) /* "normal" completion (short, ...) */
15559 -
15560 - /* USBINTR: offset 0x08 */
15561 - u32 intr_enable;
15562 -
15563 - /* FRINDEX: offset 0x0C */
15564 - u32 frame_index; /* current microframe number */
15565 - /* CTRLDSSEGMENT: offset 0x10 */
15566 - u32 segment; /* address bits 63:32 if needed */
15567 - /* PERIODICLISTBASE: offset 0x14 */
15568 - u32 frame_list; /* points to periodic list */
15569 - /* ASYNCLISTADDR: offset 0x18 */
15570 - u32 async_next; /* address of next async queue head */
15571 -
15572 - u32 reserved1;
15573 - /* PORTSC: offset 0x20 */
15574 - u32 port_status;
15575 -/* 31:23 reserved */
15576 -#define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */
15577 -#define PORT_RESET (1<<8) /* reset port */
15578 -#define PORT_SUSPEND (1<<7) /* suspend port */
15579 -#define PORT_RESUME (1<<6) /* resume it */
15580 -#define PORT_PEC (1<<3) /* port enable change */
15581 -#define PORT_PE (1<<2) /* port enable */
15582 -#define PORT_CSC (1<<1) /* connect status change */
15583 -#define PORT_CONNECT (1<<0) /* device connected */
15584 -#define PORT_RWC_BITS (PORT_CSC | PORT_PEC)
15585 - u32 reserved2[19];
15586 -
15587 -	/* OTGCSR: offset 0x70 */
15588 - u32 otgcsr;
15589 -#define OTGCSR_HOST_SPD_TYP (3 << 22)
15590 -#define OTGCSR_A_BUS_DROP (1 << 5)
15591 -#define OTGCSR_A_BUS_REQ (1 << 4)
15592 -
15593 - /* OTGISR: offset 0x74 */
15594 - u32 otgisr;
15595 -#define OTGISR_OVC (1 << 10)
15596 -
15597 - u32 reserved3[15];
15598 -
15599 - /* GMIR: offset 0xB4 */
15600 - u32 gmir;
15601 -#define GMIR_INT_POLARITY (1 << 3) /*Active High*/
15602 -#define GMIR_MHC_INT (1 << 2)
15603 -#define GMIR_MOTG_INT (1 << 1)
15604 -#define GMIR_MDEV_INT (1 << 0)
15605 -};
15606 -
15607 -/*-------------------------------------------------------------------------*/
15608 -
15609 -#define QTD_NEXT(fotg210, dma) cpu_to_hc32(fotg210, (u32)dma)
15610 -
15611 -/*
15612 - * EHCI Specification 0.95 Section 3.5
15613 - * QTD: describe data transfer components (buffer, direction, ...)
15614 - * See Fig 3-6 "Queue Element Transfer Descriptor Block Diagram".
15615 - *
15616 - * These are associated only with "QH" (Queue Head) structures,
15617 - * used with control, bulk, and interrupt transfers.
15618 - */
15619 -struct fotg210_qtd {
15620 - /* first part defined by EHCI spec */
15621 - __hc32 hw_next; /* see EHCI 3.5.1 */
15622 - __hc32 hw_alt_next; /* see EHCI 3.5.2 */
15623 - __hc32 hw_token; /* see EHCI 3.5.3 */
15624 -#define QTD_TOGGLE (1 << 31) /* data toggle */
15625 -#define QTD_LENGTH(tok) (((tok)>>16) & 0x7fff)
15626 -#define QTD_IOC (1 << 15) /* interrupt on complete */
15627 -#define QTD_CERR(tok) (((tok)>>10) & 0x3)
15628 -#define QTD_PID(tok) (((tok)>>8) & 0x3)
15629 -#define QTD_STS_ACTIVE (1 << 7) /* HC may execute this */
15630 -#define QTD_STS_HALT (1 << 6) /* halted on error */
15631 -#define QTD_STS_DBE (1 << 5) /* data buffer error (in HC) */
15632 -#define QTD_STS_BABBLE (1 << 4) /* device was babbling (qtd halted) */
15633 -#define QTD_STS_XACT (1 << 3) /* device gave illegal response */
15634 -#define QTD_STS_MMF (1 << 2) /* incomplete split transaction */
15635 -#define QTD_STS_STS (1 << 1) /* split transaction state */
15636 -#define QTD_STS_PING (1 << 0) /* issue PING? */
15637 -
15638 -#define ACTIVE_BIT(fotg210) cpu_to_hc32(fotg210, QTD_STS_ACTIVE)
15639 -#define HALT_BIT(fotg210) cpu_to_hc32(fotg210, QTD_STS_HALT)
15640 -#define STATUS_BIT(fotg210) cpu_to_hc32(fotg210, QTD_STS_STS)
15641 -
15642 - __hc32 hw_buf[5]; /* see EHCI 3.5.4 */
15643 - __hc32 hw_buf_hi[5]; /* Appendix B */
15644 -
15645 - /* the rest is HCD-private */
15646 - dma_addr_t qtd_dma; /* qtd address */
15647 - struct list_head qtd_list; /* sw qtd list */
15648 - struct urb *urb; /* qtd's urb */
15649 - size_t length; /* length of buffer */
15650 -} __aligned(32);
15651 -
15652 -/* mask NakCnt+T in qh->hw_alt_next */
15653 -#define QTD_MASK(fotg210) cpu_to_hc32(fotg210, ~0x1f)
15654 -
15655 -#define IS_SHORT_READ(token) (QTD_LENGTH(token) != 0 && QTD_PID(token) == 1)
15656 -
15657 -/*-------------------------------------------------------------------------*/
15658 -
15659 -/* type tag from {qh,itd,fstn}->hw_next */
15660 -#define Q_NEXT_TYPE(fotg210, dma) ((dma) & cpu_to_hc32(fotg210, 3 << 1))
15661 -
15662 -/*
15663 - * Now the following defines are not converted using the
15664 - * cpu_to_le32() macro anymore, since we have to support
15665 - * "dynamic" switching between be and le support, so that the driver
15666 - * can be used on one system with SoC EHCI controller using big-endian
15667 - * descriptors as well as a normal little-endian PCI EHCI controller.
15668 - */
15669 -/* values for that type tag */
15670 -#define Q_TYPE_ITD (0 << 1)
15671 -#define Q_TYPE_QH (1 << 1)
15672 -#define Q_TYPE_SITD (2 << 1)
15673 -#define Q_TYPE_FSTN (3 << 1)
15674 -
15675 -/* next async queue entry, or pointer to interrupt/periodic QH */
15676 -#define QH_NEXT(fotg210, dma) \
15677 - (cpu_to_hc32(fotg210, (((u32)dma)&~0x01f)|Q_TYPE_QH))
15678 -
15679 -/* for periodic/async schedules and qtd lists, mark end of list */
15680 -#define FOTG210_LIST_END(fotg210) \
15681 - cpu_to_hc32(fotg210, 1) /* "null pointer" to hw */
15682 -
15683 -/*
15684 - * Entries in periodic shadow table are pointers to one of four kinds
15685 - * of data structure. That's dictated by the hardware; a type tag is
15686 - * encoded in the low bits of the hardware's periodic schedule. Use
15687 - * Q_NEXT_TYPE to get the tag.
15688 - *
15689 - * For entries in the async schedule, the type tag always says "qh".
15690 - */
15691 -union fotg210_shadow {
15692 - struct fotg210_qh *qh; /* Q_TYPE_QH */
15693 - struct fotg210_itd *itd; /* Q_TYPE_ITD */
15694 - struct fotg210_fstn *fstn; /* Q_TYPE_FSTN */
15695 - __hc32 *hw_next; /* (all types) */
15696 - void *ptr;
15697 -};
15698 -
15699 -/*-------------------------------------------------------------------------*/
15700 -
15701 -/*
15702 - * EHCI Specification 0.95 Section 3.6
15703 - * QH: describes control/bulk/interrupt endpoints
15704 - * See Fig 3-7 "Queue Head Structure Layout".
15705 - *
15706 - * These appear in both the async and (for interrupt) periodic schedules.
15707 - */
15708 -
15709 -/* first part defined by EHCI spec */
15710 -struct fotg210_qh_hw {
15711 - __hc32 hw_next; /* see EHCI 3.6.1 */
15712 - __hc32 hw_info1; /* see EHCI 3.6.2 */
15713 -#define QH_CONTROL_EP (1 << 27) /* FS/LS control endpoint */
15714 -#define QH_HEAD (1 << 15) /* Head of async reclamation list */
15715 -#define QH_TOGGLE_CTL (1 << 14) /* Data toggle control */
15716 -#define QH_HIGH_SPEED (2 << 12) /* Endpoint speed */
15717 -#define QH_LOW_SPEED (1 << 12)
15718 -#define QH_FULL_SPEED (0 << 12)
15719 -#define QH_INACTIVATE (1 << 7) /* Inactivate on next transaction */
15720 - __hc32 hw_info2; /* see EHCI 3.6.2 */
15721 -#define QH_SMASK 0x000000ff
15722 -#define QH_CMASK 0x0000ff00
15723 -#define QH_HUBADDR 0x007f0000
15724 -#define QH_HUBPORT 0x3f800000
15725 -#define QH_MULT 0xc0000000
15726 - __hc32 hw_current; /* qtd list - see EHCI 3.6.4 */
15727 -
15728 - /* qtd overlay (hardware parts of a struct fotg210_qtd) */
15729 - __hc32 hw_qtd_next;
15730 - __hc32 hw_alt_next;
15731 - __hc32 hw_token;
15732 - __hc32 hw_buf[5];
15733 - __hc32 hw_buf_hi[5];
15734 -} __aligned(32);
15735 -
15736 -struct fotg210_qh {
15737 - struct fotg210_qh_hw *hw; /* Must come first */
15738 - /* the rest is HCD-private */
15739 - dma_addr_t qh_dma; /* address of qh */
15740 - union fotg210_shadow qh_next; /* ptr to qh; or periodic */
15741 - struct list_head qtd_list; /* sw qtd list */
15742 - struct list_head intr_node; /* list of intr QHs */
15743 - struct fotg210_qtd *dummy;
15744 - struct fotg210_qh *unlink_next; /* next on unlink list */
15745 -
15746 - unsigned unlink_cycle;
15747 -
15748 - u8 needs_rescan; /* Dequeue during giveback */
15749 - u8 qh_state;
15750 -#define QH_STATE_LINKED 1 /* HC sees this */
15751 -#define QH_STATE_UNLINK 2 /* HC may still see this */
15752 -#define QH_STATE_IDLE 3 /* HC doesn't see this */
15753 -#define QH_STATE_UNLINK_WAIT 4 /* LINKED and on unlink q */
15754 -#define QH_STATE_COMPLETING 5 /* don't touch token.HALT */
15755 -
15756 - u8 xacterrs; /* XactErr retry counter */
15757 -#define QH_XACTERR_MAX 32 /* XactErr retry limit */
15758 -
15759 - /* periodic schedule info */
15760 - u8 usecs; /* intr bandwidth */
15761 - u8 gap_uf; /* uframes split/csplit gap */
15762 - u8 c_usecs; /* ... split completion bw */
15763 - u16 tt_usecs; /* tt downstream bandwidth */
15764 - unsigned short period; /* polling interval */
15765 - unsigned short start; /* where polling starts */
15766 -#define NO_FRAME ((unsigned short)~0) /* pick new start */
15767 -
15768 - struct usb_device *dev; /* access to TT */
15769 - unsigned is_out:1; /* bulk or intr OUT */
15770 - unsigned clearing_tt:1; /* Clear-TT-Buf in progress */
15771 -};
15772 -
15773 -/*-------------------------------------------------------------------------*/
15774 -
15775 -/* description of one iso transaction (up to 3 KB data if highspeed) */
15776 -struct fotg210_iso_packet {
15777 - /* These will be copied to iTD when scheduling */
15778 - u64 bufp; /* itd->hw_bufp{,_hi}[pg] |= */
15779 - __hc32 transaction; /* itd->hw_transaction[i] |= */
15780 - u8 cross; /* buf crosses pages */
15781 - /* for full speed OUT splits */
15782 - u32 buf1;
15783 -};
15784 -
15785 -/* temporary schedule data for packets from iso urbs (both speeds)
15786 - * each packet is one logical usb transaction to the device (not TT),
15787 - * beginning at stream->next_uframe
15788 - */
15789 -struct fotg210_iso_sched {
15790 - struct list_head td_list;
15791 - unsigned span;
15792 - struct fotg210_iso_packet packet[];
15793 -};
15794 -
15795 -/*
15796 - * fotg210_iso_stream - groups all (s)itds for this endpoint.
15797 - * acts like a qh would, if EHCI had them for ISO.
15798 - */
15799 -struct fotg210_iso_stream {
15800 -	/* first field matches fotg210_qh, but is NULL */
15801 - struct fotg210_qh_hw *hw;
15802 -
15803 - u8 bEndpointAddress;
15804 - u8 highspeed;
15805 - struct list_head td_list; /* queued itds */
15806 - struct list_head free_list; /* list of unused itds */
15807 - struct usb_device *udev;
15808 - struct usb_host_endpoint *ep;
15809 -
15810 - /* output of (re)scheduling */
15811 - int next_uframe;
15812 - __hc32 splits;
15813 -
15814 - /* the rest is derived from the endpoint descriptor,
15815 - * trusting urb->interval == f(epdesc->bInterval) and
15816 - * including the extra info for hw_bufp[0..2]
15817 - */
15818 - u8 usecs, c_usecs;
15819 - u16 interval;
15820 - u16 tt_usecs;
15821 - u16 maxp;
15822 - u16 raw_mask;
15823 - unsigned bandwidth;
15824 -
15825 - /* This is used to initialize iTD's hw_bufp fields */
15826 - __hc32 buf0;
15827 - __hc32 buf1;
15828 - __hc32 buf2;
15829 -
15830 - /* this is used to initialize sITD's tt info */
15831 - __hc32 address;
15832 -};
15833 -
15834 -/*-------------------------------------------------------------------------*/
15835 -
15836 -/*
15837 - * EHCI Specification 0.95 Section 3.3
15838 - * Fig 3-4 "Isochronous Transaction Descriptor (iTD)"
15839 - *
15840 - * Schedule records for high speed iso xfers
15841 - */
15842 -struct fotg210_itd {
15843 - /* first part defined by EHCI spec */
15844 - __hc32 hw_next; /* see EHCI 3.3.1 */
15845 - __hc32 hw_transaction[8]; /* see EHCI 3.3.2 */
15846 -#define FOTG210_ISOC_ACTIVE (1<<31) /* activate transfer this slot */
15847 -#define FOTG210_ISOC_BUF_ERR (1<<30) /* Data buffer error */
15848 -#define FOTG210_ISOC_BABBLE (1<<29) /* babble detected */
15849 -#define FOTG210_ISOC_XACTERR (1<<28) /* XactErr - transaction error */
15850 -#define FOTG210_ITD_LENGTH(tok) (((tok)>>16) & 0x0fff)
15851 -#define FOTG210_ITD_IOC (1 << 15) /* interrupt on complete */
15852 -
15853 -#define ITD_ACTIVE(fotg210) cpu_to_hc32(fotg210, FOTG210_ISOC_ACTIVE)
15854 -
15855 - __hc32 hw_bufp[7]; /* see EHCI 3.3.3 */
15856 - __hc32 hw_bufp_hi[7]; /* Appendix B */
15857 -
15858 - /* the rest is HCD-private */
15859 - dma_addr_t itd_dma; /* for this itd */
15860 - union fotg210_shadow itd_next; /* ptr to periodic q entry */
15861 -
15862 - struct urb *urb;
15863 - struct fotg210_iso_stream *stream; /* endpoint's queue */
15864 - struct list_head itd_list; /* list of stream's itds */
15865 -
15866 - /* any/all hw_transactions here may be used by that urb */
15867 - unsigned frame; /* where scheduled */
15868 - unsigned pg;
15869 - unsigned index[8]; /* in urb->iso_frame_desc */
15870 -} __aligned(32);
15871 -
15872 -/*-------------------------------------------------------------------------*/
15873 -
15874 -/*
15875 - * EHCI Specification 0.96 Section 3.7
15876 - * Periodic Frame Span Traversal Node (FSTN)
15877 - *
15878 - * Manages split interrupt transactions (using TT) that span frame boundaries
15879 - * into uframes 0/1; see 4.12.2.2. In those uframes, a "save place" FSTN
15880 - * makes the HC jump (back) to a QH to scan for fs/ls QH completions until
15881 - * it hits a "restore" FSTN; then it returns to finish other uframe 0/1 work.
15882 - */
15883 -struct fotg210_fstn {
15884 - __hc32 hw_next; /* any periodic q entry */
15885 - __hc32 hw_prev; /* qh or FOTG210_LIST_END */
15886 -
15887 - /* the rest is HCD-private */
15888 - dma_addr_t fstn_dma;
15889 - union fotg210_shadow fstn_next; /* ptr to periodic q entry */
15890 -} __aligned(32);
15891 -
15892 -/*-------------------------------------------------------------------------*/
15893 -
15894 -/* Prepare the PORTSC wakeup flags during controller suspend/resume */
15895 -
15896 -#define fotg210_prepare_ports_for_controller_suspend(fotg210, do_wakeup) \
15897 - fotg210_adjust_port_wakeup_flags(fotg210, true, do_wakeup)
15898 -
15899 -#define fotg210_prepare_ports_for_controller_resume(fotg210) \
15900 - fotg210_adjust_port_wakeup_flags(fotg210, false, false)
15901 -
15902 -/*-------------------------------------------------------------------------*/
15903 -
15904 -/*
15905 - * Some EHCI controllers have a Transaction Translator built into the
15906 - * root hub. This is a non-standard feature. Each controller will need
15907 - * to add code to the following inline functions, and call them as
15908 - * needed (mostly in root hub code).
15909 - */
15910 -
15911 -static inline unsigned int
15912 -fotg210_get_speed(struct fotg210_hcd *fotg210, unsigned int portsc)
15913 -{
15914 - return (readl(&fotg210->regs->otgcsr)
15915 - & OTGCSR_HOST_SPD_TYP) >> 22;
15916 -}
15917 -
15918 -/* Returns the speed of a device attached to a port on the root hub. */
15919 -static inline unsigned int
15920 -fotg210_port_speed(struct fotg210_hcd *fotg210, unsigned int portsc)
15921 -{
15922 - switch (fotg210_get_speed(fotg210, portsc)) {
15923 - case 0:
15924 - return 0;
15925 - case 1:
15926 - return USB_PORT_STAT_LOW_SPEED;
15927 - case 2:
15928 - default:
15929 - return USB_PORT_STAT_HIGH_SPEED;
15930 - }
15931 -}
15932 -
15933 -/*-------------------------------------------------------------------------*/
15934 -
15935 -#define fotg210_has_fsl_portno_bug(e) (0)
15936 -
15937 -/*
15938 - * While most USB host controllers implement their registers in
15939 - * little-endian format, a minority (celleb companion chip) implement
15940 - * them in big endian format.
15941 - *
15942 - * This attempts to support either format at compile time without a
15943 - * runtime penalty, or both formats with the additional overhead
15944 - * of checking a flag bit.
15945 - *
15946 - */
15947 -
15948 -#define fotg210_big_endian_mmio(e) 0
15949 -#define fotg210_big_endian_capbase(e) 0
15950 -
15951 -static inline unsigned int fotg210_readl(const struct fotg210_hcd *fotg210,
15952 - __u32 __iomem *regs)
15953 -{
15954 - return readl(regs);
15955 -}
15956 -
15957 -static inline void fotg210_writel(const struct fotg210_hcd *fotg210,
15958 - const unsigned int val, __u32 __iomem *regs)
15959 -{
15960 - writel(val, regs);
15961 -}
15962 -
15963 -/* cpu to fotg210 */
15964 -static inline __hc32 cpu_to_hc32(const struct fotg210_hcd *fotg210, const u32 x)
15965 -{
15966 - return cpu_to_le32(x);
15967 -}
15968 -
15969 -/* fotg210 to cpu */
15970 -static inline u32 hc32_to_cpu(const struct fotg210_hcd *fotg210, const __hc32 x)
15971 -{
15972 - return le32_to_cpu(x);
15973 -}
15974 -
15975 -static inline u32 hc32_to_cpup(const struct fotg210_hcd *fotg210,
15976 - const __hc32 *x)
15977 -{
15978 - return le32_to_cpup(x);
15979 -}
15980 -
15981 -/*-------------------------------------------------------------------------*/
15982 -
15983 -static inline unsigned fotg210_read_frame_index(struct fotg210_hcd *fotg210)
15984 -{
15985 - return fotg210_readl(fotg210, &fotg210->regs->frame_index);
15986 -}
15987 -
15988 -/*-------------------------------------------------------------------------*/
15989 -
15990 -#endif /* __LINUX_FOTG210_H */