target/linux/cns3xxx/files/drivers/usb/dwc/otg_cil.c
1 /* ==========================================================================
2 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_cil.c $
3 * $Revision: #147 $
4 * $Date: 2008/10/16 $
5 * $Change: 1117667 $
6 *
7 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
8 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
9 * otherwise expressly agreed to in writing between Synopsys and you.
10 *
11 * The Software IS NOT an item of Licensed Software or Licensed Product under
12 * any End User Software License Agreement or Agreement for Licensed Product
13 * with Synopsys or any supplement thereto. You are permitted to use and
14 * redistribute this Software in source and binary forms, with or without
15 * modification, provided that redistributions of source code must retain this
16 * notice. You may not view, use, disclose, copy or distribute this file or
17 * any information contained herein except pursuant to this license grant from
18 * Synopsys. If you do not agree with this notice, including the disclaimer
19 * below, then you are not authorized to use the Software.
20 *
21 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
25 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31 * DAMAGE.
32 * ========================================================================== */
33
34 /** @file
35 *
36 * The Core Interface Layer provides basic services for accessing and
37 * managing the DWC_otg hardware. These services are used by both the
38 * Host Controller Driver and the Peripheral Controller Driver.
39 *
40 * The CIL manages the memory map for the core so that the HCD and PCD
41 * don't have to do this separately. It also handles basic tasks like
42 * reading/writing the registers and data FIFOs in the controller.
43 * Some of the data access functions provide encapsulation of several
44 * operations required to perform a task, such as writing multiple
45 * registers to start a transfer. Finally, the CIL performs basic
46 * services that are not specific to either the host or device modes
47 * of operation. These services include management of the OTG Host
48 * Negotiation Protocol (HNP) and Session Request Protocol (SRP). A
49 * Diagnostic API is also provided to allow testing of the controller
50 * hardware.
51 *
52 * The Core Interface Layer has the following requirements:
53 * - Provides basic controller operations.
54 * - Minimal use of OS services.
55 * - The OS services used will be abstracted by using inline functions
56 * or macros.
57 *
58 */
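/*
 * Illustrative sketch (not part of the original source): the order in
 * which a bus-glue/probe routine is typically expected to use the CIL
 * services defined below. The names reg_base and core_params stand for
 * values supplied by the platform code and are assumptions here.
 *
 *	dwc_otg_core_if_t *core_if;
 *
 *	core_if = dwc_otg_cil_init(reg_base, &core_params);
 *	if (!core_if)
 *		return -ENOMEM;
 *	dwc_otg_disable_global_interrupts(core_if);
 *	dwc_otg_core_init(core_if);                (common register setup)
 *	... install the IRQ handler, initialize the HCD and/or PCD ...
 *	dwc_otg_enable_global_interrupts(core_if);
 *	...
 *	dwc_otg_cil_remove(core_if);               (on driver removal)
 */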
59 #include <asm/unaligned.h>
60 #include <linux/dma-mapping.h>
61 #ifdef DEBUG
62 #include <linux/jiffies.h>
63 #endif
64
65 #include "otg_plat.h"
66 #include "otg_regs.h"
67 #include "otg_cil.h"
68 #include "otg_pcd.h"
69
70
71 /**
72 * This function is called to initialize the DWC_otg CSR data
73 * structures. The register addresses in the device and host
74 * structures are initialized from the base address supplied by the
75 * caller. The calling function must make the OS calls to get the
76 * base address of the DWC_otg controller registers. The core_params
77 * argument holds the parameters that specify how the core should be
78 * configured.
79 *
80 * @param[in] reg_base_addr Base address of DWC_otg core registers
81 * @param[in] core_params Pointer to the core configuration parameters
82 *
83 */
84 dwc_otg_core_if_t *dwc_otg_cil_init(const uint32_t *reg_base_addr,
85 dwc_otg_core_params_t *core_params)
86 {
87 dwc_otg_core_if_t *core_if = 0;
88 dwc_otg_dev_if_t *dev_if = 0;
89 dwc_otg_host_if_t *host_if = 0;
90 uint8_t *reg_base = (uint8_t *)reg_base_addr;
91 int i = 0;
92
93 DWC_DEBUGPL(DBG_CILV, "%s(%p,%p)\n", __func__, reg_base_addr, core_params);
94
95 core_if = kmalloc(sizeof(dwc_otg_core_if_t), GFP_KERNEL);
96
97 if (core_if == 0) {
98 DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_core_if_t failed\n");
99 return 0;
100 }
101
102 memset(core_if, 0, sizeof(dwc_otg_core_if_t));
103
104 core_if->core_params = core_params;
105 core_if->core_global_regs = (dwc_otg_core_global_regs_t *)reg_base;
106
107 /*
108 * Allocate the Device Mode structures.
109 */
110 dev_if = kmalloc(sizeof(dwc_otg_dev_if_t), GFP_KERNEL);
111
112 if (dev_if == 0) {
113 DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_dev_if_t failed\n");
114 kfree(core_if);
115 return 0;
116 }
117
118 dev_if->dev_global_regs =
119 (dwc_otg_device_global_regs_t *)(reg_base + DWC_DEV_GLOBAL_REG_OFFSET);
120
121 for (i=0; i<MAX_EPS_CHANNELS; i++)
122 {
123 dev_if->in_ep_regs[i] = (dwc_otg_dev_in_ep_regs_t *)
124 (reg_base + DWC_DEV_IN_EP_REG_OFFSET +
125 (i * DWC_EP_REG_OFFSET));
126
127 dev_if->out_ep_regs[i] = (dwc_otg_dev_out_ep_regs_t *)
128 (reg_base + DWC_DEV_OUT_EP_REG_OFFSET +
129 (i * DWC_EP_REG_OFFSET));
130 DWC_DEBUGPL(DBG_CILV, "in_ep_regs[%d]->diepctl=%p\n",
131 i, &dev_if->in_ep_regs[i]->diepctl);
132 DWC_DEBUGPL(DBG_CILV, "out_ep_regs[%d]->doepctl=%p\n",
133 i, &dev_if->out_ep_regs[i]->doepctl);
134 }
135
136 dev_if->speed = 0; // unknown
137
138 core_if->dev_if = dev_if;
139
140 /*
141 * Allocate the Host Mode structures.
142 */
143 host_if = kmalloc(sizeof(dwc_otg_host_if_t), GFP_KERNEL);
144
145 if (host_if == 0) {
146 DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_host_if_t failed\n");
147 kfree(dev_if);
148 kfree(core_if);
149 return 0;
150 }
151
152 host_if->host_global_regs = (dwc_otg_host_global_regs_t *)
153 (reg_base + DWC_OTG_HOST_GLOBAL_REG_OFFSET);
154
155 host_if->hprt0 = (uint32_t*)(reg_base + DWC_OTG_HOST_PORT_REGS_OFFSET);
156
157 for (i=0; i<MAX_EPS_CHANNELS; i++)
158 {
159 host_if->hc_regs[i] = (dwc_otg_hc_regs_t *)
160 (reg_base + DWC_OTG_HOST_CHAN_REGS_OFFSET +
161 (i * DWC_OTG_CHAN_REGS_OFFSET));
162 DWC_DEBUGPL(DBG_CILV, "hc_reg[%d]->hcchar=%p\n",
163 i, &host_if->hc_regs[i]->hcchar);
164 }
165
166 host_if->num_host_channels = MAX_EPS_CHANNELS;
167 core_if->host_if = host_if;
168
169 for (i=0; i<MAX_EPS_CHANNELS; i++)
170 {
171 core_if->data_fifo[i] =
172 (uint32_t *)(reg_base + DWC_OTG_DATA_FIFO_OFFSET +
173 (i * DWC_OTG_DATA_FIFO_SIZE));
174 DWC_DEBUGPL(DBG_CILV, "data_fifo[%d]=0x%08x\n",
175 i, (unsigned)core_if->data_fifo[i]);
176 }
177
178 core_if->pcgcctl = (uint32_t*)(reg_base + DWC_OTG_PCGCCTL_OFFSET);
179
180 /*
181 * Store the contents of the hardware configuration registers here for
182 * easy access later.
183 */
184 core_if->hwcfg1.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg1);
185 core_if->hwcfg2.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg2);
186 core_if->hwcfg3.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg3);
187 core_if->hwcfg4.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg4);
188
189 DWC_DEBUGPL(DBG_CILV,"hwcfg1=%08x\n",core_if->hwcfg1.d32);
190 DWC_DEBUGPL(DBG_CILV,"hwcfg2=%08x\n",core_if->hwcfg2.d32);
191 DWC_DEBUGPL(DBG_CILV,"hwcfg3=%08x\n",core_if->hwcfg3.d32);
192 DWC_DEBUGPL(DBG_CILV,"hwcfg4=%08x\n",core_if->hwcfg4.d32);
193
194 core_if->hcfg.d32 = dwc_read_reg32(&core_if->host_if->host_global_regs->hcfg);
195 core_if->dcfg.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dcfg);
196
197 DWC_DEBUGPL(DBG_CILV,"hcfg=%08x\n",core_if->hcfg.d32);
198 DWC_DEBUGPL(DBG_CILV,"dcfg=%08x\n",core_if->dcfg.d32);
199
200 DWC_DEBUGPL(DBG_CILV,"op_mode=%0x\n",core_if->hwcfg2.b.op_mode);
201 DWC_DEBUGPL(DBG_CILV,"arch=%0x\n",core_if->hwcfg2.b.architecture);
202 DWC_DEBUGPL(DBG_CILV,"num_dev_ep=%d\n",core_if->hwcfg2.b.num_dev_ep);
203 DWC_DEBUGPL(DBG_CILV,"num_host_chan=%d\n",core_if->hwcfg2.b.num_host_chan);
204 DWC_DEBUGPL(DBG_CILV,"nonperio_tx_q_depth=0x%0x\n",core_if->hwcfg2.b.nonperio_tx_q_depth);
205 DWC_DEBUGPL(DBG_CILV,"host_perio_tx_q_depth=0x%0x\n",core_if->hwcfg2.b.host_perio_tx_q_depth);
206 DWC_DEBUGPL(DBG_CILV,"dev_token_q_depth=0x%0x\n",core_if->hwcfg2.b.dev_token_q_depth);
207
208 DWC_DEBUGPL(DBG_CILV,"Total FIFO SZ=%d\n", core_if->hwcfg3.b.dfifo_depth);
209 DWC_DEBUGPL(DBG_CILV,"xfer_size_cntr_width=%0x\n", core_if->hwcfg3.b.xfer_size_cntr_width);
210
211 /*
212 	 * Set the SRP success bit for FS-I2C
213 */
214 core_if->srp_success = 0;
215 core_if->srp_timer_started = 0;
216
217
218 /*
219 * Create new workqueue and init works
220 */
221 core_if->wq_otg = create_singlethread_workqueue("dwc_otg");
222 if(core_if->wq_otg == 0) {
223 DWC_DEBUGPL(DBG_CIL, "Creation of wq_otg failed\n");
224 kfree(host_if);
225 kfree(dev_if);
226 kfree(core_if);
227 		return 0;
228 }
229 INIT_WORK(&core_if->w_conn_id, w_conn_id_status_change);
230 INIT_DELAYED_WORK(&core_if->w_wkp, w_wakeup_detected);
231
232 return core_if;
233 }
234
235 /**
236 * This function frees the structures allocated by dwc_otg_cil_init().
237 *
238 * @param[in] core_if The core interface pointer returned from
239 * dwc_otg_cil_init().
240 *
241 */
242 void dwc_otg_cil_remove(dwc_otg_core_if_t *core_if)
243 {
244 /* Disable all interrupts */
245 dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, 1, 0);
246 dwc_write_reg32(&core_if->core_global_regs->gintmsk, 0);
247
248 if (core_if->wq_otg) {
249 destroy_workqueue(core_if->wq_otg);
250 }
251 if (core_if->dev_if) {
252 kfree(core_if->dev_if);
253 }
254 if (core_if->host_if) {
255 kfree(core_if->host_if);
256 }
257 kfree(core_if);
258 }
259
260 /**
261 * This function enables the controller's Global Interrupt in the AHB Config
262 * register.
263 *
264 * @param[in] core_if Programming view of DWC_otg controller.
265 */
266 void dwc_otg_enable_global_interrupts(dwc_otg_core_if_t *core_if)
267 {
268 gahbcfg_data_t ahbcfg = { .d32 = 0};
269 ahbcfg.b.glblintrmsk = 1; /* Enable interrupts */
270 dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, 0, ahbcfg.d32);
271 }
272
273 /**
274 * This function disables the controller's Global Interrupt in the AHB Config
275 * register.
276 *
277 * @param[in] core_if Programming view of DWC_otg controller.
278 */
279 void dwc_otg_disable_global_interrupts(dwc_otg_core_if_t *core_if)
280 {
281 gahbcfg_data_t ahbcfg = { .d32 = 0};
282 ahbcfg.b.glblintrmsk = 1; /* Enable interrupts */
283 dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, ahbcfg.d32, 0);
284 }
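/*
 * Clarifying note (added, not in the original source): the enable/disable
 * pair above relies on the driver's dwc_modify_reg32() read-modify-write
 * helper. Conceptually, dwc_modify_reg32(reg, clear_mask, set_mask) does:
 *
 *	uint32_t v = dwc_read_reg32(reg);
 *	v &= ~clear_mask;
 *	v |= set_mask;
 *	dwc_write_reg32(reg, v);
 *
 * so enabling passes the GlblIntrMsk bit as the set mask, disabling passes
 * it as the clear mask, and all other GAHBCFG bits are left untouched.
 */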
285
286 /**
287  * This function initializes the common interrupts, used in both
288 * device and host modes.
289 *
290 * @param[in] core_if Programming view of the DWC_otg controller
291 *
292 */
293 static void dwc_otg_enable_common_interrupts(dwc_otg_core_if_t *core_if)
294 {
295 dwc_otg_core_global_regs_t *global_regs =
296 core_if->core_global_regs;
297 gintmsk_data_t intr_mask = { .d32 = 0};
298
299 /* Clear any pending OTG Interrupts */
300 dwc_write_reg32(&global_regs->gotgint, 0xFFFFFFFF);
301
302 /* Clear any pending interrupts */
303 dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF);
304
305 /*
306 * Enable the interrupts in the GINTMSK.
307 */
308 intr_mask.b.modemismatch = 1;
309 intr_mask.b.otgintr = 1;
310
311 if (!core_if->dma_enable) {
312 intr_mask.b.rxstsqlvl = 1;
313 }
314
315 intr_mask.b.conidstschng = 1;
316 intr_mask.b.wkupintr = 1;
317 intr_mask.b.disconnect = 1;
318 intr_mask.b.usbsuspend = 1;
319 intr_mask.b.sessreqintr = 1;
320 dwc_write_reg32(&global_regs->gintmsk, intr_mask.d32);
321 }
322
323 /**
324 * Initializes the FSLSPClkSel field of the HCFG register depending on the PHY
325 * type.
326 */
327 static void init_fslspclksel(dwc_otg_core_if_t *core_if)
328 {
329 uint32_t val;
330 hcfg_data_t hcfg;
331
332 if (((core_if->hwcfg2.b.hs_phy_type == 2) &&
333 (core_if->hwcfg2.b.fs_phy_type == 1) &&
334 (core_if->core_params->ulpi_fs_ls)) ||
335 (core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) {
336 /* Full speed PHY */
337 val = DWC_HCFG_48_MHZ;
338 }
339 else {
340 /* High speed PHY running at full speed or high speed */
341 val = DWC_HCFG_30_60_MHZ;
342 }
343
344 DWC_DEBUGPL(DBG_CIL, "Initializing HCFG.FSLSPClkSel to 0x%1x\n", val);
345 hcfg.d32 = dwc_read_reg32(&core_if->host_if->host_global_regs->hcfg);
346 hcfg.b.fslspclksel = val;
347 dwc_write_reg32(&core_if->host_if->host_global_regs->hcfg, hcfg.d32);
348 }
349
350 /**
351 * Initializes the DevSpd field of the DCFG register depending on the PHY type
352 * and the enumeration speed of the device.
353 */
354 static void init_devspd(dwc_otg_core_if_t *core_if)
355 {
356 uint32_t val;
357 dcfg_data_t dcfg;
358
359 if (((core_if->hwcfg2.b.hs_phy_type == 2) &&
360 (core_if->hwcfg2.b.fs_phy_type == 1) &&
361 (core_if->core_params->ulpi_fs_ls)) ||
362 (core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) {
363 /* Full speed PHY */
364 val = 0x3;
365 }
366 else if (core_if->core_params->speed == DWC_SPEED_PARAM_FULL) {
367 /* High speed PHY running at full speed */
368 val = 0x1;
369 }
370 else {
371 /* High speed PHY running at high speed */
372 val = 0x0;
373 }
374
375 DWC_DEBUGPL(DBG_CIL, "Initializing DCFG.DevSpd to 0x%1x\n", val);
376
377 dcfg.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dcfg);
378 dcfg.b.devspd = val;
379 dwc_write_reg32(&core_if->dev_if->dev_global_regs->dcfg, dcfg.d32);
380 }
381
382 /**
383 * This function calculates the number of IN EPS
384 * using GHWCFG1 and GHWCFG2 registers values
385 *
386 * @param core_if Programming view of the DWC_otg controller
387 */
388 static uint32_t calc_num_in_eps(dwc_otg_core_if_t *core_if)
389 {
390 uint32_t num_in_eps = 0;
391 uint32_t num_eps = core_if->hwcfg2.b.num_dev_ep;
392 uint32_t hwcfg1 = core_if->hwcfg1.d32 >> 3;
393 uint32_t num_tx_fifos = core_if->hwcfg4.b.num_in_eps;
394 int i;
395
396
397 for(i = 0; i < num_eps; ++i)
398 {
399 if(!(hwcfg1 & 0x1))
400 num_in_eps++;
401
402 hwcfg1 >>= 2;
403 }
404
405 if(core_if->hwcfg4.b.ded_fifo_en) {
406 num_in_eps = (num_in_eps > num_tx_fifos) ? num_tx_fifos : num_in_eps;
407 }
408
409 return num_in_eps;
410 }
411
412
413 /**
414 * This function calculates the number of OUT EPS
415 * using GHWCFG1 and GHWCFG2 registers values
416 *
417 * @param core_if Programming view of the DWC_otg controller
418 */
419 static uint32_t calc_num_out_eps(dwc_otg_core_if_t *core_if)
420 {
421 uint32_t num_out_eps = 0;
422 uint32_t num_eps = core_if->hwcfg2.b.num_dev_ep;
423 uint32_t hwcfg1 = core_if->hwcfg1.d32 >> 2;
424 int i;
425
426 for(i = 0; i < num_eps; ++i)
427 {
428 if(!(hwcfg1 & 0x2))
429 num_out_eps++;
430
431 hwcfg1 >>= 2;
432 }
433 return num_out_eps;
434 }
435 /**
436 * This function initializes the DWC_otg controller registers and
437 * prepares the core for device mode or host mode operation.
438 *
439 * @param core_if Programming view of the DWC_otg controller
440 *
441 */
442 void dwc_otg_core_init(dwc_otg_core_if_t *core_if)
443 {
444 int i = 0;
445 dwc_otg_core_global_regs_t *global_regs =
446 core_if->core_global_regs;
447 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
448 gahbcfg_data_t ahbcfg = { .d32 = 0 };
449 gusbcfg_data_t usbcfg = { .d32 = 0 };
450 gi2cctl_data_t i2cctl = { .d32 = 0 };
451
452 DWC_DEBUGPL(DBG_CILV, "dwc_otg_core_init(%p)\n", core_if);
453
454 /* Common Initialization */
455
456 usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
457
458 // usbcfg.b.tx_end_delay = 1;
459 /* Program the ULPI External VBUS bit if needed */
460 usbcfg.b.ulpi_ext_vbus_drv =
461 (core_if->core_params->phy_ulpi_ext_vbus == DWC_PHY_ULPI_EXTERNAL_VBUS) ? 1 : 0;
462
463 /* Set external TS Dline pulsing */
464 usbcfg.b.term_sel_dl_pulse = (core_if->core_params->ts_dline == 1) ? 1 : 0;
465 dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32);
466
467
468 /* Reset the Controller */
469 dwc_otg_core_reset(core_if);
470
471 /* Initialize parameters from Hardware configuration registers. */
472 dev_if->num_in_eps = calc_num_in_eps(core_if);
473 dev_if->num_out_eps = calc_num_out_eps(core_if);
474
475
476 DWC_DEBUGPL(DBG_CIL, "num_dev_perio_in_ep=%d\n", core_if->hwcfg4.b.num_dev_perio_in_ep);
477
478 for (i=0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; i++)
479 {
480 dev_if->perio_tx_fifo_size[i] =
481 dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]) >> 16;
482 DWC_DEBUGPL(DBG_CIL, "Periodic Tx FIFO SZ #%d=0x%0x\n",
483 i, dev_if->perio_tx_fifo_size[i]);
484 }
485
486 for (i=0; i < core_if->hwcfg4.b.num_in_eps; i++)
487 {
488 dev_if->tx_fifo_size[i] =
489 dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]) >> 16;
490 DWC_DEBUGPL(DBG_CIL, "Tx FIFO SZ #%d=0x%0x\n",
491 			    i, dev_if->tx_fifo_size[i]);
492 }
493
494 core_if->total_fifo_size = core_if->hwcfg3.b.dfifo_depth;
495 core_if->rx_fifo_size =
496 dwc_read_reg32(&global_regs->grxfsiz);
497 core_if->nperio_tx_fifo_size =
498 dwc_read_reg32(&global_regs->gnptxfsiz) >> 16;
499
500 DWC_DEBUGPL(DBG_CIL, "Total FIFO SZ=%d\n", core_if->total_fifo_size);
501 DWC_DEBUGPL(DBG_CIL, "Rx FIFO SZ=%d\n", core_if->rx_fifo_size);
502 DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO SZ=%d\n", core_if->nperio_tx_fifo_size);
503
504 /* This programming sequence needs to happen in FS mode before any other
505 * programming occurs */
506 if ((core_if->core_params->speed == DWC_SPEED_PARAM_FULL) &&
507 (core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) {
508 /* If FS mode with FS PHY */
509
510 /* core_init() is now called on every switch so only call the
511 * following for the first time through. */
512 if (!core_if->phy_init_done) {
513 core_if->phy_init_done = 1;
514 DWC_DEBUGPL(DBG_CIL, "FS_PHY detected\n");
515 usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
516 usbcfg.b.physel = 1;
517 dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32);
518
519 /* Reset after a PHY select */
520 dwc_otg_core_reset(core_if);
521 }
522
523 		/* Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48 MHz in FS. Also
524 * do this on HNP Dev/Host mode switches (done in dev_init and
525 * host_init). */
526 if (dwc_otg_is_host_mode(core_if)) {
527 init_fslspclksel(core_if);
528 }
529 else {
530 init_devspd(core_if);
531 }
532
533 if (core_if->core_params->i2c_enable) {
534 DWC_DEBUGPL(DBG_CIL, "FS_PHY Enabling I2c\n");
535 /* Program GUSBCFG.OtgUtmifsSel to I2C */
536 usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
537 usbcfg.b.otgutmifssel = 1;
538 dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32);
539
540 /* Program GI2CCTL.I2CEn */
541 i2cctl.d32 = dwc_read_reg32(&global_regs->gi2cctl);
542 i2cctl.b.i2cdevaddr = 1;
543 i2cctl.b.i2cen = 0;
544 dwc_write_reg32 (&global_regs->gi2cctl, i2cctl.d32);
545 i2cctl.b.i2cen = 1;
546 dwc_write_reg32 (&global_regs->gi2cctl, i2cctl.d32);
547 }
548
549 } /* endif speed == DWC_SPEED_PARAM_FULL */
550
551 else {
552 /* High speed PHY. */
553 if (!core_if->phy_init_done) {
554 core_if->phy_init_done = 1;
555 /* HS PHY parameters. These parameters are preserved
556 * during soft reset so only program the first time. Do
557 * a soft reset immediately after setting phyif. */
558 usbcfg.b.ulpi_utmi_sel = core_if->core_params->phy_type;
559 if (usbcfg.b.ulpi_utmi_sel == 1) {
560 /* ULPI interface */
561 usbcfg.b.phyif = 0;
562 usbcfg.b.ddrsel = core_if->core_params->phy_ulpi_ddr;
563 }
564 else {
565 /* UTMI+ interface */
566 if (core_if->core_params->phy_utmi_width == 16) {
567 usbcfg.b.phyif = 1;
568 }
569 else {
570 usbcfg.b.phyif = 0;
571 }
572 }
573
574 dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
575
576 /* Reset after setting the PHY parameters */
577 dwc_otg_core_reset(core_if);
578 }
579 }
580
581 if ((core_if->hwcfg2.b.hs_phy_type == 2) &&
582 (core_if->hwcfg2.b.fs_phy_type == 1) &&
583 (core_if->core_params->ulpi_fs_ls)) {
584 DWC_DEBUGPL(DBG_CIL, "Setting ULPI FSLS\n");
585 usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
586 usbcfg.b.ulpi_fsls = 1;
587 usbcfg.b.ulpi_clk_sus_m = 1;
588 dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
589 }
590 else {
591 usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
592 usbcfg.b.ulpi_fsls = 0;
593 usbcfg.b.ulpi_clk_sus_m = 0;
594 dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
595 }
596
597 /* Program the GAHBCFG Register.*/
598 switch (core_if->hwcfg2.b.architecture) {
599
600 case DWC_SLAVE_ONLY_ARCH:
601 DWC_DEBUGPL(DBG_CIL, "Slave Only Mode\n");
602 ahbcfg.b.nptxfemplvl_txfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY;
603 ahbcfg.b.ptxfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY;
604 core_if->dma_enable = 0;
605 core_if->dma_desc_enable = 0;
606 break;
607
608 case DWC_EXT_DMA_ARCH:
609 DWC_DEBUGPL(DBG_CIL, "External DMA Mode\n");
610 ahbcfg.b.hburstlen = core_if->core_params->dma_burst_size;
611 core_if->dma_enable = (core_if->core_params->dma_enable != 0);
612 core_if->dma_desc_enable = (core_if->core_params->dma_desc_enable != 0);
613 break;
614
615 case DWC_INT_DMA_ARCH:
616 DWC_DEBUGPL(DBG_CIL, "Internal DMA Mode\n");
617 ahbcfg.b.hburstlen = DWC_GAHBCFG_INT_DMA_BURST_INCR;
618 core_if->dma_enable = (core_if->core_params->dma_enable != 0);
619 core_if->dma_desc_enable = (core_if->core_params->dma_desc_enable != 0);
620 break;
621
622 }
623 ahbcfg.b.dmaenable = core_if->dma_enable;
624 dwc_write_reg32(&global_regs->gahbcfg, ahbcfg.d32);
625
626 core_if->en_multiple_tx_fifo = core_if->hwcfg4.b.ded_fifo_en;
627
628 core_if->pti_enh_enable = core_if->core_params->pti_enable != 0;
629 core_if->multiproc_int_enable = core_if->core_params->mpi_enable;
630 DWC_PRINT("Periodic Transfer Interrupt Enhancement - %s\n", ((core_if->pti_enh_enable) ? "enabled": "disabled"));
631 DWC_PRINT("Multiprocessor Interrupt Enhancement - %s\n", ((core_if->multiproc_int_enable) ? "enabled": "disabled"));
632
633 /*
634 * Program the GUSBCFG register.
635 */
636 usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
637
638 switch (core_if->hwcfg2.b.op_mode) {
639 case DWC_MODE_HNP_SRP_CAPABLE:
640 usbcfg.b.hnpcap = (core_if->core_params->otg_cap ==
641 DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE);
642 usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
643 DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
644 break;
645
646 case DWC_MODE_SRP_ONLY_CAPABLE:
647 usbcfg.b.hnpcap = 0;
648 usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
649 DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
650 break;
651
652 case DWC_MODE_NO_HNP_SRP_CAPABLE:
653 usbcfg.b.hnpcap = 0;
654 usbcfg.b.srpcap = 0;
655 break;
656
657 case DWC_MODE_SRP_CAPABLE_DEVICE:
658 usbcfg.b.hnpcap = 0;
659 usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
660 DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
661 break;
662
663 case DWC_MODE_NO_SRP_CAPABLE_DEVICE:
664 usbcfg.b.hnpcap = 0;
665 usbcfg.b.srpcap = 0;
666 break;
667
668 case DWC_MODE_SRP_CAPABLE_HOST:
669 usbcfg.b.hnpcap = 0;
670 usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
671 DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
672 break;
673
674 case DWC_MODE_NO_SRP_CAPABLE_HOST:
675 usbcfg.b.hnpcap = 0;
676 usbcfg.b.srpcap = 0;
677 break;
678 }
679
680 dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
681
682 /* Enable common interrupts */
683 dwc_otg_enable_common_interrupts(core_if);
684
685 	/* Do device or host initialization based on mode during PCD
686 * and HCD initialization */
687 if (dwc_otg_is_host_mode(core_if)) {
688 DWC_DEBUGPL(DBG_ANY, "Host Mode\n");
689 core_if->op_state = A_HOST;
690 }
691 else {
692 DWC_DEBUGPL(DBG_ANY, "Device Mode\n");
693 core_if->op_state = B_PERIPHERAL;
694 #ifdef DWC_DEVICE_ONLY
695 dwc_otg_core_dev_init(core_if);
696 #endif
697 }
698 }
699
700
701 /**
702 * This function enables the Device mode interrupts.
703 *
704 * @param core_if Programming view of DWC_otg controller
705 */
706 void dwc_otg_enable_device_interrupts(dwc_otg_core_if_t *core_if)
707 {
708 gintmsk_data_t intr_mask = { .d32 = 0};
709 dwc_otg_core_global_regs_t *global_regs =
710 core_if->core_global_regs;
711
712 DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__);
713
714 /* Disable all interrupts. */
715 dwc_write_reg32(&global_regs->gintmsk, 0);
716
717 /* Clear any pending interrupts */
718 dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF);
719
720 /* Enable the common interrupts */
721 dwc_otg_enable_common_interrupts(core_if);
722
723 /* Enable interrupts */
724 intr_mask.b.usbreset = 1;
725 intr_mask.b.enumdone = 1;
726
727 if(!core_if->multiproc_int_enable) {
728 intr_mask.b.inepintr = 1;
729 intr_mask.b.outepintr = 1;
730 }
731
732 intr_mask.b.erlysuspend = 1;
733
734 if(core_if->en_multiple_tx_fifo == 0) {
735 intr_mask.b.epmismatch = 1;
736 }
737
738
739 #ifdef DWC_EN_ISOC
740 if(core_if->dma_enable) {
741 if(core_if->dma_desc_enable == 0) {
742 if(core_if->pti_enh_enable) {
743 dctl_data_t dctl = { .d32 = 0 };
744 dctl.b.ifrmnum = 1;
745 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl, 0, dctl.d32);
746 } else {
747 intr_mask.b.incomplisoin = 1;
748 intr_mask.b.incomplisoout = 1;
749 }
750 }
751 } else {
752 intr_mask.b.incomplisoin = 1;
753 intr_mask.b.incomplisoout = 1;
754 }
755 #endif // DWC_EN_ISOC
756
757 /** @todo NGS: Should this be a module parameter? */
758 #ifdef USE_PERIODIC_EP
759 intr_mask.b.isooutdrop = 1;
760 intr_mask.b.eopframe = 1;
761 intr_mask.b.incomplisoin = 1;
762 intr_mask.b.incomplisoout = 1;
763 #endif
764
765 dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32);
766
767 DWC_DEBUGPL(DBG_CIL, "%s() gintmsk=%0x\n", __func__,
768 dwc_read_reg32(&global_regs->gintmsk));
769 }
770
771 /**
772 * This function initializes the DWC_otg controller registers for
773 * device mode.
774 *
775 * @param core_if Programming view of DWC_otg controller
776 *
777 */
778 void dwc_otg_core_dev_init(dwc_otg_core_if_t *core_if)
779 {
780 int i,size;
781 u_int32_t *default_value_array;
782
783 dwc_otg_core_global_regs_t *global_regs =
784 core_if->core_global_regs;
785 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
786 dwc_otg_core_params_t *params = core_if->core_params;
787 dcfg_data_t dcfg = { .d32 = 0};
788 grstctl_t resetctl = { .d32 = 0 };
789 uint32_t rx_fifo_size;
790 fifosize_data_t nptxfifosize;
791 fifosize_data_t txfifosize;
792 dthrctl_data_t dthrctl;
793
794 /* Restart the Phy Clock */
795 dwc_write_reg32(core_if->pcgcctl, 0);
796
797 /* Device configuration register */
798 init_devspd(core_if);
799 dcfg.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dcfg);
800 dcfg.b.descdma = (core_if->dma_desc_enable) ? 1 : 0;
801 dcfg.b.perfrint = DWC_DCFG_FRAME_INTERVAL_80;
802
803 dwc_write_reg32(&dev_if->dev_global_regs->dcfg, dcfg.d32);
804
805 /* Configure data FIFO sizes */
806 if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) {
807 DWC_DEBUGPL(DBG_CIL, "Total FIFO Size=%d\n", core_if->total_fifo_size);
808 DWC_DEBUGPL(DBG_CIL, "Rx FIFO Size=%d\n", params->dev_rx_fifo_size);
809 DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO Size=%d\n", params->dev_nperio_tx_fifo_size);
810
811 /* Rx FIFO */
812 DWC_DEBUGPL(DBG_CIL, "initial grxfsiz=%08x\n",
813 dwc_read_reg32(&global_regs->grxfsiz));
814
815 rx_fifo_size = params->dev_rx_fifo_size;
816 dwc_write_reg32(&global_regs->grxfsiz, rx_fifo_size);
817
818 DWC_DEBUGPL(DBG_CIL, "new grxfsiz=%08x\n",
819 dwc_read_reg32(&global_regs->grxfsiz));
820
821 /** Set Periodic Tx FIFO Mask all bits 0 */
822 core_if->p_tx_msk = 0;
823
824 /** Set Tx FIFO Mask all bits 0 */
825 core_if->tx_msk = 0;
826
827 /* Non-periodic Tx FIFO */
828 DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n",
829 dwc_read_reg32(&global_regs->gnptxfsiz));
830
831 nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size;
832 nptxfifosize.b.startaddr = params->dev_rx_fifo_size;
833
834 dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32);
835
836 DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n",
837 dwc_read_reg32(&global_regs->gnptxfsiz));
838
839 txfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth;
840 if(core_if->en_multiple_tx_fifo == 0) {
841 //core_if->hwcfg4.b.ded_fifo_en==0
842
843 /**@todo NGS: Fix Periodic FIFO Sizing! */
844 /*
845 * Periodic Tx FIFOs These FIFOs are numbered from 1 to 15.
846 * Indexes of the FIFO size module parameters in the
847 * dev_perio_tx_fifo_size array and the FIFO size registers in
848 * the dptxfsiz array run from 0 to 14.
849 */
850 /** @todo Finish debug of this */
851 size=core_if->hwcfg4.b.num_dev_perio_in_ep;
852 default_value_array=params->dev_perio_tx_fifo_size;
853
854 }
855 else {
856 //core_if->hwcfg4.b.ded_fifo_en==1
857 /*
858 * Tx FIFOs These FIFOs are numbered from 1 to 15.
859 * Indexes of the FIFO size module parameters in the
860 * dev_tx_fifo_size array and the FIFO size registers in
861 * the dptxfsiz_dieptxf array run from 0 to 14.
862 */
863
864 size=core_if->hwcfg4.b.num_in_eps;
865 default_value_array=params->dev_tx_fifo_size;
866
867 }
868 for (i=0; i < size; i++)
869 {
870
871 txfifosize.b.depth = default_value_array[i];
872 DWC_DEBUGPL(DBG_CIL, "initial dptxfsiz_dieptxf[%d]=%08x\n", i,
873 dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]));
874 dwc_write_reg32(&global_regs->dptxfsiz_dieptxf[i],
875 txfifosize.d32);
876 DWC_DEBUGPL(DBG_CIL, "new dptxfsiz_dieptxf[%d]=%08x\n", i,
877 dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]));
878 txfifosize.b.startaddr += txfifosize.b.depth;
879 }
880 }
881 /* Flush the FIFOs */
882 dwc_otg_flush_tx_fifo(core_if, 0x10); /* all Tx FIFOs */
883 dwc_otg_flush_rx_fifo(core_if);
884
885 /* Flush the Learning Queue. */
886 resetctl.b.intknqflsh = 1;
887 dwc_write_reg32(&core_if->core_global_regs->grstctl, resetctl.d32);
888
889 /* Clear all pending Device Interrupts */
890
891 if(core_if->multiproc_int_enable) {
892 }
893
894 	/** @todo Decide whether this condition needs to be checked,
895 	 * or whether all pending interrupts should be cleared in any case.
896 */
897 if(core_if->multiproc_int_enable) {
898 for(i = 0; i < core_if->dev_if->num_in_eps; ++i) {
899 dwc_write_reg32(&dev_if->dev_global_regs->diepeachintmsk[i], 0);
900 }
901
902 for(i = 0; i < core_if->dev_if->num_out_eps; ++i) {
903 dwc_write_reg32(&dev_if->dev_global_regs->doepeachintmsk[i], 0);
904 }
905
906 dwc_write_reg32(&dev_if->dev_global_regs->deachint, 0xFFFFFFFF);
907 dwc_write_reg32(&dev_if->dev_global_regs->deachintmsk, 0);
908 } else {
909 dwc_write_reg32(&dev_if->dev_global_regs->diepmsk, 0);
910 dwc_write_reg32(&dev_if->dev_global_regs->doepmsk, 0);
911 dwc_write_reg32(&dev_if->dev_global_regs->daint, 0xFFFFFFFF);
912 dwc_write_reg32(&dev_if->dev_global_regs->daintmsk, 0);
913 }
914
915 for (i=0; i <= dev_if->num_in_eps; i++)
916 {
917 depctl_data_t depctl;
918 depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl);
919 if (depctl.b.epena) {
920 depctl.d32 = 0;
921 depctl.b.epdis = 1;
922 depctl.b.snak = 1;
923 }
924 else {
925 depctl.d32 = 0;
926 }
927
928 dwc_write_reg32(&dev_if->in_ep_regs[i]->diepctl, depctl.d32);
929
930
931 dwc_write_reg32(&dev_if->in_ep_regs[i]->dieptsiz, 0);
932 dwc_write_reg32(&dev_if->in_ep_regs[i]->diepdma, 0);
933 dwc_write_reg32(&dev_if->in_ep_regs[i]->diepint, 0xFF);
934 }
935
936 for (i=0; i <= dev_if->num_out_eps; i++)
937 {
938 depctl_data_t depctl;
939 depctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[i]->doepctl);
940 if (depctl.b.epena) {
941 depctl.d32 = 0;
942 depctl.b.epdis = 1;
943 depctl.b.snak = 1;
944 }
945 else {
946 depctl.d32 = 0;
947 }
948
949 dwc_write_reg32(&dev_if->out_ep_regs[i]->doepctl, depctl.d32);
950
951 dwc_write_reg32(&dev_if->out_ep_regs[i]->doeptsiz, 0);
952 dwc_write_reg32(&dev_if->out_ep_regs[i]->doepdma, 0);
953 dwc_write_reg32(&dev_if->out_ep_regs[i]->doepint, 0xFF);
954 }
955
956 if(core_if->en_multiple_tx_fifo && core_if->dma_enable) {
957 dev_if->non_iso_tx_thr_en = params->thr_ctl & 0x1;
958 dev_if->iso_tx_thr_en = (params->thr_ctl >> 1) & 0x1;
959 dev_if->rx_thr_en = (params->thr_ctl >> 2) & 0x1;
960
961 dev_if->rx_thr_length = params->rx_thr_length;
962 dev_if->tx_thr_length = params->tx_thr_length;
963
964 dev_if->setup_desc_index = 0;
965
966 dthrctl.d32 = 0;
967 dthrctl.b.non_iso_thr_en = dev_if->non_iso_tx_thr_en;
968 dthrctl.b.iso_thr_en = dev_if->iso_tx_thr_en;
969 dthrctl.b.tx_thr_len = dev_if->tx_thr_length;
970 dthrctl.b.rx_thr_en = dev_if->rx_thr_en;
971 dthrctl.b.rx_thr_len = dev_if->rx_thr_length;
972
973 dwc_write_reg32(&dev_if->dev_global_regs->dtknqr3_dthrctl, dthrctl.d32);
974
975 DWC_DEBUGPL(DBG_CIL, "Non ISO Tx Thr - %d\nISO Tx Thr - %d\nRx Thr - %d\nTx Thr Len - %d\nRx Thr Len - %d\n",
976 dthrctl.b.non_iso_thr_en, dthrctl.b.iso_thr_en, dthrctl.b.rx_thr_en, dthrctl.b.tx_thr_len, dthrctl.b.rx_thr_len);
977
978 }
979
980 dwc_otg_enable_device_interrupts(core_if);
981
982 {
983 diepmsk_data_t msk = { .d32 = 0 };
984 msk.b.txfifoundrn = 1;
985 if(core_if->multiproc_int_enable) {
986 dwc_modify_reg32(&dev_if->dev_global_regs->diepeachintmsk[0], msk.d32, msk.d32);
987 } else {
988 dwc_modify_reg32(&dev_if->dev_global_regs->diepmsk, msk.d32, msk.d32);
989 }
990 }
991
992
993 if(core_if->multiproc_int_enable) {
994 /* Set NAK on Babble */
995 dctl_data_t dctl = { .d32 = 0};
996 dctl.b.nakonbble = 1;
997 dwc_modify_reg32(&dev_if->dev_global_regs->dctl, 0, dctl.d32);
998 }
999 }
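/*
 * Worked example (illustrative parameter values only): with
 * dev_rx_fifo_size = 0x200, dev_nperio_tx_fifo_size = 0x100 and two
 * (dedicated or periodic) Tx FIFOs of 0x100 words each, the dynamic FIFO
 * setup above lays out the shared FIFO RAM as:
 *
 *	GRXFSIZ                 = 0x200            (Rx FIFO, words 0x000-0x1FF)
 *	GNPTXFSIZ  start/depth  = 0x200 / 0x100    (words 0x200-0x2FF)
 *	DIEPTXF1   start/depth  = 0x300 / 0x100    (words 0x300-0x3FF)
 *	DIEPTXF2   start/depth  = 0x400 / 0x100    (words 0x400-0x4FF)
 *
 * Each FIFO simply starts where the previous one ends; the sum must not
 * exceed total_fifo_size (GHWCFG3.DfifoDepth).
 */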
1000
1001 /**
1002 * This function enables the Host mode interrupts.
1003 *
1004 * @param core_if Programming view of DWC_otg controller
1005 */
1006 void dwc_otg_enable_host_interrupts(dwc_otg_core_if_t *core_if)
1007 {
1008 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
1009 gintmsk_data_t intr_mask = { .d32 = 0 };
1010
1011 DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__);
1012
1013 /* Disable all interrupts. */
1014 dwc_write_reg32(&global_regs->gintmsk, 0);
1015
1016 /* Clear any pending interrupts. */
1017 dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF);
1018
1019 /* Enable the common interrupts */
1020 dwc_otg_enable_common_interrupts(core_if);
1021
1022 /*
1023 * Enable host mode interrupts without disturbing common
1024 * interrupts.
1025 */
1026 intr_mask.b.sofintr = 1;
1027 intr_mask.b.portintr = 1;
1028 intr_mask.b.hcintr = 1;
1029
1030 dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32);
1031 }
1032
1033 /**
1034 * This function disables the Host Mode interrupts.
1035 *
1036 * @param core_if Programming view of DWC_otg controller
1037 */
1038 void dwc_otg_disable_host_interrupts(dwc_otg_core_if_t *core_if)
1039 {
1040 dwc_otg_core_global_regs_t *global_regs =
1041 core_if->core_global_regs;
1042 gintmsk_data_t intr_mask = { .d32 = 0 };
1043
1044 DWC_DEBUGPL(DBG_CILV, "%s()\n", __func__);
1045
1046 /*
1047 * Disable host mode interrupts without disturbing common
1048 * interrupts.
1049 */
1050 intr_mask.b.sofintr = 1;
1051 intr_mask.b.portintr = 1;
1052 intr_mask.b.hcintr = 1;
1053 intr_mask.b.ptxfempty = 1;
1054 intr_mask.b.nptxfempty = 1;
1055
1056 dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0);
1057 }
1058
1059 /**
1060 * This function initializes the DWC_otg controller registers for
1061 * host mode.
1062 *
1063 * This function flushes the Tx and Rx FIFOs and it flushes any entries in the
1064 * request queues. Host channels are reset to ensure that they are ready for
1065 * performing transfers.
1066 *
1067 * @param core_if Programming view of DWC_otg controller
1068 *
1069 */
1070 void dwc_otg_core_host_init(dwc_otg_core_if_t *core_if)
1071 {
1072 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
1073 dwc_otg_host_if_t *host_if = core_if->host_if;
1074 dwc_otg_core_params_t *params = core_if->core_params;
1075 hprt0_data_t hprt0 = { .d32 = 0 };
1076 fifosize_data_t nptxfifosize;
1077 fifosize_data_t ptxfifosize;
1078 int i;
1079 hcchar_data_t hcchar;
1080 hcfg_data_t hcfg;
1081 dwc_otg_hc_regs_t *hc_regs;
1082 int num_channels;
1083 gotgctl_data_t gotgctl = { .d32 = 0 };
1084
1085 DWC_DEBUGPL(DBG_CILV,"%s(%p)\n", __func__, core_if);
1086
1087 /* Restart the Phy Clock */
1088 dwc_write_reg32(core_if->pcgcctl, 0);
1089
1090 /* Initialize Host Configuration Register */
1091 init_fslspclksel(core_if);
1092 if (core_if->core_params->speed == DWC_SPEED_PARAM_FULL)
1093 {
1094 hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg);
1095 hcfg.b.fslssupp = 1;
1096 dwc_write_reg32(&host_if->host_global_regs->hcfg, hcfg.d32);
1097 }
1098
1099 /* Configure data FIFO sizes */
1100 if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) {
1101 DWC_DEBUGPL(DBG_CIL,"Total FIFO Size=%d\n", core_if->total_fifo_size);
1102 DWC_DEBUGPL(DBG_CIL,"Rx FIFO Size=%d\n", params->host_rx_fifo_size);
1103 DWC_DEBUGPL(DBG_CIL,"NP Tx FIFO Size=%d\n", params->host_nperio_tx_fifo_size);
1104 DWC_DEBUGPL(DBG_CIL,"P Tx FIFO Size=%d\n", params->host_perio_tx_fifo_size);
1105
1106 /* Rx FIFO */
1107 DWC_DEBUGPL(DBG_CIL,"initial grxfsiz=%08x\n", dwc_read_reg32(&global_regs->grxfsiz));
1108 dwc_write_reg32(&global_regs->grxfsiz, params->host_rx_fifo_size);
1109 DWC_DEBUGPL(DBG_CIL,"new grxfsiz=%08x\n", dwc_read_reg32(&global_regs->grxfsiz));
1110
1111 /* Non-periodic Tx FIFO */
1112 DWC_DEBUGPL(DBG_CIL,"initial gnptxfsiz=%08x\n", dwc_read_reg32(&global_regs->gnptxfsiz));
1113 nptxfifosize.b.depth = params->host_nperio_tx_fifo_size;
1114 nptxfifosize.b.startaddr = params->host_rx_fifo_size;
1115 dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32);
1116 DWC_DEBUGPL(DBG_CIL,"new gnptxfsiz=%08x\n", dwc_read_reg32(&global_regs->gnptxfsiz));
1117
1118 /* Periodic Tx FIFO */
1119 DWC_DEBUGPL(DBG_CIL,"initial hptxfsiz=%08x\n", dwc_read_reg32(&global_regs->hptxfsiz));
1120 ptxfifosize.b.depth = params->host_perio_tx_fifo_size;
1121 ptxfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth;
1122 dwc_write_reg32(&global_regs->hptxfsiz, ptxfifosize.d32);
1123 DWC_DEBUGPL(DBG_CIL,"new hptxfsiz=%08x\n", dwc_read_reg32(&global_regs->hptxfsiz));
1124 }
1125
1126 /* Clear Host Set HNP Enable in the OTG Control Register */
1127 gotgctl.b.hstsethnpen = 1;
1128 dwc_modify_reg32(&global_regs->gotgctl, gotgctl.d32, 0);
1129
1130 /* Make sure the FIFOs are flushed. */
1131 dwc_otg_flush_tx_fifo(core_if, 0x10 /* all Tx FIFOs */);
1132 dwc_otg_flush_rx_fifo(core_if);
1133
1134 /* Flush out any leftover queued requests. */
1135 num_channels = core_if->core_params->host_channels;
1136 for (i = 0; i < num_channels; i++)
1137 {
1138 hc_regs = core_if->host_if->hc_regs[i];
1139 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1140 hcchar.b.chen = 0;
1141 hcchar.b.chdis = 1;
1142 hcchar.b.epdir = 0;
1143 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1144 }
1145
1146 /* Halt all channels to put them into a known state. */
1147 for (i = 0; i < num_channels; i++)
1148 {
1149 int count = 0;
1150 hc_regs = core_if->host_if->hc_regs[i];
1151 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1152 hcchar.b.chen = 1;
1153 hcchar.b.chdis = 1;
1154 hcchar.b.epdir = 0;
1155 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1156 DWC_DEBUGPL(DBG_HCDV, "%s: Halt channel %d\n", __func__, i);
1157 do {
1158 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1159 if (++count > 1000)
1160 {
1161 DWC_ERROR("%s: Unable to clear halt on channel %d\n",
1162 __func__, i);
1163 break;
1164 }
1165 }
1166 while (hcchar.b.chen);
1167 }
1168
1169 /* Turn on the vbus power. */
1170 DWC_PRINT("Init: Port Power? op_state=%d\n", core_if->op_state);
1171 if (core_if->op_state == A_HOST) {
1172 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1173 DWC_PRINT("Init: Power Port (%d)\n", hprt0.b.prtpwr);
1174 if (hprt0.b.prtpwr == 0) {
1175 hprt0.b.prtpwr = 1;
1176 dwc_write_reg32(host_if->hprt0, hprt0.d32);
1177 }
1178 }
1179
1180 dwc_otg_enable_host_interrupts(core_if);
1181 }
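/*
 * Worked example (illustrative values only): on the host side,
 * host_rx_fifo_size = 0x200 and host_nperio_tx_fifo_size = 0x100 give
 * GNPTXFSIZ.startaddr = 0x200 and HPTXFSIZ.startaddr = 0x300 in the code
 * above, i.e. the periodic Tx FIFO is stacked directly after the Rx and
 * non-periodic Tx FIFOs in the shared FIFO RAM.
 */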
1182
1183 /**
1184 * Prepares a host channel for transferring packets to/from a specific
1185 * endpoint. The HCCHARn register is set up with the characteristics specified
1186 * in _hc. Host channel interrupts that may need to be serviced while this
1187 * transfer is in progress are enabled.
1188 *
1189 * @param core_if Programming view of DWC_otg controller
1190 * @param hc Information needed to initialize the host channel
1191 */
1192 void dwc_otg_hc_init(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
1193 {
1194 uint32_t intr_enable;
1195 hcintmsk_data_t hc_intr_mask;
1196 gintmsk_data_t gintmsk = { .d32 = 0 };
1197 hcchar_data_t hcchar;
1198 hcsplt_data_t hcsplt;
1199
1200 uint8_t hc_num = hc->hc_num;
1201 dwc_otg_host_if_t *host_if = core_if->host_if;
1202 dwc_otg_hc_regs_t *hc_regs = host_if->hc_regs[hc_num];
1203
1204 /* Clear old interrupt conditions for this host channel. */
1205 hc_intr_mask.d32 = 0xFFFFFFFF;
1206 hc_intr_mask.b.reserved = 0;
1207 dwc_write_reg32(&hc_regs->hcint, hc_intr_mask.d32);
1208
1209 /* Enable channel interrupts required for this transfer. */
1210 hc_intr_mask.d32 = 0;
1211 hc_intr_mask.b.chhltd = 1;
1212 if (core_if->dma_enable) {
1213 hc_intr_mask.b.ahberr = 1;
1214 if (hc->error_state && !hc->do_split &&
1215 hc->ep_type != DWC_OTG_EP_TYPE_ISOC) {
1216 hc_intr_mask.b.ack = 1;
1217 if (hc->ep_is_in) {
1218 hc_intr_mask.b.datatglerr = 1;
1219 if (hc->ep_type != DWC_OTG_EP_TYPE_INTR) {
1220 hc_intr_mask.b.nak = 1;
1221 }
1222 }
1223 }
1224 }
1225 else {
1226 switch (hc->ep_type) {
1227 case DWC_OTG_EP_TYPE_CONTROL:
1228 case DWC_OTG_EP_TYPE_BULK:
1229 hc_intr_mask.b.xfercompl = 1;
1230 hc_intr_mask.b.stall = 1;
1231 hc_intr_mask.b.xacterr = 1;
1232 hc_intr_mask.b.datatglerr = 1;
1233 if (hc->ep_is_in) {
1234 hc_intr_mask.b.bblerr = 1;
1235 }
1236 else {
1237 hc_intr_mask.b.nak = 1;
1238 hc_intr_mask.b.nyet = 1;
1239 if (hc->do_ping) {
1240 hc_intr_mask.b.ack = 1;
1241 }
1242 }
1243
1244 if (hc->do_split) {
1245 hc_intr_mask.b.nak = 1;
1246 if (hc->complete_split) {
1247 hc_intr_mask.b.nyet = 1;
1248 }
1249 else {
1250 hc_intr_mask.b.ack = 1;
1251 }
1252 }
1253
1254 if (hc->error_state) {
1255 hc_intr_mask.b.ack = 1;
1256 }
1257 break;
1258 case DWC_OTG_EP_TYPE_INTR:
1259 hc_intr_mask.b.xfercompl = 1;
1260 hc_intr_mask.b.nak = 1;
1261 hc_intr_mask.b.stall = 1;
1262 hc_intr_mask.b.xacterr = 1;
1263 hc_intr_mask.b.datatglerr = 1;
1264 hc_intr_mask.b.frmovrun = 1;
1265
1266 if (hc->ep_is_in) {
1267 hc_intr_mask.b.bblerr = 1;
1268 }
1269 if (hc->error_state) {
1270 hc_intr_mask.b.ack = 1;
1271 }
1272 if (hc->do_split) {
1273 if (hc->complete_split) {
1274 hc_intr_mask.b.nyet = 1;
1275 }
1276 else {
1277 hc_intr_mask.b.ack = 1;
1278 }
1279 }
1280 break;
1281 case DWC_OTG_EP_TYPE_ISOC:
1282 hc_intr_mask.b.xfercompl = 1;
1283 hc_intr_mask.b.frmovrun = 1;
1284 hc_intr_mask.b.ack = 1;
1285
1286 if (hc->ep_is_in) {
1287 hc_intr_mask.b.xacterr = 1;
1288 hc_intr_mask.b.bblerr = 1;
1289 }
1290 break;
1291 }
1292 }
1293 dwc_write_reg32(&hc_regs->hcintmsk, hc_intr_mask.d32);
1294
1295 // if(hc->ep_type == DWC_OTG_EP_TYPE_BULK && !hc->ep_is_in)
1296 // hc->max_packet = 512;
1297 /* Enable the top level host channel interrupt. */
1298 intr_enable = (1 << hc_num);
1299 dwc_modify_reg32(&host_if->host_global_regs->haintmsk, 0, intr_enable);
1300
1301 /* Make sure host channel interrupts are enabled. */
1302 gintmsk.b.hcintr = 1;
1303 dwc_modify_reg32(&core_if->core_global_regs->gintmsk, 0, gintmsk.d32);
1304
1305 /*
1306 * Program the HCCHARn register with the endpoint characteristics for
1307 * the current transfer.
1308 */
1309 hcchar.d32 = 0;
1310 hcchar.b.devaddr = hc->dev_addr;
1311 hcchar.b.epnum = hc->ep_num;
1312 hcchar.b.epdir = hc->ep_is_in;
1313 hcchar.b.lspddev = (hc->speed == DWC_OTG_EP_SPEED_LOW);
1314 hcchar.b.eptype = hc->ep_type;
1315 hcchar.b.mps = hc->max_packet;
1316
1317 dwc_write_reg32(&host_if->hc_regs[hc_num]->hcchar, hcchar.d32);
1318
1319 DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
1320 DWC_DEBUGPL(DBG_HCDV, " Dev Addr: %d\n", hcchar.b.devaddr);
1321 DWC_DEBUGPL(DBG_HCDV, " Ep Num: %d\n", hcchar.b.epnum);
1322 DWC_DEBUGPL(DBG_HCDV, " Is In: %d\n", hcchar.b.epdir);
1323 DWC_DEBUGPL(DBG_HCDV, " Is Low Speed: %d\n", hcchar.b.lspddev);
1324 DWC_DEBUGPL(DBG_HCDV, " Ep Type: %d\n", hcchar.b.eptype);
1325 DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps);
1326 DWC_DEBUGPL(DBG_HCDV, " Multi Cnt: %d\n", hcchar.b.multicnt);
1327
1328 /*
1329 * Program the HCSPLIT register for SPLITs
1330 */
1331 hcsplt.d32 = 0;
1332 if (hc->do_split) {
1333 DWC_DEBUGPL(DBG_HCDV, "Programming HC %d with split --> %s\n", hc->hc_num,
1334 hc->complete_split ? "CSPLIT" : "SSPLIT");
1335 hcsplt.b.compsplt = hc->complete_split;
1336 hcsplt.b.xactpos = hc->xact_pos;
1337 hcsplt.b.hubaddr = hc->hub_addr;
1338 hcsplt.b.prtaddr = hc->port_addr;
1339 DWC_DEBUGPL(DBG_HCDV, " comp split %d\n", hc->complete_split);
1340 DWC_DEBUGPL(DBG_HCDV, " xact pos %d\n", hc->xact_pos);
1341 DWC_DEBUGPL(DBG_HCDV, " hub addr %d\n", hc->hub_addr);
1342 DWC_DEBUGPL(DBG_HCDV, " port addr %d\n", hc->port_addr);
1343 DWC_DEBUGPL(DBG_HCDV, " is_in %d\n", hc->ep_is_in);
1344 DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps);
1345 DWC_DEBUGPL(DBG_HCDV, " xferlen: %d\n", hc->xfer_len);
1346 }
1347 dwc_write_reg32(&host_if->hc_regs[hc_num]->hcsplt, hcsplt.d32);
1348
1349 }
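/*
 * Illustrative sketch (not part of the original source): how the HCD layer
 * is expected to drive a channel with the helpers in this file. The dwc_hc_t
 * field values below are placeholders filled in from URB/QH state.
 *
 *	hc->dev_addr   = 1;
 *	hc->ep_num     = 2;
 *	hc->ep_is_in   = 1;
 *	hc->ep_type    = DWC_OTG_EP_TYPE_BULK;
 *	hc->max_packet = 512;
 *	hc->xfer_buff  = buf;
 *	hc->xfer_len   = len;
 *
 *	dwc_otg_hc_init(core_if, hc);              (program HCCHARn/HCSPLTn)
 *	dwc_otg_hc_start_transfer(core_if, hc);    (program HCTSIZn and enable)
 *	... on abort: dwc_otg_hc_halt(core_if, hc, DWC_OTG_HC_XFER_URB_DEQUEUE);
 *	dwc_otg_hc_cleanup(core_if, hc);           (after the ChHltd interrupt)
 */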
1350
1351 /**
1352 * Attempts to halt a host channel. This function should only be called in
1353 * Slave mode or to abort a transfer in either Slave mode or DMA mode. Under
1354 * normal circumstances in DMA mode, the controller halts the channel when the
1355 * transfer is complete or a condition occurs that requires application
1356 * intervention.
1357 *
1358 * In slave mode, checks for a free request queue entry, then sets the Channel
1359 * Enable and Channel Disable bits of the Host Channel Characteristics
1360  * register of the specified channel to initiate the halt. If there is no free
1361 * request queue entry, sets only the Channel Disable bit of the HCCHARn
1362 * register to flush requests for this channel. In the latter case, sets a
1363 * flag to indicate that the host channel needs to be halted when a request
1364 * queue slot is open.
1365 *
1366 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
1367 * HCCHARn register. The controller ensures there is space in the request
1368 * queue before submitting the halt request.
1369 *
1370 * Some time may elapse before the core flushes any posted requests for this
1371 * host channel and halts. The Channel Halted interrupt handler completes the
1372 * deactivation of the host channel.
1373 *
1374 * @param core_if Controller register interface.
1375 * @param hc Host channel to halt.
1376 * @param halt_status Reason for halting the channel.
1377 */
1378 void dwc_otg_hc_halt(dwc_otg_core_if_t *core_if,
1379 dwc_hc_t *hc,
1380 dwc_otg_halt_status_e halt_status)
1381 {
1382 gnptxsts_data_t nptxsts;
1383 hptxsts_data_t hptxsts;
1384 hcchar_data_t hcchar;
1385 dwc_otg_hc_regs_t *hc_regs;
1386 dwc_otg_core_global_regs_t *global_regs;
1387 dwc_otg_host_global_regs_t *host_global_regs;
1388
1389 hc_regs = core_if->host_if->hc_regs[hc->hc_num];
1390 global_regs = core_if->core_global_regs;
1391 host_global_regs = core_if->host_if->host_global_regs;
1392
1393 WARN_ON(halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS);
1394
1395 if (halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE ||
1396 halt_status == DWC_OTG_HC_XFER_AHB_ERR) {
1397 /*
1398 * Disable all channel interrupts except Ch Halted. The QTD
1399 * and QH state associated with this transfer has been cleared
1400 * (in the case of URB_DEQUEUE), so the channel needs to be
1401 * shut down carefully to prevent crashes.
1402 */
1403 hcintmsk_data_t hcintmsk;
1404 hcintmsk.d32 = 0;
1405 hcintmsk.b.chhltd = 1;
1406 dwc_write_reg32(&hc_regs->hcintmsk, hcintmsk.d32);
1407
1408 /*
1409 * Make sure no other interrupts besides halt are currently
1410 * pending. Handling another interrupt could cause a crash due
1411 * to the QTD and QH state.
1412 */
1413 dwc_write_reg32(&hc_regs->hcint, ~hcintmsk.d32);
1414
1415 /*
1416 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
1417 * even if the channel was already halted for some other
1418 * reason.
1419 */
1420 hc->halt_status = halt_status;
1421
1422 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1423 if (hcchar.b.chen == 0) {
1424 /*
1425 * The channel is either already halted or it hasn't
1426 * started yet. In DMA mode, the transfer may halt if
1427 * it finishes normally or a condition occurs that
1428 * requires driver intervention. Don't want to halt
1429 * the channel again. In either Slave or DMA mode,
1430 * it's possible that the transfer has been assigned
1431 * to a channel, but not started yet when an URB is
1432 * dequeued. Don't want to halt a channel that hasn't
1433 * started yet.
1434 */
1435 return;
1436 }
1437 }
1438
1439 if (hc->halt_pending) {
1440 /*
1441 * A halt has already been issued for this channel. This might
1442 * happen when a transfer is aborted by a higher level in
1443 * the stack.
1444 */
1445 #ifdef DEBUG
1446 DWC_PRINT("*** %s: Channel %d, _hc->halt_pending already set ***\n",
1447 __func__, hc->hc_num);
1448
1449 /* dwc_otg_dump_global_registers(core_if); */
1450 /* dwc_otg_dump_host_registers(core_if); */
1451 #endif
1452 return;
1453 }
1454
1455 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1456 hcchar.b.chen = 1;
1457 hcchar.b.chdis = 1;
1458
1459 if (!core_if->dma_enable) {
1460 /* Check for space in the request queue to issue the halt. */
1461 if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL ||
1462 hc->ep_type == DWC_OTG_EP_TYPE_BULK) {
1463 nptxsts.d32 = dwc_read_reg32(&global_regs->gnptxsts);
1464 if (nptxsts.b.nptxqspcavail == 0) {
1465 hcchar.b.chen = 0;
1466 }
1467 }
1468 else {
1469 hptxsts.d32 = dwc_read_reg32(&host_global_regs->hptxsts);
1470 if ((hptxsts.b.ptxqspcavail == 0) || (core_if->queuing_high_bandwidth)) {
1471 hcchar.b.chen = 0;
1472 }
1473 }
1474 }
1475
1476 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1477
1478 hc->halt_status = halt_status;
1479
1480 if (hcchar.b.chen) {
1481 hc->halt_pending = 1;
1482 hc->halt_on_queue = 0;
1483 }
1484 else {
1485 hc->halt_on_queue = 1;
1486 }
1487
1488 DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
1489 DWC_DEBUGPL(DBG_HCDV, " hcchar: 0x%08x\n", hcchar.d32);
1490 DWC_DEBUGPL(DBG_HCDV, " halt_pending: %d\n", hc->halt_pending);
1491 DWC_DEBUGPL(DBG_HCDV, " halt_on_queue: %d\n", hc->halt_on_queue);
1492 DWC_DEBUGPL(DBG_HCDV, " halt_status: %d\n", hc->halt_status);
1493
1494 return;
1495 }
1496
1497 /**
1498 * Clears the transfer state for a host channel. This function is normally
1499 * called after a transfer is done and the host channel is being released.
1500 *
1501 * @param core_if Programming view of DWC_otg controller.
1502 * @param hc Identifies the host channel to clean up.
1503 */
1504 void dwc_otg_hc_cleanup(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
1505 {
1506 dwc_otg_hc_regs_t *hc_regs;
1507
1508 hc->xfer_started = 0;
1509
1510 /*
1511 * Clear channel interrupt enables and any unhandled channel interrupt
1512 * conditions.
1513 */
1514 hc_regs = core_if->host_if->hc_regs[hc->hc_num];
1515 dwc_write_reg32(&hc_regs->hcintmsk, 0);
1516 dwc_write_reg32(&hc_regs->hcint, 0xFFFFFFFF);
1517
1518 #ifdef DEBUG
1519 del_timer(&core_if->hc_xfer_timer[hc->hc_num]);
1520 {
1521 hcchar_data_t hcchar;
1522 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1523 if (hcchar.b.chdis) {
1524 DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n",
1525 __func__, hc->hc_num, hcchar.d32);
1526 }
1527 }
1528 #endif
1529 }
1530
1531 /**
1532 * Sets the channel property that indicates in which frame a periodic transfer
1533 * should occur. This is always set to the _next_ frame. This function has no
1534 * effect on non-periodic transfers.
1535 *
1536 * @param core_if Programming view of DWC_otg controller.
1537 * @param hc Identifies the host channel to set up and its properties.
1538 * @param hcchar Current value of the HCCHAR register for the specified host
1539 * channel.
1540 */
1541 static inline void hc_set_even_odd_frame(dwc_otg_core_if_t *core_if,
1542 dwc_hc_t *hc,
1543 hcchar_data_t *hcchar)
1544 {
1545 if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
1546 hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
1547 hfnum_data_t hfnum;
1548 hfnum.d32 = dwc_read_reg32(&core_if->host_if->host_global_regs->hfnum);
1549
1550 /* 1 if _next_ frame is odd, 0 if it's even */
1551 hcchar->b.oddfrm = (hfnum.b.frnum & 0x1) ? 0 : 1;
1552 #ifdef DEBUG
1553 if (hc->ep_type == DWC_OTG_EP_TYPE_INTR && hc->do_split && !hc->complete_split) {
1554 switch (hfnum.b.frnum & 0x7) {
1555 case 7:
1556 core_if->hfnum_7_samples++;
1557 core_if->hfnum_7_frrem_accum += hfnum.b.frrem;
1558 break;
1559 case 0:
1560 core_if->hfnum_0_samples++;
1561 core_if->hfnum_0_frrem_accum += hfnum.b.frrem;
1562 break;
1563 default:
1564 core_if->hfnum_other_samples++;
1565 core_if->hfnum_other_frrem_accum += hfnum.b.frrem;
1566 break;
1567 }
1568 }
1569 #endif
1570 }
1571 }
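/*
 * Example (added for clarity): if HFNUM.FrNum currently reads an even value
 * such as 0x2A, the helper above sets HCCHAR.OddFrm = 1 so that the periodic
 * transfer goes out in the next, odd-numbered (micro)frame; an odd current
 * frame number yields OddFrm = 0 for the following even frame.
 */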
1572
1573 #ifdef DEBUG
1574 static void hc_xfer_timeout(unsigned long ptr)
1575 {
1576 hc_xfer_info_t *xfer_info = (hc_xfer_info_t *)ptr;
1577 int hc_num = xfer_info->hc->hc_num;
1578 DWC_WARN("%s: timeout on channel %d\n", __func__, hc_num);
1579 DWC_WARN(" start_hcchar_val 0x%08x\n", xfer_info->core_if->start_hcchar_val[hc_num]);
1580 }
1581 #endif
1582
1583 /*
1584 * This function does the setup for a data transfer for a host channel and
1585 * starts the transfer. May be called in either Slave mode or DMA mode. In
1586 * Slave mode, the caller must ensure that there is sufficient space in the
1587 * request queue and Tx Data FIFO.
1588 *
1589 * For an OUT transfer in Slave mode, it loads a data packet into the
1590 * appropriate FIFO. If necessary, additional data packets will be loaded in
1591 * the Host ISR.
1592 *
1593 * For an IN transfer in Slave mode, a data packet is requested. The data
1594 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
1595 * additional data packets are requested in the Host ISR.
1596 *
1597 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
1598 * register along with a packet count of 1 and the channel is enabled. This
1599 * causes a single PING transaction to occur. Other fields in HCTSIZ are
1600 * simply set to 0 since no data transfer occurs in this case.
1601 *
1602 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
1603 * all the information required to perform the subsequent data transfer. In
1604 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
1605 * controller performs the entire PING protocol, then starts the data
1606 * transfer.
1607 *
1608 * @param core_if Programming view of DWC_otg controller.
1609 * @param hc Information needed to initialize the host channel. The xfer_len
1610 * value may be reduced to accommodate the max widths of the XferSize and
1611 * PktCnt fields in the HCTSIZn register. The multi_count value may be changed
1612 * to reflect the final xfer_len value.
1613 */
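/*
 * Worked example (added for clarity): for a non-split bulk OUT transfer of
 * xfer_len = 1200 bytes with max_packet = 512, the code below computes
 * num_packets = (1200 + 511) / 512 = 3 and leaves xfer_len unchanged, so
 * HCTSIZ is programmed with XferSize = 1200 and PktCnt = 3. Only when
 * num_packets would exceed max_packet_count is xfer_len rounded down to
 * num_packets * max_packet.
 */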
1614 void dwc_otg_hc_start_transfer(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
1615 {
1616 hcchar_data_t hcchar;
1617 hctsiz_data_t hctsiz;
1618 uint16_t num_packets;
1619 uint32_t max_hc_xfer_size = core_if->core_params->max_transfer_size;
1620 uint16_t max_hc_pkt_count = core_if->core_params->max_packet_count;
1621 dwc_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num];
1622
1623 hctsiz.d32 = 0;
1624
1625 if (hc->do_ping) {
1626 if (!core_if->dma_enable) {
1627 dwc_otg_hc_do_ping(core_if, hc);
1628 hc->xfer_started = 1;
1629 return;
1630 }
1631 else {
1632 hctsiz.b.dopng = 1;
1633 }
1634 }
1635
1636 if (hc->do_split) {
1637 num_packets = 1;
1638
1639 if (hc->complete_split && !hc->ep_is_in) {
1640 /* For CSPLIT OUT Transfer, set the size to 0 so the
1641 * core doesn't expect any data written to the FIFO */
1642 hc->xfer_len = 0;
1643 }
1644 else if (hc->ep_is_in || (hc->xfer_len > hc->max_packet)) {
1645 hc->xfer_len = hc->max_packet;
1646 }
1647 else if (!hc->ep_is_in && (hc->xfer_len > 188)) {
1648 hc->xfer_len = 188;
1649 }
1650
1651 hctsiz.b.xfersize = hc->xfer_len;
1652 }
1653 else {
1654 /*
1655 * Ensure that the transfer length and packet count will fit
1656 * in the widths allocated for them in the HCTSIZn register.
1657 */
1658 if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
1659 hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
1660 /*
1661 * Make sure the transfer size is no larger than one
1662 * (micro)frame's worth of data. (A check was done
1663 * when the periodic transfer was accepted to ensure
1664 * that a (micro)frame's worth of data can be
1665 * programmed into a channel.)
1666 */
1667 uint32_t max_periodic_len = hc->multi_count * hc->max_packet;
1668 if (hc->xfer_len > max_periodic_len) {
1669 hc->xfer_len = max_periodic_len;
1670 }
1671 else {
1672 }
1673 }
1674 else if (hc->xfer_len > max_hc_xfer_size) {
1675 /* Make sure that xfer_len is a multiple of max packet size. */
1676 hc->xfer_len = max_hc_xfer_size - hc->max_packet + 1;
1677 }
1678
1679 if (hc->xfer_len > 0) {
1680 num_packets = (hc->xfer_len + hc->max_packet - 1) / hc->max_packet;
1681 if (num_packets > max_hc_pkt_count) {
1682 num_packets = max_hc_pkt_count;
1683 hc->xfer_len = num_packets * hc->max_packet;
1684 }
1685 }
1686 else {
1687 /* Need 1 packet for transfer length of 0. */
1688 num_packets = 1;
1689 }
1690
1691 #if 0
1692         //host testusb item 10 runs a series of Control transfers
1693         //with URB_SHORT_NOT_OK set in transfer_flags;
1694         //changing xfer_len here would make that test fail
1695 if (hc->ep_is_in) {
1696 /* Always program an integral # of max packets for IN transfers. */
1697 hc->xfer_len = num_packets * hc->max_packet;
1698 }
1699 #endif
1700
1701 if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
1702 hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
1703 /*
1704 * Make sure that the multi_count field matches the
1705 * actual transfer length.
1706 */
1707 hc->multi_count = num_packets;
1708 }
1709
1710 if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
1711 /* Set up the initial PID for the transfer. */
1712 if (hc->speed == DWC_OTG_EP_SPEED_HIGH) {
1713 if (hc->ep_is_in) {
1714 if (hc->multi_count == 1) {
1715 hc->data_pid_start = DWC_OTG_HC_PID_DATA0;
1716 }
1717 else if (hc->multi_count == 2) {
1718 hc->data_pid_start = DWC_OTG_HC_PID_DATA1;
1719 }
1720 else {
1721 hc->data_pid_start = DWC_OTG_HC_PID_DATA2;
1722 }
1723 }
1724 else {
1725 if (hc->multi_count == 1) {
1726 hc->data_pid_start = DWC_OTG_HC_PID_DATA0;
1727 }
1728 else {
1729 hc->data_pid_start = DWC_OTG_HC_PID_MDATA;
1730 }
1731 }
1732 }
1733 else {
1734 hc->data_pid_start = DWC_OTG_HC_PID_DATA0;
1735 }
1736 }
1737
1738 hctsiz.b.xfersize = hc->xfer_len;
1739 }
1740
1741 hc->start_pkt_count = num_packets;
1742 hctsiz.b.pktcnt = num_packets;
1743 hctsiz.b.pid = hc->data_pid_start;
1744 dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
1745
1746 DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
1747 DWC_DEBUGPL(DBG_HCDV, " Xfer Size: %d\n", hctsiz.b.xfersize);
1748 DWC_DEBUGPL(DBG_HCDV, " Num Pkts: %d\n", hctsiz.b.pktcnt);
1749 DWC_DEBUGPL(DBG_HCDV, " Start PID: %d\n", hctsiz.b.pid);
1750
1751 if (core_if->dma_enable) {
1752 dwc_write_reg32(&hc_regs->hcdma, (uint32_t)hc->xfer_buff);
1753 }
1754
1755 /* Start the split */
1756 if (hc->do_split) {
1757 hcsplt_data_t hcsplt;
1758 hcsplt.d32 = dwc_read_reg32 (&hc_regs->hcsplt);
1759 hcsplt.b.spltena = 1;
1760 dwc_write_reg32(&hc_regs->hcsplt, hcsplt.d32);
1761 }
1762
1763 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1764 hcchar.b.multicnt = hc->multi_count;
1765 hc_set_even_odd_frame(core_if, hc, &hcchar);
1766 #ifdef DEBUG
1767 core_if->start_hcchar_val[hc->hc_num] = hcchar.d32;
1768 if (hcchar.b.chdis) {
1769 DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n",
1770 __func__, hc->hc_num, hcchar.d32);
1771 }
1772 #endif
1773
1774 /* Set host channel enable after all other setup is complete. */
1775 hcchar.b.chen = 1;
1776 hcchar.b.chdis = 0;
1777 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1778
1779 hc->xfer_started = 1;
1780 hc->requests++;
1781
1782 if (!core_if->dma_enable &&
1783 !hc->ep_is_in && hc->xfer_len > 0) {
1784 /* Load OUT packet into the appropriate Tx FIFO. */
1785 dwc_otg_hc_write_packet(core_if, hc);
1786 }
1787
1788 #ifdef DEBUG
1789 /* Start a timer for this transfer. */
1790 core_if->hc_xfer_timer[hc->hc_num].function = hc_xfer_timeout;
1791 core_if->hc_xfer_info[hc->hc_num].core_if = core_if;
1792 core_if->hc_xfer_info[hc->hc_num].hc = hc;
1793 core_if->hc_xfer_timer[hc->hc_num].data = (unsigned long)(&core_if->hc_xfer_info[hc->hc_num]);
1794 core_if->hc_xfer_timer[hc->hc_num].expires = jiffies + (HZ*10);
1795 add_timer(&core_if->hc_xfer_timer[hc->hc_num]);
1796 #endif
1797 }
1798
1799 /**
1800 * This function continues a data transfer that was started by previous call
1801 * to <code>dwc_otg_hc_start_transfer</code>. The caller must ensure there is
1802 * sufficient space in the request queue and Tx Data FIFO. This function
1803 * should only be called in Slave mode. In DMA mode, the controller acts
1804 * autonomously to complete transfers programmed to a host channel.
1805 *
1806 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
1807 * if there is any data remaining to be queued. For an IN transfer, another
1808 * data packet is always requested. For the SETUP phase of a control transfer,
1809 * this function does nothing.
1810 *
1811 * @return 1 if a new request is queued, 0 if no more requests are required
1812 * for this transfer.
1813 */
1814 int dwc_otg_hc_continue_transfer(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
1815 {
1816 DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
1817
1818 if (hc->do_split) {
1819 /* SPLITs always queue just once per channel */
1820 return 0;
1821 }
1822 else if (hc->data_pid_start == DWC_OTG_HC_PID_SETUP) {
1823 /* SETUPs are queued only once since they can't be NAKed. */
1824 return 0;
1825 }
1826 else if (hc->ep_is_in) {
1827 /*
1828 * Always queue another request for other IN transfers. If
1829 * back-to-back INs are issued and NAKs are received for both,
1830 * the driver may still be processing the first NAK when the
1831 * second NAK is received. When the interrupt handler clears
1832 * the NAK interrupt for the first NAK, the second NAK will
1833 * not be seen. So we can't depend on the NAK interrupt
1834 * handler to requeue a NAKed request. Instead, IN requests
1835 * are issued each time this function is called. When the
1836 * transfer completes, the extra requests for the channel will
1837 * be flushed.
1838 */
1839 hcchar_data_t hcchar;
1840 dwc_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num];
1841
1842 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1843 hc_set_even_odd_frame(core_if, hc, &hcchar);
1844 hcchar.b.chen = 1;
1845 hcchar.b.chdis = 0;
1846 DWC_DEBUGPL(DBG_HCDV, " IN xfer: hcchar = 0x%08x\n", hcchar.d32);
1847 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1848 hc->requests++;
1849 return 1;
1850 }
1851 else {
1852 /* OUT transfers. */
1853 if (hc->xfer_count < hc->xfer_len) {
1854 if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
1855 hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
1856 hcchar_data_t hcchar;
1857 dwc_otg_hc_regs_t *hc_regs;
1858 hc_regs = core_if->host_if->hc_regs[hc->hc_num];
1859 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1860 hc_set_even_odd_frame(core_if, hc, &hcchar);
1861 }
1862
1863 /* Load OUT packet into the appropriate Tx FIFO. */
1864 dwc_otg_hc_write_packet(core_if, hc);
1865 hc->requests++;
1866 return 1;
1867 }
1868 else {
1869 return 0;
1870 }
1871 }
1872 }
1873
1874 /**
1875 * Starts a PING transfer. This function should only be called in Slave mode.
1876 * The Do Ping bit is set in the HCTSIZ register, then the channel is enabled.
1877 */
1878 void dwc_otg_hc_do_ping(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
1879 {
1880 hcchar_data_t hcchar;
1881 hctsiz_data_t hctsiz;
1882 dwc_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num];
1883
1884 DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
1885
1886 hctsiz.d32 = 0;
1887 hctsiz.b.dopng = 1;
1888 hctsiz.b.pktcnt = 1;
1889 dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
1890
1891 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1892 hcchar.b.chen = 1;
1893 hcchar.b.chdis = 0;
1894 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1895 }
1896
1897 /**
1898 * This function writes a packet into the Tx FIFO associated with the Host
1899 * Channel. For a channel associated with a non-periodic EP, the non-periodic
1900 * Tx FIFO is written. For a channel associated with a periodic EP, the
1901 * periodic Tx FIFO is written. This function should only be called in Slave
1902 * mode.
1903 *
1904  * Upon return, the xfer_buff and xfer_count fields in hc are incremented by
1905  * the number of bytes written to the Tx FIFO.
1906 */
1907 void dwc_otg_hc_write_packet(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
1908 {
1909 uint32_t i;
1910 uint32_t remaining_count;
1911 uint32_t byte_count;
1912 uint32_t dword_count;
1913
1914 uint32_t *data_buff = (uint32_t *)(hc->xfer_buff);
1915 uint32_t *data_fifo = core_if->data_fifo[hc->hc_num];
1916
1917 remaining_count = hc->xfer_len - hc->xfer_count;
1918 if (remaining_count > hc->max_packet) {
1919 byte_count = hc->max_packet;
1920 }
1921 else {
1922 byte_count = remaining_count;
1923 }
1924
1925 dword_count = (byte_count + 3) / 4;
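	/* Example: a 7-byte packet gives dword_count = (7 + 3) / 4 = 2, since
	 * the FIFO is always written in whole 32-bit words. */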
1926
1927 if ((((unsigned long)data_buff) & 0x3) == 0) {
1928 /* xfer_buff is DWORD aligned. */
1929 for (i = 0; i < dword_count; i++, data_buff++)
1930 {
1931 dwc_write_reg32(data_fifo, *data_buff);
1932 }
1933 }
1934 else {
1935 /* xfer_buff is not DWORD aligned. */
1936 for (i = 0; i < dword_count; i++, data_buff++)
1937 {
1938 dwc_write_reg32(data_fifo, get_unaligned(data_buff));
1939 }
1940 }
1941
1942 hc->xfer_count += byte_count;
1943 hc->xfer_buff += byte_count;
1944 }
1945
1946 /**
1947 * Gets the current USB frame number. This is the frame number from the last
1948 * SOF packet.
1949 */
1950 uint32_t dwc_otg_get_frame_number(dwc_otg_core_if_t *core_if)
1951 {
1952 dsts_data_t dsts;
1953 dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts);
1954
1955 /* read current frame/microframe number from DSTS register */
1956 return dsts.b.soffn;
1957 }
1958
1959 /**
1960 * This function reads a setup packet from the Rx FIFO into the destination
1961 * buffer. This function is called from the Rx Status Queue Level (RxStsQLvl)
1962 * Interrupt routine when a SETUP packet has been received in Slave mode.
1963 *
1964 * @param core_if Programming view of DWC_otg controller.
1965 * @param dest Destination buffer for packet data.
1966 */
1967 void dwc_otg_read_setup_packet(dwc_otg_core_if_t *core_if, uint32_t *dest)
1968 {
1969 /* Get the 8 bytes of a setup transaction data */
1970
1971 /* Pop 2 DWORDS off the receive data FIFO into memory */
1972 dest[0] = dwc_read_reg32(core_if->data_fifo[0]);
1973 dest[1] = dwc_read_reg32(core_if->data_fifo[0]);
1974 }
1975
1976
1977 /**
1978 * This function enables EP0 OUT to receive SETUP packets and configures EP0
1979 * IN for transmitting packets. It is normally called when the
1980 * "Enumeration Done" interrupt occurs.
1981 *
1982 * @param core_if Programming view of DWC_otg controller.
1983 * @param ep The EP0 data.
1984 */
1985 void dwc_otg_ep0_activate(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
1986 {
1987 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
1988 dsts_data_t dsts;
1989 depctl_data_t diepctl;
1990 depctl_data_t doepctl;
1991 dctl_data_t dctl = { .d32 = 0 };
1992
1993 /* Read the Device Status and Endpoint 0 Control registers */
1994 dsts.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dsts);
1995 diepctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl);
1996 doepctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl);
1997
1998 /* Set the MPS of the IN EP based on the enumeration speed */
1999 switch (dsts.b.enumspd) {
2000 case DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ:
2001 case DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ:
2002 case DWC_DSTS_ENUMSPD_FS_PHY_48MHZ:
2003 diepctl.b.mps = DWC_DEP0CTL_MPS_64;
2004 break;
2005 case DWC_DSTS_ENUMSPD_LS_PHY_6MHZ:
2006 diepctl.b.mps = DWC_DEP0CTL_MPS_8;
2007 break;
2008 }
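	/* Note: the EP0 MPS field holds an encoded value rather than a byte
	 * count; per the DWC_DEP0CTL_MPS_* definitions, 0 selects 64 bytes
	 * and 3 selects 8 bytes. */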
2009
2010 dwc_write_reg32(&dev_if->in_ep_regs[0]->diepctl, diepctl.d32);
2011
2012 /* Enable OUT EP for receive */
2013 doepctl.b.epena = 1;
2014 dwc_write_reg32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32);
2015
2016 #ifdef VERBOSE
2017 DWC_DEBUGPL(DBG_PCDV,"doepctl0=%0x\n",
2018 dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl));
2019 DWC_DEBUGPL(DBG_PCDV,"diepctl0=%0x\n",
2020 dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl));
2021 #endif
2022 dctl.b.cgnpinnak = 1;
2023
2024 dwc_modify_reg32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32);
2025 DWC_DEBUGPL(DBG_PCDV,"dctl=%0x\n",
2026 dwc_read_reg32(&dev_if->dev_global_regs->dctl));
2027 }
2028
2029 /**
2030 * This function activates an EP. The Device EP control register for
2031 * the EP is configured as defined in the ep structure. Note: This
2032 * function is not used for EP0.
2033 *
2034 * @param core_if Programming view of DWC_otg controller.
2035 * @param ep The EP to activate.
2036 */
2037 void dwc_otg_ep_activate(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2038 {
2039 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
2040 depctl_data_t depctl;
2041 volatile uint32_t *addr;
2042 daint_data_t daintmsk = { .d32 = 0 };
2043
2044 DWC_DEBUGPL(DBG_PCDV, "%s() EP%d-%s\n", __func__, ep->num,
2045 (ep->is_in?"IN":"OUT"));
2046
2047 /* Read DEPCTLn register */
2048 if (ep->is_in == 1) {
2049 addr = &dev_if->in_ep_regs[ep->num]->diepctl;
2050 daintmsk.ep.in = 1<<ep->num;
2051 }
2052 else {
2053 addr = &dev_if->out_ep_regs[ep->num]->doepctl;
2054 daintmsk.ep.out = 1<<ep->num;
2055 }
2056
2057 /* If the EP is already active don't change the EP Control
2058 * register. */
2059 depctl.d32 = dwc_read_reg32(addr);
2060 if (!depctl.b.usbactep) {
2061 depctl.b.mps = ep->maxpacket;
2062 depctl.b.eptype = ep->type;
2063 depctl.b.txfnum = ep->tx_fifo_num;
2064
2065 if (ep->type == DWC_OTG_EP_TYPE_ISOC) {
2066 depctl.b.setd0pid = 1; // ???
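			/* Assumption: for isochronous EPs this bit position in
			 * DEPCTL acts as "set even (micro)frame" rather than
			 * "set DATA0 PID", which would explain why both
			 * branches program the same field. */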
2067 }
2068 else {
2069 depctl.b.setd0pid = 1;
2070 }
2071 depctl.b.usbactep = 1;
2072
2073 dwc_write_reg32(addr, depctl.d32);
2074 DWC_DEBUGPL(DBG_PCDV,"DEPCTL(%.8x)=%08x\n",(u32)addr, dwc_read_reg32(addr));
2075 }
2076
2077 /* Enable the Interrupt for this EP */
2078 if(core_if->multiproc_int_enable) {
2079 if (ep->is_in == 1) {
2080 diepmsk_data_t diepmsk = { .d32 = 0};
2081 diepmsk.b.xfercompl = 1;
2082 diepmsk.b.timeout = 1;
2083 diepmsk.b.epdisabled = 1;
2084 diepmsk.b.ahberr = 1;
2085 diepmsk.b.intknepmis = 1;
2086 diepmsk.b.txfifoundrn = 1; //?????
2087
2088
2089 if(core_if->dma_desc_enable) {
2090 diepmsk.b.bna = 1;
2091 }
2092 /*
2093 if(core_if->dma_enable) {
2094 doepmsk.b.nak = 1;
2095 }
2096 */
2097 dwc_write_reg32(&dev_if->dev_global_regs->diepeachintmsk[ep->num], diepmsk.d32);
2098
2099 } else {
2100 doepmsk_data_t doepmsk = { .d32 = 0};
2101 doepmsk.b.xfercompl = 1;
2102 doepmsk.b.ahberr = 1;
2103 doepmsk.b.epdisabled = 1;
2104
2105
2106 if(core_if->dma_desc_enable) {
2107 doepmsk.b.bna = 1;
2108 }
2109 /*
2110 doepmsk.b.babble = 1;
2111 doepmsk.b.nyet = 1;
2112 doepmsk.b.nak = 1;
2113 */
2114 dwc_write_reg32(&dev_if->dev_global_regs->doepeachintmsk[ep->num], doepmsk.d32);
2115 }
2116 dwc_modify_reg32(&dev_if->dev_global_regs->deachintmsk,
2117 0, daintmsk.d32);
2118 } else {
2119 dwc_modify_reg32(&dev_if->dev_global_regs->daintmsk,
2120 0, daintmsk.d32);
2121 }
2122
2123 DWC_DEBUGPL(DBG_PCDV,"DAINTMSK=%0x\n",
2124 dwc_read_reg32(&dev_if->dev_global_regs->daintmsk));
2125
2126 ep->stall_clear_flag = 0;
2127 return;
2128 }
2129
2130 /**
2131 * This function deactivates an EP. This is done by clearing the USB Active
2132 * EP bit in the Device EP control register. Note: This function is not used
2133 * for EP0. EP0 cannot be deactivated.
2134 *
2135 * @param core_if Programming view of DWC_otg controller.
2136 * @param ep The EP to deactivate.
2137 */
2138 void dwc_otg_ep_deactivate(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2139 {
2140 depctl_data_t depctl = { .d32 = 0 };
2141 volatile uint32_t *addr;
2142 daint_data_t daintmsk = { .d32 = 0};
2143
2144 /* Read DEPCTLn register */
2145 if (ep->is_in == 1) {
2146 addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
2147 daintmsk.ep.in = 1<<ep->num;
2148 }
2149 else {
2150 addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
2151 daintmsk.ep.out = 1<<ep->num;
2152 }
2153
2154         //Only disable the EP when it is currently enabled; otherwise the
2155         //wait loops below can hang (seen with the halt test in CV9)
2156 depctl.d32=dwc_read_reg32(addr);
2157 if(depctl.b.epena){
2158 if (ep->is_in == 1) {
2159 diepint_data_t diepint;
2160 dwc_otg_dev_in_ep_regs_t *in_reg=core_if->dev_if->in_ep_regs[ep->num];
2161
2162 //Set ep nak
2163 depctl.d32=dwc_read_reg32(&in_reg->diepctl);
2164 depctl.b.snak=1;
2165 dwc_write_reg32(&in_reg->diepctl,depctl.d32);
2166
2167 //wait for diepint.b.inepnakeff
2168 diepint.d32=dwc_read_reg32(&in_reg->diepint);
2169 while(!diepint.b.inepnakeff){
2170 udelay(1);
2171 diepint.d32=dwc_read_reg32(&in_reg->diepint);
2172 }
2173 diepint.d32=0;
2174 diepint.b.inepnakeff=1;
2175 dwc_write_reg32(&in_reg->diepint,diepint.d32);
2176
2177 //set ep disable and snak
2178 depctl.d32=dwc_read_reg32(&in_reg->diepctl);
2179 depctl.b.snak=1;
2180 depctl.b.epdis=1;
2181 dwc_write_reg32(&in_reg->diepctl,depctl.d32);
2182
2183 //wait for diepint.b.epdisabled
2184 diepint.d32=dwc_read_reg32(&in_reg->diepint);
2185 while(!diepint.b.epdisabled){
2186 udelay(1);
2187 diepint.d32=dwc_read_reg32(&in_reg->diepint);
2188 }
2189 diepint.d32=0;
2190 diepint.b.epdisabled=1;
2191 dwc_write_reg32(&in_reg->diepint,diepint.d32);
2192
2193 //clear ep enable and disable bit
2194 depctl.d32=dwc_read_reg32(&in_reg->diepctl);
2195 depctl.b.epena=0;
2196 depctl.b.epdis=0;
2197 dwc_write_reg32(&in_reg->diepctl,depctl.d32);
2198
2199 }
2200 #if 0
2201 //This path follows DWC OTG DataBook v2.72a, 6.4.2.1.3 "Disabling an OUT Endpoint",
2202 //but it doesn't work; the old code above does.
2203 else {
2204 doepint_data_t doepint;
2205 dwc_otg_dev_out_ep_regs_t *out_reg=core_if->dev_if->out_ep_regs[ep->num];
2206 dctl_data_t dctl;
2207 gintsts_data_t gintsts;
2208
2209 //set dctl global out nak
2210 dctl.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dctl);
2211 dctl.b.sgoutnak=1;
2212 dwc_write_reg32(&core_if->dev_if->dev_global_regs->dctl,dctl.d32);
2213
2214 //wait for gintsts.goutnakeff
2215 gintsts.d32=dwc_read_reg32(&core_if->core_global_regs->gintsts);
2216 while(!gintsts.b.goutnakeff){
2217 udelay(1);
2218 gintsts.d32=dwc_read_reg32(&core_if->core_global_regs->gintsts);
2219 }
2220 gintsts.d32=0;
2221 gintsts.b.goutnakeff=1;
2222 dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32);
2223
2224 //set ep disable and snak
2225 depctl.d32=dwc_read_reg32(&out_reg->doepctl);
2226 depctl.b.snak=1;
2227 depctl.b.epdis=1;
2228 dwc_write_reg32(&out_reg->doepctl,depctl.d32);
2229
2230 //wait for diepint.b.epdisabled
2231 doepint.d32=dwc_read_reg32(&out_reg->doepint);
2232 while(!doepint.b.epdisabled){
2233 udelay(1);
2234 doepint.d32=dwc_read_reg32(&out_reg->doepint);
2235 }
2236 doepint.d32=0;
2237 doepint.b.epdisabled=1;
2238 dwc_write_reg32(&out_reg->doepint,doepint.d32);
2239
2240 //clear ep enable and disable bit
2241 depctl.d32=dwc_read_reg32(&out_reg->doepctl);
2242 depctl.b.epena=0;
2243 depctl.b.epdis=0;
2244 dwc_write_reg32(&out_reg->doepctl,depctl.d32);
2245 }
2246 #endif
2247
2248 depctl.d32=0;
2249 depctl.b.usbactep = 0;
2250
2251 if (ep->is_in == 0) {
2252 if(core_if->dma_enable||core_if->dma_desc_enable)
2253 depctl.b.epdis = 1;
2254 }
2255
2256 dwc_write_reg32(addr, depctl.d32);
2257 }
2258
2259 /* Disable the Interrupt for this EP */
2260 if(core_if->multiproc_int_enable) {
2261 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->deachintmsk,
2262 daintmsk.d32, 0);
2263
2264 if (ep->is_in == 1) {
2265 dwc_write_reg32(&core_if->dev_if->dev_global_regs->diepeachintmsk[ep->num], 0);
2266 } else {
2267 dwc_write_reg32(&core_if->dev_if->dev_global_regs->doepeachintmsk[ep->num], 0);
2268 }
2269 } else {
2270 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->daintmsk,
2271 daintmsk.d32, 0);
2272 }
2273
2274 if (ep->is_in == 1) {
2275 DWC_DEBUGPL(DBG_PCD, "DIEPCTL(%.8x)=%08x DIEPTSIZ=%08x, DIEPINT=%.8x, DIEPDMA=%.8x, DTXFSTS=%.8x\n",
2276 (u32)&core_if->dev_if->in_ep_regs[ep->num]->diepctl,
2277 dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->diepctl),
2278 dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz),
2279 dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->diepint),
2280 dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->diepdma),
2281 dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dtxfsts));
2282 DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n",
2283 dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk),
2284 dwc_read_reg32(&core_if->core_global_regs->gintmsk));
2285 }
2286 else {
2287 DWC_DEBUGPL(DBG_PCD, "DOEPCTL(%.8x)=%08x DOEPTSIZ=%08x, DOEPINT=%.8x, DOEPDMA=%.8x\n",
2288 (u32)&core_if->dev_if->out_ep_regs[ep->num]->doepctl,
2289 dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doepctl),
2290 dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz),
2291 dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doepint),
2292 dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doepdma));
2293
2294 DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n",
2295 dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk),
2296 dwc_read_reg32(&core_if->core_global_regs->gintmsk));
2297 }
2298
2299 }
2300
2301 /**
2302  * This function initializes the DMA descriptor chain for an EP transfer.
2303  * The total transfer length is split across descriptors of at most
2304  * ep->maxxfer bytes each; the last descriptor has its L (last) and IOC bits
2305  * set, and OUT descriptor sizes are rounded up to a DWORD multiple.
2306 *
2307 * @param core_if Programming view of DWC_otg controller.
2308 * @param ep The EP to start the transfer on.
2309 */
2310 static void init_dma_desc_chain(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2311 {
2312 dwc_otg_dma_desc_t* dma_desc;
2313 uint32_t offset;
2314 uint32_t xfer_est;
2315 int i;
2316
2317 ep->desc_cnt = ( ep->total_len / ep->maxxfer) +
2318 ((ep->total_len % ep->maxxfer) ? 1 : 0);
2319 if(!ep->desc_cnt)
2320 ep->desc_cnt = 1;
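	/* Example: total_len = 10000 with maxxfer = 4096 gives desc_cnt = 3
	 * (descriptors of 4096, 4096 and 1808 bytes); a zero-length transfer
	 * still uses one descriptor. */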
2321
2322 dma_desc = ep->desc_addr;
2323 xfer_est = ep->total_len;
2324 offset = 0;
2325 for( i = 0; i < ep->desc_cnt; ++i) {
2326 /** DMA Descriptor Setup */
2327 if(xfer_est > ep->maxxfer) {
2328 dma_desc->status.b.bs = BS_HOST_BUSY;
2329 dma_desc->status.b.l = 0;
2330 dma_desc->status.b.ioc = 0;
2331 dma_desc->status.b.sp = 0;
2332 dma_desc->status.b.bytes = ep->maxxfer;
2333 dma_desc->buf = ep->dma_addr + offset;
2334 dma_desc->status.b.bs = BS_HOST_READY;
2335
2336 xfer_est -= ep->maxxfer;
2337 offset += ep->maxxfer;
2338 } else {
2339 dma_desc->status.b.bs = BS_HOST_BUSY;
2340 dma_desc->status.b.l = 1;
2341 dma_desc->status.b.ioc = 1;
2342 if(ep->is_in) {
2343 dma_desc->status.b.sp = (xfer_est % ep->maxpacket) ?
2344 1 : ((ep->sent_zlp) ? 1 : 0);
2345 dma_desc->status.b.bytes = xfer_est;
2346 } else {
2347 dma_desc->status.b.bytes = xfer_est + ((4 - (xfer_est & 0x3)) & 0x3) ;
2348 }
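			/* The OUT byte count above is rounded up to the next
			 * DWORD boundary (e.g. 13 -> 16) because the core
			 * moves received data in whole 32-bit words. */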
2349
2350 dma_desc->buf = ep->dma_addr + offset;
2351 dma_desc->status.b.bs = BS_HOST_READY;
2352 }
2353 dma_desc ++;
2354 }
2355 }
2356
2357 /**
2358 * This function does the setup for a data transfer for an EP and
2359 * starts the transfer. For an IN transfer, the packets will be
2360 * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers,
2361  * the packets are unloaded from the Rx FIFO in the ISR.
2362 *
2363 * @param core_if Programming view of DWC_otg controller.
2364 * @param ep The EP to start the transfer on.
2365 */
2366
2367 void dwc_otg_ep_start_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2368 {
2369 depctl_data_t depctl;
2370 deptsiz_data_t deptsiz;
2371 gintmsk_data_t intr_mask = { .d32 = 0};
2372
2373 DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__);
2374
2375 DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d "
2376 "xfer_buff=%p start_xfer_buff=%p\n",
2377 ep->num, (ep->is_in?"IN":"OUT"), ep->xfer_len,
2378 ep->xfer_count, ep->xfer_buff, ep->start_xfer_buff);
2379
2380 /* IN endpoint */
2381 if (ep->is_in == 1) {
2382 dwc_otg_dev_in_ep_regs_t *in_regs =
2383 core_if->dev_if->in_ep_regs[ep->num];
2384
2385 gnptxsts_data_t gtxstatus;
2386
2387 gtxstatus.d32 =
2388 dwc_read_reg32(&core_if->core_global_regs->gnptxsts);
2389
2390 if(core_if->en_multiple_tx_fifo == 0 && gtxstatus.b.nptxqspcavail == 0) {
2391 #ifdef DEBUG
2392 DWC_PRINT("TX Queue Full (0x%0x)\n", gtxstatus.d32);
2393 #endif
2394 return;
2395 }
2396
2397 depctl.d32 = dwc_read_reg32(&(in_regs->diepctl));
2398 deptsiz.d32 = dwc_read_reg32(&(in_regs->dieptsiz));
2399
2400 ep->xfer_len += (ep->maxxfer < (ep->total_len - ep->xfer_len)) ?
2401 ep->maxxfer : (ep->total_len - ep->xfer_len);
2402
2403 /* Zero Length Packet? */
2404 if ((ep->xfer_len - ep->xfer_count) == 0) {
2405 deptsiz.b.xfersize = 0;
2406 deptsiz.b.pktcnt = 1;
2407 }
2408 else {
2409 /* Program the transfer size and packet count
2410 * as follows: xfersize = N * maxpacket +
2411 * short_packet pktcnt = N + (short_packet
2412 * exist ? 1 : 0)
2413 */
2414 deptsiz.b.xfersize = ep->xfer_len - ep->xfer_count;
2415 deptsiz.b.pktcnt =
2416 (ep->xfer_len - ep->xfer_count - 1 + ep->maxpacket) /
2417 ep->maxpacket;
2418 }
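		/* Worked example: 1000 bytes remaining with maxpacket = 64
		 * programs xfersize = 1000 and pktcnt = 16; the 16th (short)
		 * packet carries the final 40 bytes. */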
2419
2420
2421 /* Write the DMA register */
2422 if (core_if->dma_enable) {
2423                 if (ep->dma_addr == DMA_ADDR_INVALID) {
2424                         ep->dma_addr = dma_map_single(NULL, (void *)(ep->xfer_buff), (ep->xfer_len), DMA_TO_DEVICE);
2425 }
2426 DWC_DEBUGPL(DBG_PCDV, "ep%d dma_addr=%.8x\n", ep->num, ep->dma_addr);
2427
2428 if (core_if->dma_desc_enable == 0) {
2429 dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
2430
2431 VERIFY_PCD_DMA_ADDR(ep->dma_addr);
2432 dwc_write_reg32 (&(in_regs->diepdma),
2433 (uint32_t)ep->dma_addr);
2434 }
2435 else {
2436 init_dma_desc_chain(core_if, ep);
2437 /** DIEPDMAn Register write */
2438
2439 VERIFY_PCD_DMA_ADDR(ep->dma_desc_addr);
2440 dwc_write_reg32(&in_regs->diepdma, ep->dma_desc_addr);
2441 }
2442 }
2443 else
2444 {
2445 dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
2446 if(ep->type != DWC_OTG_EP_TYPE_ISOC) {
2447 /**
2448 * Enable the Non-Periodic Tx FIFO empty interrupt,
2449                          * or the Tx FIFO empty interrupt in dedicated Tx FIFO mode,
2450 * the data will be written into the fifo by the ISR.
2451 */
2452 if(core_if->en_multiple_tx_fifo == 0) {
2453 intr_mask.b.nptxfempty = 1;
2454 dwc_modify_reg32(&core_if->core_global_regs->gintmsk,
2455 intr_mask.d32, intr_mask.d32);
2456 }
2457 else {
2458 /* Enable the Tx FIFO Empty Interrupt for this EP */
2459 if(ep->xfer_len > 0) {
2460 uint32_t fifoemptymsk = 0;
2461 fifoemptymsk = 1 << ep->num;
2462 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
2463 0, fifoemptymsk);
2464
2465 }
2466 }
2467 }
2468 }
2469
2470 /* EP enable, IN data in FIFO */
2471 depctl.b.cnak = 1;
2472 depctl.b.epena = 1;
2473 dwc_write_reg32(&in_regs->diepctl, depctl.d32);
2474
2475 depctl.d32 = dwc_read_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl);
2476 depctl.b.nextep = ep->num;
2477 dwc_write_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl, depctl.d32);
2478
2479 DWC_DEBUGPL(DBG_PCD, "DIEPCTL(%.8x)=%08x DIEPTSIZ=%08x, DIEPINT=%.8x, DIEPDMA=%.8x, DTXFSTS=%.8x\n",
2480 (u32)&in_regs->diepctl,
2481 dwc_read_reg32(&in_regs->diepctl),
2482 dwc_read_reg32(&in_regs->dieptsiz),
2483 dwc_read_reg32(&in_regs->diepint),
2484 dwc_read_reg32(&in_regs->diepdma),
2485 dwc_read_reg32(&in_regs->dtxfsts));
2486 DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n",
2487 dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk),
2488 dwc_read_reg32(&core_if->core_global_regs->gintmsk));
2489
2490 }
2491 else {
2492 /* OUT endpoint */
2493 dwc_otg_dev_out_ep_regs_t *out_regs =
2494 core_if->dev_if->out_ep_regs[ep->num];
2495
2496 depctl.d32 = dwc_read_reg32(&(out_regs->doepctl));
2497 deptsiz.d32 = dwc_read_reg32(&(out_regs->doeptsiz));
2498
2499 ep->xfer_len += (ep->maxxfer < (ep->total_len - ep->xfer_len)) ?
2500 ep->maxxfer : (ep->total_len - ep->xfer_len);
2501
2502 /* Program the transfer size and packet count as follows:
2503 *
2504 * pktcnt = N
2505 * xfersize = N * maxpacket
2506 */
2507 if ((ep->xfer_len - ep->xfer_count) == 0) {
2508 /* Zero Length Packet */
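                        /* A full max packet size is programmed below even for
                         * a ZLP, since OUT transfer sizes are expected in
                         * whole-packet multiples; the host still sends zero
                         * data bytes. */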
2509 deptsiz.b.xfersize = ep->maxpacket;
2510 deptsiz.b.pktcnt = 1;
2511 }
2512 else {
2513 deptsiz.b.pktcnt =
2514 (ep->xfer_len - ep->xfer_count + (ep->maxpacket - 1)) /
2515 ep->maxpacket;
2516 ep->xfer_len = deptsiz.b.pktcnt * ep->maxpacket + ep->xfer_count;
2517 deptsiz.b.xfersize = ep->xfer_len - ep->xfer_count;
2518 }
2519
2520 DWC_DEBUGPL(DBG_PCDV, "ep%d xfersize=%d pktcnt=%d\n",
2521 ep->num,
2522 deptsiz.b.xfersize, deptsiz.b.pktcnt);
2523
2524 if (core_if->dma_enable) {
2525                         if (ep->dma_addr == DMA_ADDR_INVALID) {
2526                                 ep->dma_addr = dma_map_single(NULL, (void *)(ep->xfer_buff), (ep->xfer_len), DMA_FROM_DEVICE); /* OUT buffer is written by the core */
2527 }
2528 DWC_DEBUGPL(DBG_PCDV, "ep%d dma_addr=%.8x\n",
2529 ep->num,
2530 ep->dma_addr);
2531 if (!core_if->dma_desc_enable) {
2532 dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
2533
2534 VERIFY_PCD_DMA_ADDR(ep->dma_addr);
2535 dwc_write_reg32 (&(out_regs->doepdma),
2536 (uint32_t)ep->dma_addr);
2537 }
2538 else {
2539 init_dma_desc_chain(core_if, ep);
2540
2541 /** DOEPDMAn Register write */
2542
2543 VERIFY_PCD_DMA_ADDR(ep->dma_desc_addr);
2544 dwc_write_reg32(&out_regs->doepdma, ep->dma_desc_addr);
2545 }
2546 }
2547 else {
2548 dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
2549 }
2550
2551 /* EP enable */
2552 depctl.b.cnak = 1;
2553 depctl.b.epena = 1;
2554
2555 dwc_write_reg32(&out_regs->doepctl, depctl.d32);
2556
2557 DWC_DEBUGPL(DBG_PCD, "DOEPCTL(%.8x)=%08x DOEPTSIZ=%08x, DOEPINT=%.8x, DOEPDMA=%.8x\n",
2558 (u32)&out_regs->doepctl,
2559 dwc_read_reg32(&out_regs->doepctl),
2560 dwc_read_reg32(&out_regs->doeptsiz),
2561 dwc_read_reg32(&out_regs->doepint),
2562 dwc_read_reg32(&out_regs->doepdma));
2563
2564 DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n",
2565 dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk),
2566 dwc_read_reg32(&core_if->core_global_regs->gintmsk));
2567 }
2568 }
2569
2570 /**
2571  * This function sets up a zero-length transfer in Buffer DMA and
2572  * Slave modes for USB requests with the zero flag set.
2573 *
2574 * @param core_if Programming view of DWC_otg controller.
2575 * @param ep The EP to start the transfer on.
2576 *
2577 */
2578 void dwc_otg_ep_start_zl_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2579 {
2580
2581 depctl_data_t depctl;
2582 deptsiz_data_t deptsiz;
2583 gintmsk_data_t intr_mask = { .d32 = 0};
2584
2585 DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__);
2586
2587 /* IN endpoint */
2588 if (ep->is_in == 1) {
2589 dwc_otg_dev_in_ep_regs_t *in_regs =
2590 core_if->dev_if->in_ep_regs[ep->num];
2591
2592 depctl.d32 = dwc_read_reg32(&(in_regs->diepctl));
2593 deptsiz.d32 = dwc_read_reg32(&(in_regs->dieptsiz));
2594
2595 deptsiz.b.xfersize = 0;
2596 deptsiz.b.pktcnt = 1;
2597
2598
2599 /* Write the DMA register */
2600 if (core_if->dma_enable) {
2601                         if (ep->dma_addr == DMA_ADDR_INVALID) {
2602                                 ep->dma_addr = dma_map_single(NULL, (void *)(ep->xfer_buff), (ep->xfer_len), DMA_TO_DEVICE);
2603 }
2604 if (core_if->dma_desc_enable == 0) {
2605 dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
2606
2607 VERIFY_PCD_DMA_ADDR(ep->dma_addr);
2608 dwc_write_reg32 (&(in_regs->diepdma),
2609 (uint32_t)ep->dma_addr);
2610 }
2611 }
2612 else {
2613 dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
2614 /**
2615 * Enable the Non-Periodic Tx FIFO empty interrupt,
2616                          * or the Tx FIFO empty interrupt in dedicated Tx FIFO mode,
2617 * the data will be written into the fifo by the ISR.
2618 */
2619 if(core_if->en_multiple_tx_fifo == 0) {
2620 intr_mask.b.nptxfempty = 1;
2621 dwc_modify_reg32(&core_if->core_global_regs->gintmsk,
2622 intr_mask.d32, intr_mask.d32);
2623 }
2624 else {
2625 /* Enable the Tx FIFO Empty Interrupt for this EP */
2626 if(ep->xfer_len > 0) {
2627 uint32_t fifoemptymsk = 0;
2628 fifoemptymsk = 1 << ep->num;
2629 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
2630 0, fifoemptymsk);
2631 }
2632 }
2633 }
2634
2635 /* EP enable, IN data in FIFO */
2636 depctl.b.cnak = 1;
2637 depctl.b.epena = 1;
2638 dwc_write_reg32(&in_regs->diepctl, depctl.d32);
2639
2640 depctl.d32 = dwc_read_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl);
2641 depctl.b.nextep = ep->num;
2642 dwc_write_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl, depctl.d32);
2643
2644 }
2645 else {
2646 /* OUT endpoint */
2647 dwc_otg_dev_out_ep_regs_t *out_regs =
2648 core_if->dev_if->out_ep_regs[ep->num];
2649
2650 depctl.d32 = dwc_read_reg32(&(out_regs->doepctl));
2651 deptsiz.d32 = dwc_read_reg32(&(out_regs->doeptsiz));
2652
2653 /* Zero Length Packet */
2654 deptsiz.b.xfersize = ep->maxpacket;
2655 deptsiz.b.pktcnt = 1;
2656
2657 if (core_if->dma_enable) {
2658                         if (ep->dma_addr == DMA_ADDR_INVALID) {
2659                                 ep->dma_addr = dma_map_single(NULL, (void *)(ep->xfer_buff), (ep->xfer_len), DMA_FROM_DEVICE); /* OUT buffer is written by the core */
2660 }
2661 if (!core_if->dma_desc_enable) {
2662 dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
2663
2664
2665 VERIFY_PCD_DMA_ADDR(ep->dma_addr);
2666 dwc_write_reg32 (&(out_regs->doepdma),
2667 (uint32_t)ep->dma_addr);
2668 }
2669 }
2670 else {
2671 dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
2672 }
2673
2674 /* EP enable */
2675 depctl.b.cnak = 1;
2676 depctl.b.epena = 1;
2677
2678 dwc_write_reg32(&out_regs->doepctl, depctl.d32);
2679
2680 }
2681 }
2682
2683 /**
2684 * This function does the setup for a data transfer for EP0 and starts
2685 * the transfer. For an IN transfer, the packets will be loaded into
2686 * the appropriate Tx FIFO in the ISR. For OUT transfers, the packets are
2687 * unloaded from the Rx FIFO in the ISR.
2688 *
2689 * @param core_if Programming view of DWC_otg controller.
2690 * @param ep The EP0 data.
2691 */
2692 void dwc_otg_ep0_start_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2693 {
2694 depctl_data_t depctl;
2695 deptsiz0_data_t deptsiz;
2696 gintmsk_data_t intr_mask = { .d32 = 0};
2697 dwc_otg_dma_desc_t* dma_desc;
2698
2699 DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d "
2700 "xfer_buff=%p start_xfer_buff=%p, dma_addr=%.8x\n",
2701 ep->num, (ep->is_in?"IN":"OUT"), ep->xfer_len,
2702 ep->xfer_count, ep->xfer_buff, ep->start_xfer_buff,ep->dma_addr);
2703
2704 ep->total_len = ep->xfer_len;
2705
2706 /* IN endpoint */
2707 if (ep->is_in == 1) {
2708 dwc_otg_dev_in_ep_regs_t *in_regs =
2709 core_if->dev_if->in_ep_regs[0];
2710
2711 gnptxsts_data_t gtxstatus;
2712
2713 gtxstatus.d32 =
2714 dwc_read_reg32(&core_if->core_global_regs->gnptxsts);
2715
2716 if(core_if->en_multiple_tx_fifo == 0 && gtxstatus.b.nptxqspcavail == 0) {
2717 #ifdef DEBUG
2718 deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz);
2719 DWC_DEBUGPL(DBG_PCD,"DIEPCTL0=%0x\n",
2720 dwc_read_reg32(&in_regs->diepctl));
2721 DWC_DEBUGPL(DBG_PCD, "DIEPTSIZ0=%0x (sz=%d, pcnt=%d)\n",
2722 deptsiz.d32,
2723 deptsiz.b.xfersize, deptsiz.b.pktcnt);
2724 DWC_PRINT("TX Queue or FIFO Full (0x%0x)\n",
2725 gtxstatus.d32);
2726 #endif
2727 return;
2728 }
2729
2730
2731 depctl.d32 = dwc_read_reg32(&in_regs->diepctl);
2732 deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz);
2733
2734 /* Zero Length Packet? */
2735 if (ep->xfer_len == 0) {
2736 deptsiz.b.xfersize = 0;
2737 deptsiz.b.pktcnt = 1;
2738 }
2739 else {
2740 /* Program the transfer size and packet count
2741 * as follows: xfersize = N * maxpacket +
2742 * short_packet pktcnt = N + (short_packet
2743 * exist ? 1 : 0)
2744 */
2745 if (ep->xfer_len > ep->maxpacket) {
2746 ep->xfer_len = ep->maxpacket;
2747 deptsiz.b.xfersize = ep->maxpacket;
2748 }
2749 else {
2750 deptsiz.b.xfersize = ep->xfer_len;
2751 }
2752 deptsiz.b.pktcnt = 1;
2753
2754 }
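		/* EP0 is driven one packet at a time: anything longer than
		 * maxpacket is clamped above and continued later by
		 * dwc_otg_ep0_continue_transfer(). */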
2755 DWC_DEBUGPL(DBG_PCDV, "IN len=%d xfersize=%d pktcnt=%d [%08x]\n",
2756 ep->xfer_len,
2757 deptsiz.b.xfersize, deptsiz.b.pktcnt, deptsiz.d32);
2758 /* Write the DMA register */
2759 if (core_if->dma_enable) {
2760                         if (ep->dma_addr == DMA_ADDR_INVALID) {
2761                                 ep->dma_addr = dma_map_single(NULL, (void *)(ep->xfer_buff), (ep->xfer_len), DMA_TO_DEVICE);
2762 }
2763 if(core_if->dma_desc_enable == 0) {
2764 dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
2765
2766 VERIFY_PCD_DMA_ADDR(ep->dma_addr);
2767 dwc_write_reg32 (&(in_regs->diepdma),
2768 (uint32_t)ep->dma_addr);
2769 }
2770 else {
2771 dma_desc = core_if->dev_if->in_desc_addr;
2772
2773 /** DMA Descriptor Setup */
2774 dma_desc->status.b.bs = BS_HOST_BUSY;
2775 dma_desc->status.b.l = 1;
2776 dma_desc->status.b.ioc = 1;
2777 dma_desc->status.b.sp = (ep->xfer_len == ep->maxpacket) ? 0 : 1;
2778 dma_desc->status.b.bytes = ep->xfer_len;
2779 dma_desc->buf = ep->dma_addr;
2780 dma_desc->status.b.bs = BS_HOST_READY;
2781
2782 /** DIEPDMA0 Register write */
2783
2784 VERIFY_PCD_DMA_ADDR(core_if->dev_if->dma_in_desc_addr);
2785 dwc_write_reg32(&in_regs->diepdma, core_if->dev_if->dma_in_desc_addr);
2786 }
2787 }
2788 else {
2789 dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
2790 }
2791
2792 /* EP enable, IN data in FIFO */
2793 depctl.b.cnak = 1;
2794 depctl.b.epena = 1;
2795 dwc_write_reg32(&in_regs->diepctl, depctl.d32);
2796
2797 /**
2798 * Enable the Non-Periodic Tx FIFO empty interrupt, the
2799 * data will be written into the fifo by the ISR.
2800 */
2801 if (!core_if->dma_enable) {
2802 if(core_if->en_multiple_tx_fifo == 0) {
2803 intr_mask.b.nptxfempty = 1;
2804 dwc_modify_reg32(&core_if->core_global_regs->gintmsk,
2805 intr_mask.d32, intr_mask.d32);
2806 }
2807 else {
2808 /* Enable the Tx FIFO Empty Interrupt for this EP */
2809 if(ep->xfer_len > 0) {
2810 uint32_t fifoemptymsk = 0;
2811 fifoemptymsk |= 1 << ep->num;
2812 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
2813 0, fifoemptymsk);
2814 }
2815 }
2816 }
2817 }
2818 else {
2819 /* OUT endpoint */
2820 dwc_otg_dev_out_ep_regs_t *out_regs =
2821 core_if->dev_if->out_ep_regs[0];
2822
2823 depctl.d32 = dwc_read_reg32(&out_regs->doepctl);
2824 deptsiz.d32 = dwc_read_reg32(&out_regs->doeptsiz);
2825
2826 /* Program the transfer size and packet count as follows:
2827 * xfersize = N * (maxpacket + 4 - (maxpacket % 4))
2828 * pktcnt = N */
2829 /* Zero Length Packet */
2830 deptsiz.b.xfersize = ep->maxpacket;
2831 deptsiz.b.pktcnt = 1;
2832
2833 DWC_DEBUGPL(DBG_PCDV, "len=%d xfersize=%d pktcnt=%d\n",
2834 ep->xfer_len,
2835 deptsiz.b.xfersize, deptsiz.b.pktcnt);
2836
2837 if (core_if->dma_enable) {
2838                         if (ep->dma_addr == DMA_ADDR_INVALID) {
2839                                 ep->dma_addr = dma_map_single(NULL, (void *)(ep->xfer_buff), (ep->xfer_len), DMA_FROM_DEVICE); /* OUT buffer is written by the core */
2840 }
2841 if(!core_if->dma_desc_enable) {
2842 dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
2843
2844
2845 VERIFY_PCD_DMA_ADDR(ep->dma_addr);
2846 dwc_write_reg32 (&(out_regs->doepdma),
2847 (uint32_t)ep->dma_addr);
2848 }
2849 else {
2850 dma_desc = core_if->dev_if->out_desc_addr;
2851
2852 /** DMA Descriptor Setup */
2853 dma_desc->status.b.bs = BS_HOST_BUSY;
2854 dma_desc->status.b.l = 1;
2855 dma_desc->status.b.ioc = 1;
2856 dma_desc->status.b.bytes = ep->maxpacket;
2857 dma_desc->buf = ep->dma_addr;
2858 dma_desc->status.b.bs = BS_HOST_READY;
2859
2860 /** DOEPDMA0 Register write */
2861 VERIFY_PCD_DMA_ADDR(core_if->dev_if->dma_out_desc_addr);
2862 dwc_write_reg32(&out_regs->doepdma, core_if->dev_if->dma_out_desc_addr);
2863 }
2864 }
2865 else {
2866 dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
2867 }
2868
2869 /* EP enable */
2870 depctl.b.cnak = 1;
2871 depctl.b.epena = 1;
2872 dwc_write_reg32 (&(out_regs->doepctl), depctl.d32);
2873 }
2874 }
2875
2876 /**
2877 * This function continues control IN transfers started by
2878 * dwc_otg_ep0_start_transfer, when the transfer does not fit in a
2879  * single packet.  NOTE: the packet count fields in the DIEPTSIZ0/DOEPTSIZ0
2880  * registers are only one or two bits wide, so each call programs one packet.
2881 *
2882 * @param core_if Programming view of DWC_otg controller.
2883 * @param ep The EP0 data.
2884 */
2885 void dwc_otg_ep0_continue_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2886 {
2887 depctl_data_t depctl;
2888 deptsiz0_data_t deptsiz;
2889 gintmsk_data_t intr_mask = { .d32 = 0};
2890 dwc_otg_dma_desc_t* dma_desc;
2891
2892 if (ep->is_in == 1) {
2893 dwc_otg_dev_in_ep_regs_t *in_regs =
2894 core_if->dev_if->in_ep_regs[0];
2895 gnptxsts_data_t tx_status = { .d32 = 0 };
2896
2897 tx_status.d32 = dwc_read_reg32(&core_if->core_global_regs->gnptxsts);
2898                 /** @todo Should there be a check for room in the Tx
2899                  * Status Queue?  If not, remove the code above this comment. */
2900
2901 depctl.d32 = dwc_read_reg32(&in_regs->diepctl);
2902 deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz);
2903
2904 /* Program the transfer size and packet count
2905 * as follows: xfersize = N * maxpacket +
2906 * short_packet pktcnt = N + (short_packet
2907 * exist ? 1 : 0)
2908 */
2909
2910
2911 if(core_if->dma_desc_enable == 0) {
2912 deptsiz.b.xfersize = (ep->total_len - ep->xfer_count) > ep->maxpacket ? ep->maxpacket :
2913 (ep->total_len - ep->xfer_count);
2914 deptsiz.b.pktcnt = 1;
2915 if(core_if->dma_enable == 0) {
2916 ep->xfer_len += deptsiz.b.xfersize;
2917 } else {
2918 ep->xfer_len = deptsiz.b.xfersize;
2919 }
2920 dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
2921 }
2922 else {
2923 ep->xfer_len = (ep->total_len - ep->xfer_count) > ep->maxpacket ? ep->maxpacket :
2924 (ep->total_len - ep->xfer_count);
2925
2926 dma_desc = core_if->dev_if->in_desc_addr;
2927
2928 /** DMA Descriptor Setup */
2929 dma_desc->status.b.bs = BS_HOST_BUSY;
2930 dma_desc->status.b.l = 1;
2931 dma_desc->status.b.ioc = 1;
2932 dma_desc->status.b.sp = (ep->xfer_len == ep->maxpacket) ? 0 : 1;
2933 dma_desc->status.b.bytes = ep->xfer_len;
2934 dma_desc->buf = ep->dma_addr;
2935 dma_desc->status.b.bs = BS_HOST_READY;
2936
2937
2938 /** DIEPDMA0 Register write */
2939 VERIFY_PCD_DMA_ADDR(core_if->dev_if->dma_in_desc_addr);
2940 dwc_write_reg32(&in_regs->diepdma, core_if->dev_if->dma_in_desc_addr);
2941 }
2942
2943
2944 DWC_DEBUGPL(DBG_PCDV, "IN len=%d xfersize=%d pktcnt=%d [%08x]\n",
2945 ep->xfer_len,
2946 deptsiz.b.xfersize, deptsiz.b.pktcnt, deptsiz.d32);
2947
2948 /* Write the DMA register */
2949 if (core_if->hwcfg2.b.architecture == DWC_INT_DMA_ARCH) {
2950 if(core_if->dma_desc_enable == 0){
2951
2952 VERIFY_PCD_DMA_ADDR(ep->dma_addr);
2953 dwc_write_reg32 (&(in_regs->diepdma), (uint32_t)ep->dma_addr);
2954 }
2955 }
2956
2957 /* EP enable, IN data in FIFO */
2958 depctl.b.cnak = 1;
2959 depctl.b.epena = 1;
2960 dwc_write_reg32(&in_regs->diepctl, depctl.d32);
2961
2962 /**
2963 * Enable the Non-Periodic Tx FIFO empty interrupt, the
2964 * data will be written into the fifo by the ISR.
2965 */
2966 if (!core_if->dma_enable) {
2967 if(core_if->en_multiple_tx_fifo == 0) {
2968 /* First clear it from GINTSTS */
2969 intr_mask.b.nptxfempty = 1;
2970 dwc_modify_reg32(&core_if->core_global_regs->gintmsk,
2971 intr_mask.d32, intr_mask.d32);
2972
2973 }
2974 else {
2975 /* Enable the Tx FIFO Empty Interrupt for this EP */
2976 if(ep->xfer_len > 0) {
2977 uint32_t fifoemptymsk = 0;
2978 fifoemptymsk |= 1 << ep->num;
2979 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
2980 0, fifoemptymsk);
2981 }
2982 }
2983 }
2984 }
2985 else {
2986 dwc_otg_dev_out_ep_regs_t *out_regs =
2987 core_if->dev_if->out_ep_regs[0];
2988
2989
2990 depctl.d32 = dwc_read_reg32(&out_regs->doepctl);
2991 deptsiz.d32 = dwc_read_reg32(&out_regs->doeptsiz);
2992
2993 /* Program the transfer size and packet count
2994 * as follows: xfersize = N * maxpacket +
2995 * short_packet pktcnt = N + (short_packet
2996 * exist ? 1 : 0)
2997 */
2998 deptsiz.b.xfersize = ep->maxpacket;
2999 deptsiz.b.pktcnt = 1;
3000
3001
3002 if(core_if->dma_desc_enable == 0) {
3003 dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
3004 }
3005 else {
3006 dma_desc = core_if->dev_if->out_desc_addr;
3007
3008 /** DMA Descriptor Setup */
3009 dma_desc->status.b.bs = BS_HOST_BUSY;
3010 dma_desc->status.b.l = 1;
3011 dma_desc->status.b.ioc = 1;
3012 dma_desc->status.b.bytes = ep->maxpacket;
3013 dma_desc->buf = ep->dma_addr;
3014 dma_desc->status.b.bs = BS_HOST_READY;
3015
3016 /** DOEPDMA0 Register write */
3017 VERIFY_PCD_DMA_ADDR(core_if->dev_if->dma_out_desc_addr);
3018 dwc_write_reg32(&out_regs->doepdma, core_if->dev_if->dma_out_desc_addr);
3019 }
3020
3021
3022         DWC_DEBUGPL(DBG_PCDV, "OUT len=%d  xfersize=%d pktcnt=%d [%08x]\n",
3023 ep->xfer_len,
3024 deptsiz.b.xfersize, deptsiz.b.pktcnt, deptsiz.d32);
3025
3026 /* Write the DMA register */
3027 if (core_if->hwcfg2.b.architecture == DWC_INT_DMA_ARCH) {
3028 if(core_if->dma_desc_enable == 0){
3029
3030 VERIFY_PCD_DMA_ADDR(ep->dma_addr);
3031 dwc_write_reg32 (&(out_regs->doepdma), (uint32_t)ep->dma_addr);
3032 }
3033 }
3034
3035         /* EP enable */
3036 depctl.b.cnak = 1;
3037 depctl.b.epena = 1;
3038 dwc_write_reg32(&out_regs->doepctl, depctl.d32);
3039
3040 }
3041 }
3042
3043 #ifdef DEBUG
3044 void dump_msg(const u8 *buf, unsigned int length)
3045 {
3046 unsigned int start, num, i;
3047 char line[52], *p;
3048
3049 if (length >= 512)
3050 return;
3051 start = 0;
3052 while (length > 0) {
3053 num = min(length, 16u);
3054 p = line;
3055 for (i = 0; i < num; ++i)
3056 {
3057 if (i == 8)
3058 *p++ = ' ';
3059 sprintf(p, " %02x", buf[i]);
3060 p += 3;
3061 }
3062 *p = 0;
3063 DWC_PRINT("%6x: %s\n", start, line);
3064 buf += num;
3065 start += num;
3066 length -= num;
3067 }
3068 }
3069 #else
3070 static inline void dump_msg(const u8 *buf, unsigned int length)
3071 {
3072 }
3073 #endif
3074
3075 /**
3076 * This function writes a packet into the Tx FIFO associated with the
3077 * EP. For non-periodic EPs the non-periodic Tx FIFO is written. For
3078 * periodic EPs the periodic Tx FIFO associated with the EP is written
3079 * with all packets for the next micro-frame.
3080 *
3081 * @param core_if Programming view of DWC_otg controller.
3082 * @param ep The EP to write packet for.
3083 * @param dma Indicates if DMA is being used.
3084 */
3085 void dwc_otg_ep_write_packet(dwc_otg_core_if_t *core_if, dwc_ep_t *ep, int dma)
3086 {
3087 /**
3088 * The buffer is padded to DWORD on a per packet basis in
3089 * slave/dma mode if the MPS is not DWORD aligned. The last
3090 * packet, if short, is also padded to a multiple of DWORD.
3091 *
3092 * ep->xfer_buff always starts DWORD aligned in memory and is a
3093 * multiple of DWORD in length
3094 *
3095 * ep->xfer_len can be any number of bytes
3096 *
3097 * ep->xfer_count is a multiple of ep->maxpacket until the last
3098 * packet
3099 *
3100 * FIFO access is DWORD */
3101
3102 uint32_t i;
3103 uint32_t byte_count;
3104 uint32_t dword_count;
3105 uint32_t *fifo;
3106 uint32_t *data_buff = (uint32_t *)ep->xfer_buff;
3107
3108 DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p)\n", __func__, core_if, ep);
3109 if (ep->xfer_count >= ep->xfer_len) {
3110 DWC_WARN("%s() No data for EP%d!!!\n", __func__, ep->num);
3111 return;
3112 }
3113
3114 /* Find the byte length of the packet either short packet or MPS */
3115 if ((ep->xfer_len - ep->xfer_count) < ep->maxpacket) {
3116 byte_count = ep->xfer_len - ep->xfer_count;
3117 }
3118 else {
3119 byte_count = ep->maxpacket;
3120 }
3121
3122         /* Find the DWORD length, padded by extra bytes as necessary if MPS
3123 * is not a multiple of DWORD */
3124 dword_count = (byte_count + 3) / 4;
3125
3126 #ifdef VERBOSE
3127 dump_msg(ep->xfer_buff, byte_count);
3128 #endif
3129
3130 /**@todo NGS Where are the Periodic Tx FIFO addresses
3131          * initialized?  What should this be? */
3132
3133 fifo = core_if->data_fifo[ep->num];
3134
3135
3136 DWC_DEBUGPL((DBG_PCDV|DBG_CILV), "fifo=%p buff=%p *p=%08x bc=%d\n", fifo, data_buff, *data_buff, byte_count);
3137
3138 if (!dma) {
3139 for (i=0; i<dword_count; i++, data_buff++) {
3140 dwc_write_reg32(fifo, *data_buff);
3141 }
3142 }
3143
3144 ep->xfer_count += byte_count;
3145 ep->xfer_buff += byte_count;
3146 ep->dma_addr += byte_count;
3147 }
3148
3149 /**
3150 * Set the EP STALL.
3151 *
3152 * @param core_if Programming view of DWC_otg controller.
3153 * @param ep The EP to set the stall on.
3154 */
3155 void dwc_otg_ep_set_stall(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
3156 {
3157 depctl_data_t depctl;
3158 volatile uint32_t *depctl_addr;
3159
3160         DWC_DEBUGPL(DBG_PCDV, "%s ep%d-%s\n", __func__, ep->num,
3161 (ep->is_in?"IN":"OUT"));
3162
3163 DWC_PRINT("%s ep%d-%s\n", __func__, ep->num,
3164 (ep->is_in?"in":"out"));
3165
3166 if (ep->is_in == 1) {
3167 depctl_addr = &(core_if->dev_if->in_ep_regs[ep->num]->diepctl);
3168 depctl.d32 = dwc_read_reg32(depctl_addr);
3169
3170 /* set the disable and stall bits */
3171 #if 0
3172 //epdis is set here but not cleared at latter dwc_otg_ep_clear_stall,
3173 //which cause the testusb item 13 failed(Host:pc, device: otg device)
3174 if (depctl.b.epena) {
3175 depctl.b.epdis = 1;
3176 }
3177 #endif
3178 depctl.b.stall = 1;
3179 dwc_write_reg32(depctl_addr, depctl.d32);
3180 }
3181 else {
3182 depctl_addr = &(core_if->dev_if->out_ep_regs[ep->num]->doepctl);
3183 depctl.d32 = dwc_read_reg32(depctl_addr);
3184
3185 /* set the stall bit */
3186 depctl.b.stall = 1;
3187 dwc_write_reg32(depctl_addr, depctl.d32);
3188 }
3189
3190 DWC_DEBUGPL(DBG_PCDV,"%s: DEPCTL(%.8x)=%0x\n",__func__,(u32)depctl_addr,dwc_read_reg32(depctl_addr));
3191
3192 return;
3193 }
3194
3195 /**
3196 * Clear the EP STALL.
3197 *
3198 * @param core_if Programming view of DWC_otg controller.
3199 * @param ep The EP to clear stall from.
3200 */
3201 void dwc_otg_ep_clear_stall(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
3202 {
3203 depctl_data_t depctl;
3204 volatile uint32_t *depctl_addr;
3205
3206 DWC_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, ep->num,
3207 (ep->is_in?"IN":"OUT"));
3208
3209 if (ep->is_in == 1) {
3210 depctl_addr = &(core_if->dev_if->in_ep_regs[ep->num]->diepctl);
3211 }
3212 else {
3213 depctl_addr = &(core_if->dev_if->out_ep_regs[ep->num]->doepctl);
3214 }
3215
3216 depctl.d32 = dwc_read_reg32(depctl_addr);
3217
3218 /* clear the stall bits */
3219 depctl.b.stall = 0;
3220
3221 /*
3222 * USB Spec 9.4.5: For endpoints using data toggle, regardless
3223 * of whether an endpoint has the Halt feature set, a
3224 * ClearFeature(ENDPOINT_HALT) request always results in the
3225 * data toggle being reinitialized to DATA0.
3226 */
3227 if (ep->type == DWC_OTG_EP_TYPE_INTR ||
3228 ep->type == DWC_OTG_EP_TYPE_BULK) {
3229 depctl.b.setd0pid = 1; /* DATA0 */
3230 }
3231
3232 dwc_write_reg32(depctl_addr, depctl.d32);
3233 DWC_DEBUGPL(DBG_PCD,"DEPCTL=%0x\n",dwc_read_reg32(depctl_addr));
3234 return;
3235 }
3236
3237 /**
3238 * This function reads a packet from the Rx FIFO into the destination
3239 * buffer. To read SETUP data use dwc_otg_read_setup_packet.
3240 *
3241 * @param core_if Programming view of DWC_otg controller.
3242 * @param dest Destination buffer for the packet.
3243 * @param bytes Number of bytes to copy to the destination.
3244 */
3245 void dwc_otg_read_packet(dwc_otg_core_if_t *core_if,
3246 uint8_t *dest,
3247 uint16_t bytes)
3248 {
3249 int i;
3250 int word_count = (bytes + 3) / 4;
3251
3252 volatile uint32_t *fifo = core_if->data_fifo[0];
3253 uint32_t *data_buff = (uint32_t *)dest;
3254
3255 /**
3256 * @todo Account for the case where _dest is not dword aligned. This
3257 * requires reading data from the FIFO into a uint32_t temp buffer,
3258 * then moving it into the data buffer.
3259 */
3260
3261 DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p,%d)\n", __func__,
3262 core_if, dest, bytes);
3263
3264 for (i=0; i<word_count; i++, data_buff++)
3265 {
3266 *data_buff = dwc_read_reg32(fifo);
3267 }
3268
3269 return;
3270 }
3271
3272
3273
3274 /**
3275  * This function reads the device registers and prints them
3276 *
3277 * @param core_if Programming view of DWC_otg controller.
3278 */
3279 void dwc_otg_dump_dev_registers(dwc_otg_core_if_t *core_if)
3280 {
3281 int i;
3282 volatile uint32_t *addr;
3283
3284 DWC_PRINT("Device Global Registers\n");
3285 addr=&core_if->dev_if->dev_global_regs->dcfg;
3286 DWC_PRINT("DCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3287 addr=&core_if->dev_if->dev_global_regs->dctl;
3288 DWC_PRINT("DCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3289 addr=&core_if->dev_if->dev_global_regs->dsts;
3290 DWC_PRINT("DSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3291 addr=&core_if->dev_if->dev_global_regs->diepmsk;
3292 DWC_PRINT("DIEPMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3293 addr=&core_if->dev_if->dev_global_regs->doepmsk;
3294 DWC_PRINT("DOEPMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3295 addr=&core_if->dev_if->dev_global_regs->daint;
3296 DWC_PRINT("DAINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3297 addr=&core_if->dev_if->dev_global_regs->daintmsk;
3298 DWC_PRINT("DAINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3299 addr=&core_if->dev_if->dev_global_regs->dtknqr1;
3300 DWC_PRINT("DTKNQR1 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3301 if (core_if->hwcfg2.b.dev_token_q_depth > 6) {
3302 addr=&core_if->dev_if->dev_global_regs->dtknqr2;
3303 DWC_PRINT("DTKNQR2 @0x%08X : 0x%08X\n",
3304 (uint32_t)addr,dwc_read_reg32(addr));
3305 }
3306
3307 addr=&core_if->dev_if->dev_global_regs->dvbusdis;
3308 DWC_PRINT("DVBUSID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3309
3310 addr=&core_if->dev_if->dev_global_regs->dvbuspulse;
3311 DWC_PRINT("DVBUSPULSE @0x%08X : 0x%08X\n",
3312 (uint32_t)addr,dwc_read_reg32(addr));
3313
3314 if (core_if->hwcfg2.b.dev_token_q_depth > 14) {
3315 addr=&core_if->dev_if->dev_global_regs->dtknqr3_dthrctl;
3316 DWC_PRINT("DTKNQR3_DTHRCTL @0x%08X : 0x%08X\n",
3317 (uint32_t)addr, dwc_read_reg32(addr));
3318 }
3319 /*
3320 if (core_if->hwcfg2.b.dev_token_q_depth > 22) {
3321 addr=&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk;
3322 DWC_PRINT("DTKNQR4 @0x%08X : 0x%08X\n",
3323 (uint32_t)addr, dwc_read_reg32(addr));
3324 }
3325 */
3326 addr=&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk;
3327 DWC_PRINT("FIFOEMPMSK @0x%08X : 0x%08X\n", (uint32_t)addr, dwc_read_reg32(addr));
3328
3329 addr=&core_if->dev_if->dev_global_regs->deachint;
3330 DWC_PRINT("DEACHINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3331 addr=&core_if->dev_if->dev_global_regs->deachintmsk;
3332 DWC_PRINT("DEACHINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3333
3334 for (i=0; i<= core_if->dev_if->num_in_eps; i++) {
3335 addr=&core_if->dev_if->dev_global_regs->diepeachintmsk[i];
3336 DWC_PRINT("DIEPEACHINTMSK[%d] @0x%08X : 0x%08X\n", i, (uint32_t)addr, dwc_read_reg32(addr));
3337 }
3338
3339
3340 for (i=0; i<= core_if->dev_if->num_out_eps; i++) {
3341 addr=&core_if->dev_if->dev_global_regs->doepeachintmsk[i];
3342 DWC_PRINT("DOEPEACHINTMSK[%d] @0x%08X : 0x%08X\n", i, (uint32_t)addr, dwc_read_reg32(addr));
3343 }
3344
3345 for (i=0; i<= core_if->dev_if->num_in_eps; i++) {
3346 DWC_PRINT("Device IN EP %d Registers\n", i);
3347 addr=&core_if->dev_if->in_ep_regs[i]->diepctl;
3348 DWC_PRINT("DIEPCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3349 addr=&core_if->dev_if->in_ep_regs[i]->diepint;
3350 DWC_PRINT("DIEPINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3351 addr=&core_if->dev_if->in_ep_regs[i]->dieptsiz;
3352 DWC_PRINT("DIETSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3353 addr=&core_if->dev_if->in_ep_regs[i]->diepdma;
3354 DWC_PRINT("DIEPDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3355 addr=&core_if->dev_if->in_ep_regs[i]->dtxfsts;
3356 DWC_PRINT("DTXFSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3357                 //Reading DIEPDMAB in non-descriptor DMA mode would halt the AHB bus.
3358 if(core_if->dma_desc_enable){
3359 addr=&core_if->dev_if->in_ep_regs[i]->diepdmab;
3360 DWC_PRINT("DIEPDMAB @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3361 }
3362 }
3363
3364
3365 for (i=0; i<= core_if->dev_if->num_out_eps; i++) {
3366 DWC_PRINT("Device OUT EP %d Registers\n", i);
3367 addr=&core_if->dev_if->out_ep_regs[i]->doepctl;
3368 DWC_PRINT("DOEPCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3369 addr=&core_if->dev_if->out_ep_regs[i]->doepfn;
3370 DWC_PRINT("DOEPFN @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3371 addr=&core_if->dev_if->out_ep_regs[i]->doepint;
3372 DWC_PRINT("DOEPINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3373 addr=&core_if->dev_if->out_ep_regs[i]->doeptsiz;
3374 DWC_PRINT("DOETSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3375 addr=&core_if->dev_if->out_ep_regs[i]->doepdma;
3376 DWC_PRINT("DOEPDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3377
3378                 //Reading DOEPDMAB in non-descriptor DMA mode would halt the AHB bus.
3379 if(core_if->dma_desc_enable){
3380 addr=&core_if->dev_if->out_ep_regs[i]->doepdmab;
3381 DWC_PRINT("DOEPDMAB @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3382 }
3383
3384 }
3385
3386
3387
3388 return;
3389 }
3390
3391 /**
3392  * This function reads the SPRAM and prints its contents
3393 *
3394 * @param core_if Programming view of DWC_otg controller.
3395 */
3396 void dwc_otg_dump_spram(dwc_otg_core_if_t *core_if)
3397 {
3398 volatile uint8_t *addr, *start_addr, *end_addr;
3399
3400 DWC_PRINT("SPRAM Data:\n");
3401 start_addr = (void*)core_if->core_global_regs;
3402 DWC_PRINT("Base Address: 0x%8X\n", (uint32_t)start_addr);
3403 start_addr += 0x00028000;
3404 end_addr=(void*)core_if->core_global_regs;
3405 end_addr += 0x000280e0;
3406
3407 for(addr = start_addr; addr < end_addr; addr+=16)
3408 {
3409 DWC_PRINT("0x%8X:\t%2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X\n", (uint32_t)addr,
3410 addr[0],
3411 addr[1],
3412 addr[2],
3413 addr[3],
3414 addr[4],
3415 addr[5],
3416 addr[6],
3417 addr[7],
3418 addr[8],
3419 addr[9],
3420 addr[10],
3421 addr[11],
3422 addr[12],
3423 addr[13],
3424 addr[14],
3425 addr[15]
3426 );
3427 }
3428
3429 return;
3430 }
3431 /**
3432 * This function reads the host registers and prints them
3433 *
3434 * @param core_if Programming view of DWC_otg controller.
3435 */
3436 void dwc_otg_dump_host_registers(dwc_otg_core_if_t *core_if)
3437 {
3438 int i;
3439 volatile uint32_t *addr;
3440
3441 DWC_PRINT("Host Global Registers\n");
3442 addr=&core_if->host_if->host_global_regs->hcfg;
3443 DWC_PRINT("HCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3444 addr=&core_if->host_if->host_global_regs->hfir;
3445 DWC_PRINT("HFIR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3446 addr=&core_if->host_if->host_global_regs->hfnum;
3447 DWC_PRINT("HFNUM @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3448 addr=&core_if->host_if->host_global_regs->hptxsts;
3449 DWC_PRINT("HPTXSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3450 addr=&core_if->host_if->host_global_regs->haint;
3451 DWC_PRINT("HAINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3452 addr=&core_if->host_if->host_global_regs->haintmsk;
3453 DWC_PRINT("HAINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3454 addr=core_if->host_if->hprt0;
3455 DWC_PRINT("HPRT0 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3456
3457 for (i=0; i<core_if->core_params->host_channels; i++)
3458 {
3459 DWC_PRINT("Host Channel %d Specific Registers\n", i);
3460 addr=&core_if->host_if->hc_regs[i]->hcchar;
3461 DWC_PRINT("HCCHAR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3462 addr=&core_if->host_if->hc_regs[i]->hcsplt;
3463 DWC_PRINT("HCSPLT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3464 addr=&core_if->host_if->hc_regs[i]->hcint;
3465 DWC_PRINT("HCINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3466 addr=&core_if->host_if->hc_regs[i]->hcintmsk;
3467 DWC_PRINT("HCINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3468 addr=&core_if->host_if->hc_regs[i]->hctsiz;
3469 DWC_PRINT("HCTSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3470 addr=&core_if->host_if->hc_regs[i]->hcdma;
3471 DWC_PRINT("HCDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3472 }
3473 return;
3474 }
3475
3476 /**
3477 * This function reads the core global registers and prints them
3478 *
3479 * @param core_if Programming view of DWC_otg controller.
3480 */
3481 void dwc_otg_dump_global_registers(dwc_otg_core_if_t *core_if)
3482 {
3483 int i,size;
3484 char* str;
3485 volatile uint32_t *addr;
3486
3487 DWC_PRINT("Core Global Registers\n");
3488 addr=&core_if->core_global_regs->gotgctl;
3489 DWC_PRINT("GOTGCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3490 addr=&core_if->core_global_regs->gotgint;
3491 DWC_PRINT("GOTGINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3492 addr=&core_if->core_global_regs->gahbcfg;
3493 DWC_PRINT("GAHBCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3494 addr=&core_if->core_global_regs->gusbcfg;
3495 DWC_PRINT("GUSBCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3496 addr=&core_if->core_global_regs->grstctl;
3497 DWC_PRINT("GRSTCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3498 addr=&core_if->core_global_regs->gintsts;
3499 DWC_PRINT("GINTSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3500 addr=&core_if->core_global_regs->gintmsk;
3501 DWC_PRINT("GINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3502 addr=&core_if->core_global_regs->grxstsr;
3503 DWC_PRINT("GRXSTSR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3504 //addr=&core_if->core_global_regs->grxstsp;
3505 //DWC_PRINT("GRXSTSP @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
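/* GRXSTSP is a read-and-pop register: reading it would remove the top
 * entry from the Rx FIFO, so only the non-destructive GRXSTSR is dumped. */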
3506 addr=&core_if->core_global_regs->grxfsiz;
3507 DWC_PRINT("GRXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3508 addr=&core_if->core_global_regs->gnptxfsiz;
3509 DWC_PRINT("GNPTXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3510 addr=&core_if->core_global_regs->gnptxsts;
3511 DWC_PRINT("GNPTXSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3512 addr=&core_if->core_global_regs->gi2cctl;
3513 DWC_PRINT("GI2CCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3514 addr=&core_if->core_global_regs->gpvndctl;
3515 DWC_PRINT("GPVNDCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3516 addr=&core_if->core_global_regs->ggpio;
3517 DWC_PRINT("GGPIO @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3518 addr=&core_if->core_global_regs->guid;
3519 DWC_PRINT("GUID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3520 addr=&core_if->core_global_regs->gsnpsid;
3521 DWC_PRINT("GSNPSID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3522 addr=&core_if->core_global_regs->ghwcfg1;
3523 DWC_PRINT("GHWCFG1 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3524 addr=&core_if->core_global_regs->ghwcfg2;
3525 DWC_PRINT("GHWCFG2 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3526 addr=&core_if->core_global_regs->ghwcfg3;
3527 DWC_PRINT("GHWCFG3 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3528 addr=&core_if->core_global_regs->ghwcfg4;
3529 DWC_PRINT("GHWCFG4 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3530 addr=&core_if->core_global_regs->hptxfsiz;
3531 DWC_PRINT("HPTXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3532
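/*
 * With dedicated Tx FIFOs (GHWCFG4.DedFifoEn set) the registers at these
 * addresses are the per-endpoint DIEPTXFn FIFO sizes; otherwise they are
 * the periodic Tx FIFO size registers DPTXFSIZn.
 */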
3533 size=(core_if->hwcfg4.b.ded_fifo_en)?
3534 core_if->hwcfg4.b.num_in_eps:core_if->hwcfg4.b.num_dev_perio_in_ep;
3535 str=(core_if->hwcfg4.b.ded_fifo_en)?"DIEPTXF":"DPTXFSIZ";
3536 for (i=0; i<size; i++)
3537 {
3538 addr=&core_if->core_global_regs->dptxfsiz_dieptxf[i];
3539 DWC_PRINT("%s[%d] @0x%08X : 0x%08X\n",str,i,(uint32_t)addr,dwc_read_reg32(addr));
3540 }
3541 }
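
/*
 * Illustrative sketch (not built): the dump helpers above only need the
 * core interface pointer, so a debug path could print the complete
 * register state in one place. dwc_otg_dump_all() is a hypothetical
 * wrapper, not part of the driver; where such a hook is wired up
 * (ioctl, sysfs, ...) is outside this file.
 */
#if 0
static void dwc_otg_dump_all(dwc_otg_core_if_t *core_if)
{
	dwc_otg_dump_global_registers(core_if);
	dwc_otg_dump_host_registers(core_if);
	dwc_otg_dump_dev_registers(core_if);
	dwc_otg_dump_spram(core_if);
}
#endif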
3542
3543 /**
3544 * Flush a Tx FIFO.
3545 *
3546 * @param core_if Programming view of DWC_otg controller.
3547 * @param num Tx FIFO to flush.
3548 */
3549 void dwc_otg_flush_tx_fifo(dwc_otg_core_if_t *core_if,
3550 const int num)
3551 {
3552 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
3553 volatile grstctl_t greset = { .d32 = 0};
3554 int count = 0;
3555
3556 DWC_DEBUGPL((DBG_CIL|DBG_PCDV), "Flush Tx FIFO %d\n", num);
3557
3558 greset.b.txfflsh = 1;
3559 greset.b.txfnum = num;
3560 dwc_write_reg32(&global_regs->grstctl, greset.d32);
3561
3562 do {
3563 greset.d32 = dwc_read_reg32(&global_regs->grstctl);
3564 if (++count > 10000) {
3565 DWC_WARN("%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
3566 __func__, greset.d32,
3567 dwc_read_reg32(&global_regs->gnptxsts));
3568 break;
3569 }
3570 }
3571 while (greset.b.txfflsh == 1);
3572
3573 /* Wait for 3 PHY Clocks*/
3574 UDELAY(1);
3575 }
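
/*
 * Usage note: the num argument selects which Tx FIFO to flush, e.g.
 *	dwc_otg_flush_tx_fifo(core_if, 0);	// non-periodic Tx FIFO
 * and, per the Synopsys programming model, a TxFNum value of 0x10
 * requests a flush of all Tx FIFOs:
 *	dwc_otg_flush_tx_fifo(core_if, 0x10);	// flush all Tx FIFOs
 */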
3576
3577 /**
3578 * Flush Rx FIFO.
3579 *
3580 * @param core_if Programming view of DWC_otg controller.
3581 */
3582 void dwc_otg_flush_rx_fifo(dwc_otg_core_if_t *core_if)
3583 {
3584 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
3585 volatile grstctl_t greset = { .d32 = 0};
3586 int count = 0;
3587
3588 DWC_DEBUGPL((DBG_CIL|DBG_PCDV), "%s\n", __func__);
3589 /* The core has a single shared Rx FIFO; setting GRSTCTL.RxFFlsh
3590 * flushes it in its entirety, so no FIFO number is needed.
3591 */
3592 greset.b.rxfflsh = 1;
3593 dwc_write_reg32(&global_regs->grstctl, greset.d32);
3594
3595 do {
3596 greset.d32 = dwc_read_reg32(&global_regs->grstctl);
3597 if (++count > 10000) {
3598 DWC_WARN("%s() HANG! GRSTCTL=%0x\n", __func__,
3599 greset.d32);
3600 break;
3601 }
3602 }
3603 while (greset.b.rxfflsh == 1);
3604
3605 /* Wait for 3 PHY Clocks*/
3606 UDELAY(1);
3607 }
3608
3609 /**
3610 * Do a soft reset of the core. Be careful with this because it
3611 * resets all the internal state machines of the core.
3612 */
3613 void dwc_otg_core_reset(dwc_otg_core_if_t *core_if)
3614 {
3615 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
3616 volatile grstctl_t greset = { .d32 = 0};
3617 int count = 0;
3618
3619 DWC_DEBUGPL(DBG_CILV, "%s\n", __func__);
3620 /* Wait for AHB master IDLE state. */
3621 do {
3622 UDELAY(10);
3623 greset.d32 = dwc_read_reg32(&global_regs->grstctl);
3624 if (++count > 100000) {
3625 DWC_WARN("%s() HANG! AHB Idle GRSTCTL=%0x\n", __func__,
3626 greset.d32);
3627 return;
3628 }
3629 }
3630 while (greset.b.ahbidle == 0);
3631
3632 /* Core Soft Reset */
3633 count = 0;
3634 greset.b.csftrst = 1;
3635 dwc_write_reg32(&global_regs->grstctl, greset.d32);
3636 do {
3637 greset.d32 = dwc_read_reg32(&global_regs->grstctl);
3638 if (++count > 10000) {
3639 DWC_WARN("%s() HANG! Soft Reset GRSTCTL=%0x\n", __func__,
3640 greset.d32);
3641 break;
3642 }
3643 }
3644 while (greset.b.csftrst == 1);
3645
3646 /* Give the core time to come out of reset (well over the required 3 PHY clocks) */
3647 MDELAY(100);
3648
3649 DWC_DEBUGPL(DBG_CILV, "GINTSTS=%.8x\n", dwc_read_reg32(&global_regs->gintsts));
3650 DWC_DEBUGPL(DBG_CILV, "GINTSTS=%.8x\n", dwc_read_reg32(&global_regs->gintsts));
3651 DWC_DEBUGPL(DBG_CILV, "GINTSTS=%.8x\n", dwc_read_reg32(&global_regs->gintsts));
3652
3653 }
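
/*
 * Illustrative sketch (not built): because the soft reset clears all
 * internal state machines, callers normally reprogram the core right
 * afterwards. dwc_otg_reset_and_reinit() is a hypothetical wrapper and
 * assumes the usual dwc_otg_core_init() entry point from this driver.
 */
#if 0
static void dwc_otg_reset_and_reinit(dwc_otg_core_if_t *core_if)
{
	dwc_otg_core_reset(core_if);	/* clears all core state machines */
	dwc_otg_core_init(core_if);	/* reprogram AHB/USB configuration */
}
#endif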
3654
3655
3656
3657 /**
3658 * Register HCD callbacks. The callbacks are used to start and stop
3659 * the HCD for interrupt processing.
3660 *
3661 * @param core_if Programming view of DWC_otg controller.
3662 * @param cb the HCD callback structure.
3663 * @param p pointer to be passed to callback function (usb_hcd*).
3664 */
3665 void dwc_otg_cil_register_hcd_callbacks(dwc_otg_core_if_t *core_if,
3666 dwc_otg_cil_callbacks_t *cb,
3667 void *p)
3668 {
3669 core_if->hcd_cb = cb;
3670 cb->p = p;
3671 }
3672
3673 /**
3674 * Register PCD callbacks. The callbacks are used to start and stop
3675 * the PCD for interrupt processing.
3676 *
3677 * @param core_if Programming view of DWC_otg controller.
3678 * @param cb the PCD callback structure.
3679 * @param p pointer to be passed to callback function (pcd*).
3680 */
3681 void dwc_otg_cil_register_pcd_callbacks(dwc_otg_core_if_t *core_if,
3682 dwc_otg_cil_callbacks_t *cb,
3683 void *p)
3684 {
3685 core_if->pcd_cb = cb;
3686 cb->p = p;
3687 }
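
/*
 * Illustrative sketch (not built): the HCD/PCD hand in a callback
 * structure plus an opaque pointer that is passed back on every
 * callback. The start/stop member names and handler signatures below
 * are assumptions based on the descriptions above; see otg_cil.h for
 * the real layout.
 */
#if 0
static int example_pcd_start(void *p) { return 0; }	/* hypothetical handler */
static int example_pcd_stop(void *p)  { return 0; }	/* hypothetical handler */

static dwc_otg_cil_callbacks_t example_pcd_callbacks = {
	.start = example_pcd_start,
	.stop = example_pcd_stop,
};

/* from the PCD probe path:
 *	dwc_otg_cil_register_pcd_callbacks(core_if, &example_pcd_callbacks, pcd);
 */
#endif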
3688
3689 #ifdef DWC_EN_ISOC
3690
3691 /**
3692 * This function writes one (micro)frame of isochronous data into the Tx FIFO
3693 *
3694 * @param core_if Programming view of DWC_otg controller.
3695 * @param ep The EP to start the transfer on.
3696 *
3697 */
3698 void write_isoc_frame_data(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
3699 {
3700 dwc_otg_dev_in_ep_regs_t *ep_regs;
3701 dtxfsts_data_t txstatus = {.d32 = 0};
3702 uint32_t len = 0;
3703 uint32_t dwords;
3704
3705 ep->xfer_len = ep->data_per_frame;
3706 ep->xfer_count = 0;
3707
3708 ep_regs = core_if->dev_if->in_ep_regs[ep->num];
3709
3710 len = ep->xfer_len - ep->xfer_count;
3711
3712 if (len > ep->maxpacket) {
3713 len = ep->maxpacket;
3714 }
3715
3716 dwords = (len + 3)/4;
3717
3718 /* While there is space in the queue, space in the FIFO and
3719 * more data to transfer, write packets to the Tx FIFO */
3720 txstatus.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dtxfsts);
3721 DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n",ep->num,txstatus.d32);
3722
3723 while (txstatus.b.txfspcavail > dwords &&
3724 ep->xfer_count < ep->xfer_len &&
3725 ep->xfer_len != 0) {
3726 /* Write the FIFO */
3727 dwc_otg_ep_write_packet(core_if, ep, 0);
3728
3729 len = ep->xfer_len - ep->xfer_count;
3730 if (len > ep->maxpacket) {
3731 len = ep->maxpacket;
3732 }
3733
3734 dwords = (len + 3)/4;
3735 txstatus.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dtxfsts);
3736 DWC_DEBUGPL(DBG_PCDV,"dtxfsts[%d]=0x%08x\n", ep->num, txstatus.d32);
3737 }
3738 }
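
/*
 * Worked example (hypothetical numbers): for an isochronous IN endpoint
 * with data_per_frame = 1024 and maxpacket = 512, the first pass writes
 * one 512-byte packet (dwords = 128) once DTXFSTS reports more than 128
 * free words; the loop repeats for the remaining 512 bytes and stops
 * when xfer_count reaches xfer_len.
 */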
3739
3740
3741 /**
3742 * This function starts an isochronous transfer for one (micro)frame on the EP
3743 *
3744 * @param core_if Programming view of DWC_otg controller.
3745 * @param ep The EP to start the transfer on.
3746 *
3747 */
3748 void dwc_otg_iso_ep_start_frm_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
3749 {
3750 deptsiz_data_t deptsiz = { .d32 = 0 };
3751 depctl_data_t depctl = { .d32 = 0 };
3752 dsts_data_t dsts = { .d32 = 0 };
3753 volatile uint32_t *addr;
3754
3755 if(ep->is_in) {
3756 addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
3757 } else {
3758 addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
3759 }
3760
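/* Reset the per-(micro)frame transfer state from the current packet
 * buffer and its DMA address. */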
3761 ep->xfer_len = ep->data_per_frame;
3762 ep->xfer_count = 0;
3763 ep->xfer_buff = ep->cur_pkt_addr;
3764 ep->dma_addr = ep->cur_pkt_dma_addr;
3765
3766 if(ep->is_in) {
3767 /* Program the transfer size and packet count as follows:
3768 * xfersize = N * maxpacket + short_packet
3769 * pktcnt = N + (short_packet exists ? 1 : 0)
3770 * where N is the number of full-size packets.
3771 */
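/* Worked example (hypothetical numbers): xfer_len = 1000 and
 * maxpacket = 256 give pktcnt = (1000 - 1 + 256) / 256 = 4 and
 * xfersize = 1000, i.e. three full packets plus one short packet. */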
3772 deptsiz.b.xfersize = ep->xfer_len;
3773 deptsiz.b.pktcnt =
3774 (ep->xfer_len - 1 + ep->maxpacket) /
3775 ep->maxpacket;
3776 deptsiz.b.mc = deptsiz.b.pktcnt;
3777 dwc_write_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz, deptsiz.d32);
3778
3779 /* Write the DMA register */
3780 if (core_if->dma_enable) {
3781 dwc_write_reg32 (&(core_if->dev_if->in_ep_regs[ep->num]->diepdma), (uint32_t)ep->dma_addr);
3782 }
3783 } else {
3784 deptsiz.b.pktcnt =
3785 (ep->xfer_len + (ep->maxpacket - 1)) /
3786 ep->maxpacket;
3787 deptsiz.b.xfersize = deptsiz.b.pktcnt * ep->maxpacket;
3788
3789 dwc_write_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz, deptsiz.d32);
3790
3791 if (core_if->dma_enable) {
3792 dwc_write_reg32 (&(core_if->dev_if->out_ep_regs[ep->num]->doepdma),
3793 (uint32_t)ep->dma_addr);
3794 }
3795 }
3796
3797
3798 /** Enable endpoint, clear nak */
3799
3800 depctl.d32 = 0;
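/* For isochronous endpoints the even/odd (micro)frame data PID is chosen
 * from the parity of the frame the transfer is scheduled in: odd frames
 * set the D1/odd bit, even frames the D0/even bit. */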
3801 if(ep->bInterval == 1) {
3802 dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts);
3803 ep->next_frame = dsts.b.soffn + ep->bInterval;
3804
3805 if(ep->next_frame & 0x1) {
3806 depctl.b.setd1pid = 1;
3807 } else {
3808 depctl.b.setd0pid = 1;
3809 }
3810 } else {
3811 ep->next_frame += ep->bInterval;
3812
3813 if(ep->next_frame & 0x1) {
3814 depctl.b.setd1pid = 1;
3815 } else {
3816 depctl.b.setd0pid = 1;
3817 }
3818 }
3819 depctl.b.epena = 1;
3820 depctl.b.cnak = 1;
3821
3822 dwc_modify_reg32(addr, 0, depctl.d32);
3823 depctl.d32 = dwc_read_reg32(addr);
3824
3825 if(ep->is_in && core_if->dma_enable == 0) {
3826 write_isoc_frame_data(core_if, ep);
3827 }
3828
3829 }
3830
3831 #endif //DWC_EN_ISOC