various dwc (OTG) driver fixups
target/linux/cns3xxx/files/drivers/usb/dwc/otg_cil.c  (openwrt/staging/lynxis/omap.git)
1 /* ==========================================================================
2 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_cil.c $
3 * $Revision: #147 $
4 * $Date: 2008/10/16 $
5 * $Change: 1117667 $
6 *
7 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
8 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
9 * otherwise expressly agreed to in writing between Synopsys and you.
10 *
11 * The Software IS NOT an item of Licensed Software or Licensed Product under
12 * any End User Software License Agreement or Agreement for Licensed Product
13 * with Synopsys or any supplement thereto. You are permitted to use and
14 * redistribute this Software in source and binary forms, with or without
15 * modification, provided that redistributions of source code must retain this
16 * notice. You may not view, use, disclose, copy or distribute this file or
17 * any information contained herein except pursuant to this license grant from
18 * Synopsys. If you do not agree with this notice, including the disclaimer
19 * below, then you are not authorized to use the Software.
20 *
21 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
25 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31 * DAMAGE.
32 * ========================================================================== */
33
34 /** @file
35 *
36 * The Core Interface Layer provides basic services for accessing and
37 * managing the DWC_otg hardware. These services are used by both the
38 * Host Controller Driver and the Peripheral Controller Driver.
39 *
40 * The CIL manages the memory map for the core so that the HCD and PCD
41 * don't have to do this separately. It also handles basic tasks like
42 * reading/writing the registers and data FIFOs in the controller.
43 * Some of the data access functions provide encapsulation of several
44 * operations required to perform a task, such as writing multiple
45 * registers to start a transfer. Finally, the CIL performs basic
46 * services that are not specific to either the host or device modes
47 * of operation. These services include management of the OTG Host
48 * Negotiation Protocol (HNP) and Session Request Protocol (SRP). A
49 * Diagnostic API is also provided to allow testing of the controller
50 * hardware.
51 *
52 * The Core Interface Layer has the following requirements:
53 * - Provides basic controller operations.
54 * - Minimal use of OS services.
55 * - The OS services used will be abstracted by using inline functions
56 * or macros.
57 *
58 */
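
/*
 * Illustrative sketch (not part of the driver): the typical way the platform
 * glue is expected to drive the CIL entry points in this file. The names
 * reg_base and module_params are placeholders; it is assumed the caller has
 * already mapped the controller registers and filled in a
 * dwc_otg_core_params_t. The real probe path may order things differently.
 *
 *	dwc_otg_core_if_t *core_if;
 *
 *	core_if = dwc_otg_cil_init(reg_base, &module_params);
 *	if (!core_if)
 *		return -ENOMEM;
 *
 *	dwc_otg_disable_global_interrupts(core_if);
 *	dwc_otg_core_init(core_if);
 *	... request the IRQ, initialize the HCD and/or PCD ...
 *	dwc_otg_enable_global_interrupts(core_if);
 *
 *	... and on removal ...
 *	dwc_otg_cil_remove(core_if);
 */
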
59 #include <asm/unaligned.h>
60 #include <linux/dma-mapping.h>
61 #ifdef DEBUG
62 #include <linux/jiffies.h>
63 #endif
64
65 #include "otg_plat.h"
66 #include "otg_regs.h"
67 #include "otg_cil.h"
68 #include "otg_pcd.h"
69 #include "otg_hcd.h"
70
71 /**
72 * This function is called to initialize the DWC_otg CSR data
73 * structures. The register addresses in the device and host
74 * structures are initialized from the base address supplied by the
75 * caller. The calling function must make the OS calls to get the
76 * base address of the DWC_otg controller registers. The core_params
77 * argument holds the parameters that specify how the core should be
78 * configured.
79 *
80 * @param[in] reg_base_addr Base address of DWC_otg core registers
81 * @param[in] core_params Pointer to the core configuration parameters
82 *
83 */
84 dwc_otg_core_if_t *dwc_otg_cil_init(const uint32_t *reg_base_addr,
85 dwc_otg_core_params_t *core_params)
86 {
87 dwc_otg_core_if_t *core_if = 0;
88 dwc_otg_dev_if_t *dev_if = 0;
89 dwc_otg_host_if_t *host_if = 0;
90 uint8_t *reg_base = (uint8_t *)reg_base_addr;
91 int i = 0;
92
93 DWC_DEBUGPL(DBG_CILV, "%s(%p,%p)\n", __func__, reg_base_addr, core_params);
94
95 core_if = kmalloc(sizeof(dwc_otg_core_if_t), GFP_KERNEL);
96
97 if (core_if == 0) {
98 DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_core_if_t failed\n");
99 return 0;
100 }
101
102 memset(core_if, 0, sizeof(dwc_otg_core_if_t));
103
104 core_if->core_params = core_params;
105 core_if->core_global_regs = (dwc_otg_core_global_regs_t *)reg_base;
106
107 /*
108 * Allocate the Device Mode structures.
109 */
110 dev_if = kmalloc(sizeof(dwc_otg_dev_if_t), GFP_KERNEL);
111
112 if (dev_if == 0) {
113 DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_dev_if_t failed\n");
114 kfree(core_if);
115 return 0;
116 }
117
118 dev_if->dev_global_regs =
119 (dwc_otg_device_global_regs_t *)(reg_base + DWC_DEV_GLOBAL_REG_OFFSET);
120
121 for (i=0; i<MAX_EPS_CHANNELS; i++)
122 {
123 dev_if->in_ep_regs[i] = (dwc_otg_dev_in_ep_regs_t *)
124 (reg_base + DWC_DEV_IN_EP_REG_OFFSET +
125 (i * DWC_EP_REG_OFFSET));
126
127 dev_if->out_ep_regs[i] = (dwc_otg_dev_out_ep_regs_t *)
128 (reg_base + DWC_DEV_OUT_EP_REG_OFFSET +
129 (i * DWC_EP_REG_OFFSET));
130 DWC_DEBUGPL(DBG_CILV, "in_ep_regs[%d]->diepctl=%p\n",
131 i, &dev_if->in_ep_regs[i]->diepctl);
132 DWC_DEBUGPL(DBG_CILV, "out_ep_regs[%d]->doepctl=%p\n",
133 i, &dev_if->out_ep_regs[i]->doepctl);
134 }
135
136 dev_if->speed = 0; // unknown
137
138 core_if->dev_if = dev_if;
139
140 /*
141 * Allocate the Host Mode structures.
142 */
143 host_if = kmalloc(sizeof(dwc_otg_host_if_t), GFP_KERNEL);
144
145 if (host_if == 0) {
146 DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_host_if_t failed\n");
147 kfree(dev_if);
148 kfree(core_if);
149 return 0;
150 }
151
152 host_if->host_global_regs = (dwc_otg_host_global_regs_t *)
153 (reg_base + DWC_OTG_HOST_GLOBAL_REG_OFFSET);
154
155 host_if->hprt0 = (uint32_t*)(reg_base + DWC_OTG_HOST_PORT_REGS_OFFSET);
156
157 for (i=0; i<MAX_EPS_CHANNELS; i++)
158 {
159 host_if->hc_regs[i] = (dwc_otg_hc_regs_t *)
160 (reg_base + DWC_OTG_HOST_CHAN_REGS_OFFSET +
161 (i * DWC_OTG_CHAN_REGS_OFFSET));
162 DWC_DEBUGPL(DBG_CILV, "hc_reg[%d]->hcchar=%p\n",
163 i, &host_if->hc_regs[i]->hcchar);
164 }
165
166 host_if->num_host_channels = MAX_EPS_CHANNELS;
167 core_if->host_if = host_if;
168
169 for (i=0; i<MAX_EPS_CHANNELS; i++)
170 {
171 core_if->data_fifo[i] =
172 (uint32_t *)(reg_base + DWC_OTG_DATA_FIFO_OFFSET +
173 (i * DWC_OTG_DATA_FIFO_SIZE));
174 DWC_DEBUGPL(DBG_CILV, "data_fifo[%d]=0x%08x\n",
175 i, (unsigned)core_if->data_fifo[i]);
176 }
177
178 core_if->pcgcctl = (uint32_t*)(reg_base + DWC_OTG_PCGCCTL_OFFSET);
179
180 /*
181 * Store the contents of the hardware configuration registers here for
182 * easy access later.
183 */
184 core_if->hwcfg1.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg1);
185 core_if->hwcfg2.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg2);
186 core_if->hwcfg3.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg3);
187 core_if->hwcfg4.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg4);
188
189 DWC_DEBUGPL(DBG_CILV,"hwcfg1=%08x\n",core_if->hwcfg1.d32);
190 DWC_DEBUGPL(DBG_CILV,"hwcfg2=%08x\n",core_if->hwcfg2.d32);
191 DWC_DEBUGPL(DBG_CILV,"hwcfg3=%08x\n",core_if->hwcfg3.d32);
192 DWC_DEBUGPL(DBG_CILV,"hwcfg4=%08x\n",core_if->hwcfg4.d32);
193
194 core_if->hcfg.d32 = dwc_read_reg32(&core_if->host_if->host_global_regs->hcfg);
195 core_if->dcfg.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dcfg);
196
197 DWC_DEBUGPL(DBG_CILV,"hcfg=%08x\n",core_if->hcfg.d32);
198 DWC_DEBUGPL(DBG_CILV,"dcfg=%08x\n",core_if->dcfg.d32);
199
200 DWC_DEBUGPL(DBG_CILV,"op_mode=%0x\n",core_if->hwcfg2.b.op_mode);
201 DWC_DEBUGPL(DBG_CILV,"arch=%0x\n",core_if->hwcfg2.b.architecture);
202 DWC_DEBUGPL(DBG_CILV,"num_dev_ep=%d\n",core_if->hwcfg2.b.num_dev_ep);
203 DWC_DEBUGPL(DBG_CILV,"num_host_chan=%d\n",core_if->hwcfg2.b.num_host_chan);
204 DWC_DEBUGPL(DBG_CILV,"nonperio_tx_q_depth=0x%0x\n",core_if->hwcfg2.b.nonperio_tx_q_depth);
205 DWC_DEBUGPL(DBG_CILV,"host_perio_tx_q_depth=0x%0x\n",core_if->hwcfg2.b.host_perio_tx_q_depth);
206 DWC_DEBUGPL(DBG_CILV,"dev_token_q_depth=0x%0x\n",core_if->hwcfg2.b.dev_token_q_depth);
207
208 DWC_DEBUGPL(DBG_CILV,"Total FIFO SZ=%d\n", core_if->hwcfg3.b.dfifo_depth);
209 DWC_DEBUGPL(DBG_CILV,"xfer_size_cntr_width=%0x\n", core_if->hwcfg3.b.xfer_size_cntr_width);
210
211 /*
 212 	 * Set the SRP success bit for FS-I2C
213 */
214 core_if->srp_success = 0;
215 core_if->srp_timer_started = 0;
216
217
218 /*
219 * Create new workqueue and init works
220 */
221 core_if->wq_otg = create_singlethread_workqueue("dwc_otg");
222 if(core_if->wq_otg == 0) {
223 DWC_DEBUGPL(DBG_CIL, "Creation of wq_otg failed\n");
224 kfree(host_if);
225 kfree(dev_if);
226 kfree(core_if);
 227 		return 0;
228 }
229 INIT_WORK(&core_if->w_conn_id, w_conn_id_status_change);
230 INIT_DELAYED_WORK(&core_if->w_wkp, w_wakeup_detected);
231
232 return core_if;
233 }
234
235 /**
236 * This function frees the structures allocated by dwc_otg_cil_init().
237 *
238 * @param[in] core_if The core interface pointer returned from
239 * dwc_otg_cil_init().
240 *
241 */
242 void dwc_otg_cil_remove(dwc_otg_core_if_t *core_if)
243 {
244 /* Disable all interrupts */
245 dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, 1, 0);
246 dwc_write_reg32(&core_if->core_global_regs->gintmsk, 0);
247
248 if (core_if->wq_otg) {
249 destroy_workqueue(core_if->wq_otg);
250 }
251 if (core_if->dev_if) {
252 kfree(core_if->dev_if);
253 }
254 if (core_if->host_if) {
255 kfree(core_if->host_if);
256 }
257 kfree(core_if);
258 }
259
260 /**
261 * This function enables the controller's Global Interrupt in the AHB Config
262 * register.
263 *
264 * @param[in] core_if Programming view of DWC_otg controller.
265 */
266 void dwc_otg_enable_global_interrupts(dwc_otg_core_if_t *core_if)
267 {
268 gahbcfg_data_t ahbcfg = { .d32 = 0};
269 ahbcfg.b.glblintrmsk = 1; /* Enable interrupts */
270 dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, 0, ahbcfg.d32);
271 }
272
273 /**
274 * This function disables the controller's Global Interrupt in the AHB Config
275 * register.
276 *
277 * @param[in] core_if Programming view of DWC_otg controller.
278 */
279 void dwc_otg_disable_global_interrupts(dwc_otg_core_if_t *core_if)
280 {
281 gahbcfg_data_t ahbcfg = { .d32 = 0};
 282 	ahbcfg.b.glblintrmsk = 1; /* Bit to clear: global interrupt enable */
283 dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, ahbcfg.d32, 0);
284 }
285
286 /**
 287 	 * This function initializes the common interrupts, used in both
288 * device and host modes.
289 *
290 * @param[in] core_if Programming view of the DWC_otg controller
291 *
292 */
293 static void dwc_otg_enable_common_interrupts(dwc_otg_core_if_t *core_if)
294 {
295 dwc_otg_core_global_regs_t *global_regs =
296 core_if->core_global_regs;
297 gintmsk_data_t intr_mask = { .d32 = 0};
298
299 /* Clear any pending OTG Interrupts */
300 dwc_write_reg32(&global_regs->gotgint, 0xFFFFFFFF);
301
302 /* Clear any pending interrupts */
303 dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF);
304
305 /*
306 * Enable the interrupts in the GINTMSK.
307 */
308 intr_mask.b.modemismatch = 1;
309 intr_mask.b.otgintr = 1;
310
311 if (!core_if->dma_enable) {
312 intr_mask.b.rxstsqlvl = 1;
313 }
314
315 intr_mask.b.conidstschng = 1;
316 intr_mask.b.wkupintr = 1;
317 intr_mask.b.disconnect = 1;
318 intr_mask.b.usbsuspend = 1;
319 intr_mask.b.sessreqintr = 1;
320 dwc_write_reg32(&global_regs->gintmsk, intr_mask.d32);
321 }
322
323 /**
324 * Initializes the FSLSPClkSel field of the HCFG register depending on the PHY
325 * type.
326 */
327 static void init_fslspclksel(dwc_otg_core_if_t *core_if)
328 {
329 uint32_t val;
330 hcfg_data_t hcfg;
331
332 if (((core_if->hwcfg2.b.hs_phy_type == 2) &&
333 (core_if->hwcfg2.b.fs_phy_type == 1) &&
334 (core_if->core_params->ulpi_fs_ls)) ||
335 (core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) {
336 /* Full speed PHY */
337 val = DWC_HCFG_48_MHZ;
338 }
339 else {
340 /* High speed PHY running at full speed or high speed */
341 val = DWC_HCFG_30_60_MHZ;
342 }
343
344 DWC_DEBUGPL(DBG_CIL, "Initializing HCFG.FSLSPClkSel to 0x%1x\n", val);
345 hcfg.d32 = dwc_read_reg32(&core_if->host_if->host_global_regs->hcfg);
346 hcfg.b.fslspclksel = val;
347 dwc_write_reg32(&core_if->host_if->host_global_regs->hcfg, hcfg.d32);
348 }
349
350 /**
351 * Initializes the DevSpd field of the DCFG register depending on the PHY type
352 * and the enumeration speed of the device.
353 */
354 static void init_devspd(dwc_otg_core_if_t *core_if)
355 {
356 uint32_t val;
357 dcfg_data_t dcfg;
358
359 if (((core_if->hwcfg2.b.hs_phy_type == 2) &&
360 (core_if->hwcfg2.b.fs_phy_type == 1) &&
361 (core_if->core_params->ulpi_fs_ls)) ||
362 (core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) {
363 /* Full speed PHY */
364 val = 0x3;
365 }
366 else if (core_if->core_params->speed == DWC_SPEED_PARAM_FULL) {
367 /* High speed PHY running at full speed */
368 val = 0x1;
369 }
370 else {
371 /* High speed PHY running at high speed */
372 val = 0x0;
373 }
374
375 DWC_DEBUGPL(DBG_CIL, "Initializing DCFG.DevSpd to 0x%1x\n", val);
376
377 dcfg.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dcfg);
378 dcfg.b.devspd = val;
379 dwc_write_reg32(&core_if->dev_if->dev_global_regs->dcfg, dcfg.d32);
380 }
381
382 /**
383 * This function calculates the number of IN EPS
384 * using GHWCFG1 and GHWCFG2 registers values
385 *
386 * @param core_if Programming view of the DWC_otg controller
387 */
388 static uint32_t calc_num_in_eps(dwc_otg_core_if_t *core_if)
389 {
390 uint32_t num_in_eps = 0;
391 uint32_t num_eps = core_if->hwcfg2.b.num_dev_ep;
392 uint32_t hwcfg1 = core_if->hwcfg1.d32 >> 3;
393 uint32_t num_tx_fifos = core_if->hwcfg4.b.num_in_eps;
394 int i;
395
396
397 for(i = 0; i < num_eps; ++i)
398 {
399 if(!(hwcfg1 & 0x1))
400 num_in_eps++;
401
402 hwcfg1 >>= 2;
403 }
404
405 if(core_if->hwcfg4.b.ded_fifo_en) {
406 num_in_eps = (num_in_eps > num_tx_fifos) ? num_tx_fifos : num_in_eps;
407 }
408
409 return num_in_eps;
410 }
411
412
413 /**
414 * This function calculates the number of OUT EPS
415 * using GHWCFG1 and GHWCFG2 registers values
416 *
417 * @param core_if Programming view of the DWC_otg controller
418 */
419 static uint32_t calc_num_out_eps(dwc_otg_core_if_t *core_if)
420 {
421 uint32_t num_out_eps = 0;
422 uint32_t num_eps = core_if->hwcfg2.b.num_dev_ep;
423 uint32_t hwcfg1 = core_if->hwcfg1.d32 >> 2;
424 int i;
425
426 for(i = 0; i < num_eps; ++i)
427 {
428 if(!(hwcfg1 & 0x2))
429 num_out_eps++;
430
431 hwcfg1 >>= 2;
432 }
433 return num_out_eps;
434 }
435 /**
436 * This function initializes the DWC_otg controller registers and
437 * prepares the core for device mode or host mode operation.
438 *
439 * @param core_if Programming view of the DWC_otg controller
440 *
441 */
442 void dwc_otg_core_init(dwc_otg_core_if_t *core_if)
443 {
444 int i = 0;
445 dwc_otg_core_global_regs_t *global_regs =
446 core_if->core_global_regs;
447 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
448 gahbcfg_data_t ahbcfg = { .d32 = 0 };
449 gusbcfg_data_t usbcfg = { .d32 = 0 };
450 gi2cctl_data_t i2cctl = { .d32 = 0 };
451
452 DWC_DEBUGPL(DBG_CILV, "dwc_otg_core_init(%p)\n", core_if);
453
454 /* Common Initialization */
455
456 usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
457
458 // usbcfg.b.tx_end_delay = 1;
459 /* Program the ULPI External VBUS bit if needed */
460 usbcfg.b.ulpi_ext_vbus_drv =
461 (core_if->core_params->phy_ulpi_ext_vbus == DWC_PHY_ULPI_EXTERNAL_VBUS) ? 1 : 0;
462
463 /* Set external TS Dline pulsing */
464 usbcfg.b.term_sel_dl_pulse = (core_if->core_params->ts_dline == 1) ? 1 : 0;
465 dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32);
466
467
468 /* Reset the Controller */
469 dwc_otg_core_reset(core_if);
470
471 /* Initialize parameters from Hardware configuration registers. */
472 dev_if->num_in_eps = calc_num_in_eps(core_if);
473 dev_if->num_out_eps = calc_num_out_eps(core_if);
474
475
476 DWC_DEBUGPL(DBG_CIL, "num_dev_perio_in_ep=%d\n", core_if->hwcfg4.b.num_dev_perio_in_ep);
477
478 for (i=0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; i++)
479 {
480 dev_if->perio_tx_fifo_size[i] =
481 dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]) >> 16;
482 DWC_DEBUGPL(DBG_CIL, "Periodic Tx FIFO SZ #%d=0x%0x\n",
483 i, dev_if->perio_tx_fifo_size[i]);
484 }
485
486 for (i=0; i < core_if->hwcfg4.b.num_in_eps; i++)
487 {
488 dev_if->tx_fifo_size[i] =
489 dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]) >> 16;
490 DWC_DEBUGPL(DBG_CIL, "Tx FIFO SZ #%d=0x%0x\n",
 491 			i, dev_if->tx_fifo_size[i]);
492 }
493
494 core_if->total_fifo_size = core_if->hwcfg3.b.dfifo_depth;
495 core_if->rx_fifo_size =
496 dwc_read_reg32(&global_regs->grxfsiz);
497 core_if->nperio_tx_fifo_size =
498 dwc_read_reg32(&global_regs->gnptxfsiz) >> 16;
499
500 DWC_DEBUGPL(DBG_CIL, "Total FIFO SZ=%d\n", core_if->total_fifo_size);
501 DWC_DEBUGPL(DBG_CIL, "Rx FIFO SZ=%d\n", core_if->rx_fifo_size);
502 DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO SZ=%d\n", core_if->nperio_tx_fifo_size);
503
504 /* This programming sequence needs to happen in FS mode before any other
505 * programming occurs */
506 if ((core_if->core_params->speed == DWC_SPEED_PARAM_FULL) &&
507 (core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) {
508 /* If FS mode with FS PHY */
509
510 /* core_init() is now called on every switch so only call the
511 * following for the first time through. */
512 if (!core_if->phy_init_done) {
513 core_if->phy_init_done = 1;
514 DWC_DEBUGPL(DBG_CIL, "FS_PHY detected\n");
515 usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
516 usbcfg.b.physel = 1;
517 dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32);
518
519 /* Reset after a PHY select */
520 dwc_otg_core_reset(core_if);
521 }
522
523 /* Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also
524 * do this on HNP Dev/Host mode switches (done in dev_init and
525 * host_init). */
526 if (dwc_otg_is_host_mode(core_if)) {
527 init_fslspclksel(core_if);
528 }
529 else {
530 init_devspd(core_if);
531 }
532
533 if (core_if->core_params->i2c_enable) {
534 DWC_DEBUGPL(DBG_CIL, "FS_PHY Enabling I2c\n");
535 /* Program GUSBCFG.OtgUtmifsSel to I2C */
536 usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
537 usbcfg.b.otgutmifssel = 1;
538 dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32);
539
540 /* Program GI2CCTL.I2CEn */
541 i2cctl.d32 = dwc_read_reg32(&global_regs->gi2cctl);
542 i2cctl.b.i2cdevaddr = 1;
543 i2cctl.b.i2cen = 0;
544 dwc_write_reg32 (&global_regs->gi2cctl, i2cctl.d32);
545 i2cctl.b.i2cen = 1;
546 dwc_write_reg32 (&global_regs->gi2cctl, i2cctl.d32);
547 }
548
549 } /* endif speed == DWC_SPEED_PARAM_FULL */
550
551 else {
552 /* High speed PHY. */
553 if (!core_if->phy_init_done) {
554 core_if->phy_init_done = 1;
555 /* HS PHY parameters. These parameters are preserved
556 * during soft reset so only program the first time. Do
557 * a soft reset immediately after setting phyif. */
558 usbcfg.b.ulpi_utmi_sel = core_if->core_params->phy_type;
559 if (usbcfg.b.ulpi_utmi_sel == 1) {
560 /* ULPI interface */
561 usbcfg.b.phyif = 0;
562 usbcfg.b.ddrsel = core_if->core_params->phy_ulpi_ddr;
563 }
564 else {
565 /* UTMI+ interface */
566 if (core_if->core_params->phy_utmi_width == 16) {
567 usbcfg.b.phyif = 1;
568 }
569 else {
570 usbcfg.b.phyif = 0;
571 }
572 }
573
574 dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
575
576 /* Reset after setting the PHY parameters */
577 dwc_otg_core_reset(core_if);
578 }
579 }
580
581 if ((core_if->hwcfg2.b.hs_phy_type == 2) &&
582 (core_if->hwcfg2.b.fs_phy_type == 1) &&
583 (core_if->core_params->ulpi_fs_ls)) {
584 DWC_DEBUGPL(DBG_CIL, "Setting ULPI FSLS\n");
585 usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
586 usbcfg.b.ulpi_fsls = 1;
587 usbcfg.b.ulpi_clk_sus_m = 1;
588 dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
589 }
590 else {
591 usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
592 usbcfg.b.ulpi_fsls = 0;
593 usbcfg.b.ulpi_clk_sus_m = 0;
594 dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
595 }
596
597 /* Program the GAHBCFG Register.*/
598 switch (core_if->hwcfg2.b.architecture) {
599
600 case DWC_SLAVE_ONLY_ARCH:
601 DWC_DEBUGPL(DBG_CIL, "Slave Only Mode\n");
602 ahbcfg.b.nptxfemplvl_txfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY;
603 ahbcfg.b.ptxfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY;
604 core_if->dma_enable = 0;
605 core_if->dma_desc_enable = 0;
606 break;
607
608 case DWC_EXT_DMA_ARCH:
609 DWC_DEBUGPL(DBG_CIL, "External DMA Mode\n");
610 ahbcfg.b.hburstlen = core_if->core_params->dma_burst_size;
611 core_if->dma_enable = (core_if->core_params->dma_enable != 0);
612 core_if->dma_desc_enable = (core_if->core_params->dma_desc_enable != 0);
613 break;
614
615 case DWC_INT_DMA_ARCH:
616 DWC_DEBUGPL(DBG_CIL, "Internal DMA Mode\n");
617 ahbcfg.b.hburstlen = DWC_GAHBCFG_INT_DMA_BURST_INCR;
618 core_if->dma_enable = (core_if->core_params->dma_enable != 0);
619 core_if->dma_desc_enable = (core_if->core_params->dma_desc_enable != 0);
620 break;
621
622 }
623 ahbcfg.b.dmaenable = core_if->dma_enable;
624 dwc_write_reg32(&global_regs->gahbcfg, ahbcfg.d32);
625
626 core_if->en_multiple_tx_fifo = core_if->hwcfg4.b.ded_fifo_en;
627
628 core_if->pti_enh_enable = core_if->core_params->pti_enable != 0;
629 core_if->multiproc_int_enable = core_if->core_params->mpi_enable;
630 DWC_PRINT("Periodic Transfer Interrupt Enhancement - %s\n", ((core_if->pti_enh_enable) ? "enabled": "disabled"));
631 DWC_PRINT("Multiprocessor Interrupt Enhancement - %s\n", ((core_if->multiproc_int_enable) ? "enabled": "disabled"));
632
633 /*
634 * Program the GUSBCFG register.
635 */
636 usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
637
638 switch (core_if->hwcfg2.b.op_mode) {
639 case DWC_MODE_HNP_SRP_CAPABLE:
640 usbcfg.b.hnpcap = (core_if->core_params->otg_cap ==
641 DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE);
642 usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
643 DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
644 break;
645
646 case DWC_MODE_SRP_ONLY_CAPABLE:
647 usbcfg.b.hnpcap = 0;
648 usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
649 DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
650 break;
651
652 case DWC_MODE_NO_HNP_SRP_CAPABLE:
653 usbcfg.b.hnpcap = 0;
654 usbcfg.b.srpcap = 0;
655 break;
656
657 case DWC_MODE_SRP_CAPABLE_DEVICE:
658 usbcfg.b.hnpcap = 0;
659 usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
660 DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
661 break;
662
663 case DWC_MODE_NO_SRP_CAPABLE_DEVICE:
664 usbcfg.b.hnpcap = 0;
665 usbcfg.b.srpcap = 0;
666 break;
667
668 case DWC_MODE_SRP_CAPABLE_HOST:
669 usbcfg.b.hnpcap = 0;
670 usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
671 DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
672 break;
673
674 case DWC_MODE_NO_SRP_CAPABLE_HOST:
675 usbcfg.b.hnpcap = 0;
676 usbcfg.b.srpcap = 0;
677 break;
678 }
679
680 dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
681
682 /* Enable common interrupts */
683 dwc_otg_enable_common_interrupts(core_if);
684
 685 	/* Do device or host initialization based on mode during PCD
686 * and HCD initialization */
687 if (dwc_otg_is_host_mode(core_if)) {
688 DWC_DEBUGPL(DBG_ANY, "Host Mode\n");
689 core_if->op_state = A_HOST;
690 }
691 else {
692 DWC_DEBUGPL(DBG_ANY, "Device Mode\n");
693 core_if->op_state = B_PERIPHERAL;
694 #ifdef DWC_DEVICE_ONLY
695 dwc_otg_core_dev_init(core_if);
696 #endif
697 }
698 }
699
700
701 /**
702 * This function enables the Device mode interrupts.
703 *
704 * @param core_if Programming view of DWC_otg controller
705 */
706 void dwc_otg_enable_device_interrupts(dwc_otg_core_if_t *core_if)
707 {
708 gintmsk_data_t intr_mask = { .d32 = 0};
709 dwc_otg_core_global_regs_t *global_regs =
710 core_if->core_global_regs;
711
712 DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__);
713
714 /* Disable all interrupts. */
715 dwc_write_reg32(&global_regs->gintmsk, 0);
716
717 /* Clear any pending interrupts */
718 dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF);
719
720 /* Enable the common interrupts */
721 dwc_otg_enable_common_interrupts(core_if);
722
723 /* Enable interrupts */
724 intr_mask.b.usbreset = 1;
725 intr_mask.b.enumdone = 1;
726
727 if(!core_if->multiproc_int_enable) {
728 intr_mask.b.inepintr = 1;
729 intr_mask.b.outepintr = 1;
730 }
731
732 intr_mask.b.erlysuspend = 1;
733
734 if(core_if->en_multiple_tx_fifo == 0) {
735 intr_mask.b.epmismatch = 1;
736 }
737
738
739 #ifdef DWC_EN_ISOC
740 if(core_if->dma_enable) {
741 if(core_if->dma_desc_enable == 0) {
742 if(core_if->pti_enh_enable) {
743 dctl_data_t dctl = { .d32 = 0 };
744 dctl.b.ifrmnum = 1;
745 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl, 0, dctl.d32);
746 } else {
747 intr_mask.b.incomplisoin = 1;
748 intr_mask.b.incomplisoout = 1;
749 }
750 }
751 } else {
752 intr_mask.b.incomplisoin = 1;
753 intr_mask.b.incomplisoout = 1;
754 }
755 #endif // DWC_EN_ISOC
756
757 /** @todo NGS: Should this be a module parameter? */
758 #ifdef USE_PERIODIC_EP
759 intr_mask.b.isooutdrop = 1;
760 intr_mask.b.eopframe = 1;
761 intr_mask.b.incomplisoin = 1;
762 intr_mask.b.incomplisoout = 1;
763 #endif
764
765 dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32);
766
767 DWC_DEBUGPL(DBG_CIL, "%s() gintmsk=%0x\n", __func__,
768 dwc_read_reg32(&global_regs->gintmsk));
769 }
770
771 /**
772 * This function initializes the DWC_otg controller registers for
773 * device mode.
774 *
775 * @param core_if Programming view of DWC_otg controller
776 *
777 */
778 void dwc_otg_core_dev_init(dwc_otg_core_if_t *core_if)
779 {
 780 	int i, size;
 781 	uint32_t *default_value_array;
782
783 dwc_otg_core_global_regs_t *global_regs =
784 core_if->core_global_regs;
785 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
786 dwc_otg_core_params_t *params = core_if->core_params;
787 dcfg_data_t dcfg = { .d32 = 0};
788 grstctl_t resetctl = { .d32 = 0 };
789 uint32_t rx_fifo_size;
790 fifosize_data_t nptxfifosize;
791 fifosize_data_t txfifosize;
792 dthrctl_data_t dthrctl;
793
794 /* Restart the Phy Clock */
795 dwc_write_reg32(core_if->pcgcctl, 0);
796
797 /* Device configuration register */
798 init_devspd(core_if);
799 dcfg.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dcfg);
800 dcfg.b.descdma = (core_if->dma_desc_enable) ? 1 : 0;
801 dcfg.b.perfrint = DWC_DCFG_FRAME_INTERVAL_80;
802
803 dwc_write_reg32(&dev_if->dev_global_regs->dcfg, dcfg.d32);
804
805 /* Configure data FIFO sizes */
806 if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) {
807 DWC_DEBUGPL(DBG_CIL, "Total FIFO Size=%d\n", core_if->total_fifo_size);
808 DWC_DEBUGPL(DBG_CIL, "Rx FIFO Size=%d\n", params->dev_rx_fifo_size);
809 DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO Size=%d\n", params->dev_nperio_tx_fifo_size);
810
811 /* Rx FIFO */
812 DWC_DEBUGPL(DBG_CIL, "initial grxfsiz=%08x\n",
813 dwc_read_reg32(&global_regs->grxfsiz));
814
815 rx_fifo_size = params->dev_rx_fifo_size;
816 dwc_write_reg32(&global_regs->grxfsiz, rx_fifo_size);
817
818 DWC_DEBUGPL(DBG_CIL, "new grxfsiz=%08x\n",
819 dwc_read_reg32(&global_regs->grxfsiz));
820
821 /** Set Periodic Tx FIFO Mask all bits 0 */
822 core_if->p_tx_msk = 0;
823
824 /** Set Tx FIFO Mask all bits 0 */
825 core_if->tx_msk = 0;
826
827 /* Non-periodic Tx FIFO */
828 DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n",
829 dwc_read_reg32(&global_regs->gnptxfsiz));
830
831 nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size;
832 nptxfifosize.b.startaddr = params->dev_rx_fifo_size;
833
834 dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32);
835
836 DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n",
837 dwc_read_reg32(&global_regs->gnptxfsiz));
838
839 txfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth;
840 if(core_if->en_multiple_tx_fifo == 0) {
841 //core_if->hwcfg4.b.ded_fifo_en==0
842
843 /**@todo NGS: Fix Periodic FIFO Sizing! */
844 /*
845 * Periodic Tx FIFOs These FIFOs are numbered from 1 to 15.
846 * Indexes of the FIFO size module parameters in the
847 * dev_perio_tx_fifo_size array and the FIFO size registers in
848 * the dptxfsiz array run from 0 to 14.
849 */
850 /** @todo Finish debug of this */
851 size=core_if->hwcfg4.b.num_dev_perio_in_ep;
852 default_value_array=params->dev_perio_tx_fifo_size;
853
854 }
855 else {
856 //core_if->hwcfg4.b.ded_fifo_en==1
857 /*
858 * Tx FIFOs These FIFOs are numbered from 1 to 15.
859 * Indexes of the FIFO size module parameters in the
860 * dev_tx_fifo_size array and the FIFO size registers in
861 * the dptxfsiz_dieptxf array run from 0 to 14.
862 */
863
864 size=core_if->hwcfg4.b.num_in_eps;
865 default_value_array=params->dev_tx_fifo_size;
866
867 }
868 for (i=0; i < size; i++)
869 {
870
871 txfifosize.b.depth = default_value_array[i];
872 DWC_DEBUGPL(DBG_CIL, "initial dptxfsiz_dieptxf[%d]=%08x\n", i,
873 dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]));
874 dwc_write_reg32(&global_regs->dptxfsiz_dieptxf[i],
875 txfifosize.d32);
876 DWC_DEBUGPL(DBG_CIL, "new dptxfsiz_dieptxf[%d]=%08x\n", i,
877 dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]));
878 txfifosize.b.startaddr += txfifosize.b.depth;
879 }
880 }
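
	/*
	 * Worked example of the resulting FIFO map (illustrative numbers only;
	 * the real values come from the module parameters above): with
	 * dev_rx_fifo_size = 1064, dev_nperio_tx_fifo_size = 1024 and two
	 * dedicated Tx FIFOs of 256 words each, the shared FIFO RAM ends up as
	 *
	 *	GRXFSIZ             depth 1064, words    0..1063
	 *	GNPTXFSIZ           depth 1024, start 1064
	 *	DPTXFSIZ/DIEPTXF[0] depth  256, start 2088
	 *	DPTXFSIZ/DIEPTXF[1] depth  256, start 2344
	 *
	 * i.e. each Tx FIFO starts where the previous one ends, exactly as the
	 * loop above programs it.
	 */
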
881 /* Flush the FIFOs */
882 dwc_otg_flush_tx_fifo(core_if, 0x10); /* all Tx FIFOs */
883 dwc_otg_flush_rx_fifo(core_if);
884
885 /* Flush the Learning Queue. */
886 resetctl.b.intknqflsh = 1;
887 dwc_write_reg32(&core_if->core_global_regs->grstctl, resetctl.d32);
888
889 /* Clear all pending Device Interrupts */
890
 894 	/** @todo Does this condition need to be checked here, or should all
 895 	 * pending interrupts be cleared unconditionally?
 896 	 */
897 if(core_if->multiproc_int_enable) {
898 for(i = 0; i < core_if->dev_if->num_in_eps; ++i) {
899 dwc_write_reg32(&dev_if->dev_global_regs->diepeachintmsk[i], 0);
900 }
901
902 for(i = 0; i < core_if->dev_if->num_out_eps; ++i) {
903 dwc_write_reg32(&dev_if->dev_global_regs->doepeachintmsk[i], 0);
904 }
905
906 dwc_write_reg32(&dev_if->dev_global_regs->deachint, 0xFFFFFFFF);
907 dwc_write_reg32(&dev_if->dev_global_regs->deachintmsk, 0);
908 } else {
909 dwc_write_reg32(&dev_if->dev_global_regs->diepmsk, 0);
910 dwc_write_reg32(&dev_if->dev_global_regs->doepmsk, 0);
911 dwc_write_reg32(&dev_if->dev_global_regs->daint, 0xFFFFFFFF);
912 dwc_write_reg32(&dev_if->dev_global_regs->daintmsk, 0);
913 }
914
915 for (i=0; i <= dev_if->num_in_eps; i++)
916 {
917 depctl_data_t depctl;
918 depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl);
919 if (depctl.b.epena) {
920 depctl.d32 = 0;
921 depctl.b.epdis = 1;
922 depctl.b.snak = 1;
923 }
924 else {
925 depctl.d32 = 0;
926 }
927
928 dwc_write_reg32(&dev_if->in_ep_regs[i]->diepctl, depctl.d32);
929
930
931 dwc_write_reg32(&dev_if->in_ep_regs[i]->dieptsiz, 0);
932 dwc_write_reg32(&dev_if->in_ep_regs[i]->diepdma, 0);
933 dwc_write_reg32(&dev_if->in_ep_regs[i]->diepint, 0xFF);
934 }
935
936 for (i=0; i <= dev_if->num_out_eps; i++)
937 {
938 depctl_data_t depctl;
939 depctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[i]->doepctl);
940 if (depctl.b.epena) {
941 depctl.d32 = 0;
942 depctl.b.epdis = 1;
943 depctl.b.snak = 1;
944 }
945 else {
946 depctl.d32 = 0;
947 }
948
949 dwc_write_reg32(&dev_if->out_ep_regs[i]->doepctl, depctl.d32);
950
951 dwc_write_reg32(&dev_if->out_ep_regs[i]->doeptsiz, 0);
952 dwc_write_reg32(&dev_if->out_ep_regs[i]->doepdma, 0);
953 dwc_write_reg32(&dev_if->out_ep_regs[i]->doepint, 0xFF);
954 }
955
956 if(core_if->en_multiple_tx_fifo && core_if->dma_enable) {
957 dev_if->non_iso_tx_thr_en = params->thr_ctl & 0x1;
958 dev_if->iso_tx_thr_en = (params->thr_ctl >> 1) & 0x1;
959 dev_if->rx_thr_en = (params->thr_ctl >> 2) & 0x1;
960
961 dev_if->rx_thr_length = params->rx_thr_length;
962 dev_if->tx_thr_length = params->tx_thr_length;
963
964 dev_if->setup_desc_index = 0;
965
966 dthrctl.d32 = 0;
967 dthrctl.b.non_iso_thr_en = dev_if->non_iso_tx_thr_en;
968 dthrctl.b.iso_thr_en = dev_if->iso_tx_thr_en;
969 dthrctl.b.tx_thr_len = dev_if->tx_thr_length;
970 dthrctl.b.rx_thr_en = dev_if->rx_thr_en;
971 dthrctl.b.rx_thr_len = dev_if->rx_thr_length;
972
973 dwc_write_reg32(&dev_if->dev_global_regs->dtknqr3_dthrctl, dthrctl.d32);
974
975 DWC_DEBUGPL(DBG_CIL, "Non ISO Tx Thr - %d\nISO Tx Thr - %d\nRx Thr - %d\nTx Thr Len - %d\nRx Thr Len - %d\n",
976 dthrctl.b.non_iso_thr_en, dthrctl.b.iso_thr_en, dthrctl.b.rx_thr_en, dthrctl.b.tx_thr_len, dthrctl.b.rx_thr_len);
977
978 }
979
980 dwc_otg_enable_device_interrupts(core_if);
981
982 {
983 diepmsk_data_t msk = { .d32 = 0 };
984 msk.b.txfifoundrn = 1;
985 if(core_if->multiproc_int_enable) {
986 dwc_modify_reg32(&dev_if->dev_global_regs->diepeachintmsk[0], msk.d32, msk.d32);
987 } else {
988 dwc_modify_reg32(&dev_if->dev_global_regs->diepmsk, msk.d32, msk.d32);
989 }
990 }
991
992
993 if(core_if->multiproc_int_enable) {
994 /* Set NAK on Babble */
995 dctl_data_t dctl = { .d32 = 0};
996 dctl.b.nakonbble = 1;
997 dwc_modify_reg32(&dev_if->dev_global_regs->dctl, 0, dctl.d32);
998 }
999 }
1000
1001 /**
1002 * This function enables the Host mode interrupts.
1003 *
1004 * @param core_if Programming view of DWC_otg controller
1005 */
1006 void dwc_otg_enable_host_interrupts(dwc_otg_core_if_t *core_if)
1007 {
1008 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
1009 gintmsk_data_t intr_mask = { .d32 = 0 };
1010
1011 DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__);
1012
1013 /* Disable all interrupts. */
1014 dwc_write_reg32(&global_regs->gintmsk, 0);
1015
1016 /* Clear any pending interrupts. */
1017 dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF);
1018
1019 /* Enable the common interrupts */
1020 dwc_otg_enable_common_interrupts(core_if);
1021
1022 /*
1023 * Enable host mode interrupts without disturbing common
1024 * interrupts.
1025 */
1026 intr_mask.b.sofintr = 1;
1027 intr_mask.b.portintr = 1;
1028 intr_mask.b.hcintr = 1;
1029
1030 dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32);
1031 }
1032
1033 /**
1034 * This function disables the Host Mode interrupts.
1035 *
1036 * @param core_if Programming view of DWC_otg controller
1037 */
1038 void dwc_otg_disable_host_interrupts(dwc_otg_core_if_t *core_if)
1039 {
1040 dwc_otg_core_global_regs_t *global_regs =
1041 core_if->core_global_regs;
1042 gintmsk_data_t intr_mask = { .d32 = 0 };
1043
1044 DWC_DEBUGPL(DBG_CILV, "%s()\n", __func__);
1045
1046 /*
1047 * Disable host mode interrupts without disturbing common
1048 * interrupts.
1049 */
1050 intr_mask.b.sofintr = 1;
1051 intr_mask.b.portintr = 1;
1052 intr_mask.b.hcintr = 1;
1053 intr_mask.b.ptxfempty = 1;
1054 intr_mask.b.nptxfempty = 1;
1055
1056 dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0);
1057 }
1058
1059 /**
1060 * This function initializes the DWC_otg controller registers for
1061 * host mode.
1062 *
1063 * This function flushes the Tx and Rx FIFOs and it flushes any entries in the
1064 * request queues. Host channels are reset to ensure that they are ready for
1065 * performing transfers.
1066 *
1067 * @param core_if Programming view of DWC_otg controller
1068 *
1069 */
1070 void dwc_otg_core_host_init(dwc_otg_core_if_t *core_if)
1071 {
1072 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
1073 dwc_otg_host_if_t *host_if = core_if->host_if;
1074 dwc_otg_core_params_t *params = core_if->core_params;
1075 hprt0_data_t hprt0 = { .d32 = 0 };
1076 fifosize_data_t nptxfifosize;
1077 fifosize_data_t ptxfifosize;
1078 int i;
1079 hcchar_data_t hcchar;
1080 hcfg_data_t hcfg;
1081 dwc_otg_hc_regs_t *hc_regs;
1082 int num_channels;
1083 gotgctl_data_t gotgctl = { .d32 = 0 };
1084
1085 DWC_DEBUGPL(DBG_CILV,"%s(%p)\n", __func__, core_if);
1086
1087 /* Restart the Phy Clock */
1088 dwc_write_reg32(core_if->pcgcctl, 0);
1089
1090 /* Initialize Host Configuration Register */
1091 init_fslspclksel(core_if);
1092 if (core_if->core_params->speed == DWC_SPEED_PARAM_FULL)
1093 {
1094 hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg);
1095 hcfg.b.fslssupp = 1;
1096 dwc_write_reg32(&host_if->host_global_regs->hcfg, hcfg.d32);
1097 }
1098
1099 /* Configure data FIFO sizes */
1100 if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) {
1101 DWC_DEBUGPL(DBG_CIL,"Total FIFO Size=%d\n", core_if->total_fifo_size);
1102 DWC_DEBUGPL(DBG_CIL,"Rx FIFO Size=%d\n", params->host_rx_fifo_size);
1103 DWC_DEBUGPL(DBG_CIL,"NP Tx FIFO Size=%d\n", params->host_nperio_tx_fifo_size);
1104 DWC_DEBUGPL(DBG_CIL,"P Tx FIFO Size=%d\n", params->host_perio_tx_fifo_size);
1105
1106 /* Rx FIFO */
1107 DWC_DEBUGPL(DBG_CIL,"initial grxfsiz=%08x\n", dwc_read_reg32(&global_regs->grxfsiz));
1108 dwc_write_reg32(&global_regs->grxfsiz, params->host_rx_fifo_size);
1109 DWC_DEBUGPL(DBG_CIL,"new grxfsiz=%08x\n", dwc_read_reg32(&global_regs->grxfsiz));
1110
1111 /* Non-periodic Tx FIFO */
1112 DWC_DEBUGPL(DBG_CIL,"initial gnptxfsiz=%08x\n", dwc_read_reg32(&global_regs->gnptxfsiz));
1113 nptxfifosize.b.depth = params->host_nperio_tx_fifo_size;
1114 nptxfifosize.b.startaddr = params->host_rx_fifo_size;
1115 dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32);
1116 DWC_DEBUGPL(DBG_CIL,"new gnptxfsiz=%08x\n", dwc_read_reg32(&global_regs->gnptxfsiz));
1117
1118 /* Periodic Tx FIFO */
1119 DWC_DEBUGPL(DBG_CIL,"initial hptxfsiz=%08x\n", dwc_read_reg32(&global_regs->hptxfsiz));
1120 ptxfifosize.b.depth = params->host_perio_tx_fifo_size;
1121 ptxfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth;
1122 dwc_write_reg32(&global_regs->hptxfsiz, ptxfifosize.d32);
1123 DWC_DEBUGPL(DBG_CIL,"new hptxfsiz=%08x\n", dwc_read_reg32(&global_regs->hptxfsiz));
1124 }
1125
1126 /* Clear Host Set HNP Enable in the OTG Control Register */
1127 gotgctl.b.hstsethnpen = 1;
1128 dwc_modify_reg32(&global_regs->gotgctl, gotgctl.d32, 0);
1129
1130 /* Make sure the FIFOs are flushed. */
1131 dwc_otg_flush_tx_fifo(core_if, 0x10 /* all Tx FIFOs */);
1132 dwc_otg_flush_rx_fifo(core_if);
1133
1134 /* Flush out any leftover queued requests. */
1135 num_channels = core_if->core_params->host_channels;
1136 for (i = 0; i < num_channels; i++)
1137 {
1138 hc_regs = core_if->host_if->hc_regs[i];
1139 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1140 hcchar.b.chen = 0;
1141 hcchar.b.chdis = 1;
1142 hcchar.b.epdir = 0;
1143 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1144 }
1145
1146 /* Halt all channels to put them into a known state. */
1147 for (i = 0; i < num_channels; i++)
1148 {
1149 int count = 0;
1150 hc_regs = core_if->host_if->hc_regs[i];
1151 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1152 hcchar.b.chen = 1;
1153 hcchar.b.chdis = 1;
1154 hcchar.b.epdir = 0;
1155 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1156 DWC_DEBUGPL(DBG_HCDV, "%s: Halt channel %d\n", __func__, i);
1157 do {
1158 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1159 if (++count > 200)
1160 {
1161 DWC_ERROR("%s: Unable to clear halt on channel %d\n",
1162 __func__, i);
1163 break;
1164 }
1165 udelay(100);
1166 }
1167 while (hcchar.b.chen);
1168 }
1169
1170 /* Turn on the vbus power. */
1171 DWC_PRINT("Init: Port Power? op_state=%d\n", core_if->op_state);
1172 if (core_if->op_state == A_HOST) {
1173 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1174 DWC_PRINT("Init: Power Port (%d)\n", hprt0.b.prtpwr);
1175 if (hprt0.b.prtpwr == 0) {
1176 hprt0.b.prtpwr = 1;
1177 dwc_write_reg32(host_if->hprt0, hprt0.d32);
1178 }
1179 }
1180
1181 dwc_otg_enable_host_interrupts(core_if);
1182 }
1183
1184 /**
1185 * Prepares a host channel for transferring packets to/from a specific
1186 * endpoint. The HCCHARn register is set up with the characteristics specified
1187 * in _hc. Host channel interrupts that may need to be serviced while this
1188 * transfer is in progress are enabled.
1189 *
1190 * @param core_if Programming view of DWC_otg controller
1191 * @param hc Information needed to initialize the host channel
1192 */
1193 void dwc_otg_hc_init(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
1194 {
1195 uint32_t intr_enable;
1196 hcintmsk_data_t hc_intr_mask;
1197 gintmsk_data_t gintmsk = { .d32 = 0 };
1198 hcchar_data_t hcchar;
1199 hcsplt_data_t hcsplt;
1200
1201 uint8_t hc_num = hc->hc_num;
1202 dwc_otg_host_if_t *host_if = core_if->host_if;
1203 dwc_otg_hc_regs_t *hc_regs = host_if->hc_regs[hc_num];
1204
1205 /* Clear old interrupt conditions for this host channel. */
1206 hc_intr_mask.d32 = 0xFFFFFFFF;
1207 hc_intr_mask.b.reserved = 0;
1208 dwc_write_reg32(&hc_regs->hcint, hc_intr_mask.d32);
1209
1210 /* Enable channel interrupts required for this transfer. */
1211 hc_intr_mask.d32 = 0;
1212 hc_intr_mask.b.chhltd = 1;
1213 if (core_if->dma_enable) {
1214 hc_intr_mask.b.ahberr = 1;
1215 /* Always record the first nak interrupt for bulk
1216 * packets. */
1217 if (hc->error_state && !hc->do_split &&
1218 hc->ep_type != DWC_OTG_EP_TYPE_ISOC) {
1219 hc_intr_mask.b.ack = 1;
1220 if (hc->ep_is_in) {
1221 hc_intr_mask.b.datatglerr = 1;
1222 if (hc->ep_type != DWC_OTG_EP_TYPE_INTR) {
1223 hc_intr_mask.b.nak = 1;
1224 }
1225 }
1226 }
1227 }
1228 else {
1229 switch (hc->ep_type) {
1230 case DWC_OTG_EP_TYPE_CONTROL:
1231 case DWC_OTG_EP_TYPE_BULK:
1232 hc_intr_mask.b.xfercompl = 1;
1233 hc_intr_mask.b.stall = 1;
1234 hc_intr_mask.b.xacterr = 1;
1235 hc_intr_mask.b.datatglerr = 1;
1236 if (hc->ep_is_in) {
1237 hc_intr_mask.b.bblerr = 1;
1238 }
1239 else {
1240 hc_intr_mask.b.nak = 1;
1241 hc_intr_mask.b.nyet = 1;
1242 if (hc->do_ping) {
1243 hc_intr_mask.b.ack = 1;
1244 }
1245 }
1246
1247 if (hc->do_split) {
1248 hc_intr_mask.b.nak = 1;
1249 if (hc->complete_split) {
1250 hc_intr_mask.b.nyet = 1;
1251 }
1252 else {
1253 hc_intr_mask.b.ack = 1;
1254 }
1255 }
1256
1257 if (hc->error_state) {
1258 hc_intr_mask.b.ack = 1;
1259 }
1260 break;
1261 case DWC_OTG_EP_TYPE_INTR:
1262 hc_intr_mask.b.xfercompl = 1;
1263 hc_intr_mask.b.nak = 1;
1264 hc_intr_mask.b.stall = 1;
1265 hc_intr_mask.b.xacterr = 1;
1266 hc_intr_mask.b.datatglerr = 1;
1267 hc_intr_mask.b.frmovrun = 1;
1268
1269 if (hc->ep_is_in) {
1270 hc_intr_mask.b.bblerr = 1;
1271 }
1272 if (hc->error_state) {
1273 hc_intr_mask.b.ack = 1;
1274 }
1275 if (hc->do_split) {
1276 if (hc->complete_split) {
1277 hc_intr_mask.b.nyet = 1;
1278 }
1279 else {
1280 hc_intr_mask.b.ack = 1;
1281 }
1282 }
1283 break;
1284 case DWC_OTG_EP_TYPE_ISOC:
1285 hc_intr_mask.b.xfercompl = 1;
1286 hc_intr_mask.b.frmovrun = 1;
1287 hc_intr_mask.b.ack = 1;
1288
1289 if (hc->ep_is_in) {
1290 hc_intr_mask.b.xacterr = 1;
1291 hc_intr_mask.b.bblerr = 1;
1292 }
1293 break;
1294 }
1295 }
1296 dwc_write_reg32(&hc_regs->hcintmsk, hc_intr_mask.d32);
1297
1298 // if(hc->ep_type == DWC_OTG_EP_TYPE_BULK && !hc->ep_is_in)
1299 // hc->max_packet = 512;
1300 /* Enable the top level host channel interrupt. */
1301 intr_enable = (1 << hc_num);
1302 dwc_modify_reg32(&host_if->host_global_regs->haintmsk, 0, intr_enable);
1303
1304 /* Make sure host channel interrupts are enabled. */
1305 gintmsk.b.hcintr = 1;
1306 dwc_modify_reg32(&core_if->core_global_regs->gintmsk, 0, gintmsk.d32);
1307
1308 /*
1309 * Program the HCCHARn register with the endpoint characteristics for
1310 * the current transfer.
1311 */
1312 hcchar.d32 = 0;
1313 hcchar.b.devaddr = hc->dev_addr;
1314 hcchar.b.epnum = hc->ep_num;
1315 hcchar.b.epdir = hc->ep_is_in;
1316 hcchar.b.lspddev = (hc->speed == DWC_OTG_EP_SPEED_LOW);
1317 hcchar.b.eptype = hc->ep_type;
1318 hcchar.b.mps = hc->max_packet;
1319
1320 dwc_write_reg32(&host_if->hc_regs[hc_num]->hcchar, hcchar.d32);
1321
1322 DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
1323 DWC_DEBUGPL(DBG_HCDV, " Dev Addr: %d\n", hcchar.b.devaddr);
1324 DWC_DEBUGPL(DBG_HCDV, " Ep Num: %d\n", hcchar.b.epnum);
1325 DWC_DEBUGPL(DBG_HCDV, " Is In: %d\n", hcchar.b.epdir);
1326 DWC_DEBUGPL(DBG_HCDV, " Is Low Speed: %d\n", hcchar.b.lspddev);
1327 DWC_DEBUGPL(DBG_HCDV, " Ep Type: %d\n", hcchar.b.eptype);
1328 DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps);
1329 DWC_DEBUGPL(DBG_HCDV, " Multi Cnt: %d\n", hcchar.b.multicnt);
1330
1331 /*
1332 * Program the HCSPLIT register for SPLITs
1333 */
1334 hcsplt.d32 = 0;
1335 if (hc->do_split) {
1336 DWC_DEBUGPL(DBG_HCDV, "Programming HC %d with split --> %s\n", hc->hc_num,
1337 hc->complete_split ? "CSPLIT" : "SSPLIT");
1338 hcsplt.b.compsplt = hc->complete_split;
1339 hcsplt.b.xactpos = hc->xact_pos;
1340 hcsplt.b.hubaddr = hc->hub_addr;
1341 hcsplt.b.prtaddr = hc->port_addr;
1342 DWC_DEBUGPL(DBG_HCDV, " comp split %d\n", hc->complete_split);
1343 DWC_DEBUGPL(DBG_HCDV, " xact pos %d\n", hc->xact_pos);
1344 DWC_DEBUGPL(DBG_HCDV, " hub addr %d\n", hc->hub_addr);
1345 DWC_DEBUGPL(DBG_HCDV, " port addr %d\n", hc->port_addr);
1346 DWC_DEBUGPL(DBG_HCDV, " is_in %d\n", hc->ep_is_in);
1347 DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps);
1348 DWC_DEBUGPL(DBG_HCDV, " xferlen: %d\n", hc->xfer_len);
1349 }
1350 dwc_write_reg32(&host_if->hc_regs[hc_num]->hcsplt, hcsplt.d32);
1351
1352 }
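
/*
 * Illustrative sketch (not part of the driver): how the HCD typically drives
 * the channel helpers in this file. The field values are invented for the
 * example; the real HCD derives them from the URB and its QH/QTD state, and
 * "hc" is a free channel it has picked.
 *
 *	hc->dev_addr   = 1;
 *	hc->ep_num     = 2;
 *	hc->ep_is_in   = 1;
 *	hc->ep_type    = DWC_OTG_EP_TYPE_BULK;
 *	hc->speed      = DWC_OTG_EP_SPEED_HIGH;
 *	hc->max_packet = 512;
 *	hc->xfer_buff  = buf;
 *	hc->xfer_len   = len;
 *
 *	dwc_otg_hc_init(core_if, hc);           // program HCCHARn/HCSPLT, unmask interrupts
 *	dwc_otg_hc_start_transfer(core_if, hc); // program HCTSIZn and enable the channel
 */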
1353
1354 /**
1355 * Attempts to halt a host channel. This function should only be called in
1356 * Slave mode or to abort a transfer in either Slave mode or DMA mode. Under
1357 * normal circumstances in DMA mode, the controller halts the channel when the
1358 * transfer is complete or a condition occurs that requires application
1359 * intervention.
1360 *
1361 * In slave mode, checks for a free request queue entry, then sets the Channel
1362 * Enable and Channel Disable bits of the Host Channel Characteristics
 1363 	 * register of the specified channel to initiate the halt. If there is no free
1364 * request queue entry, sets only the Channel Disable bit of the HCCHARn
1365 * register to flush requests for this channel. In the latter case, sets a
1366 * flag to indicate that the host channel needs to be halted when a request
1367 * queue slot is open.
1368 *
1369 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
1370 * HCCHARn register. The controller ensures there is space in the request
1371 * queue before submitting the halt request.
1372 *
1373 * Some time may elapse before the core flushes any posted requests for this
1374 * host channel and halts. The Channel Halted interrupt handler completes the
1375 * deactivation of the host channel.
1376 *
 1377 	 * @param hcd HCD state structure; provides the controller register interface.
1378 * @param hc Host channel to halt.
1379 * @param halt_status Reason for halting the channel.
1380 */
1381 void dwc_otg_hc_halt(dwc_otg_hcd_t *hcd,
1382 dwc_hc_t *hc,
1383 dwc_otg_halt_status_e halt_status)
1384 {
1385 gnptxsts_data_t nptxsts;
1386 hptxsts_data_t hptxsts;
1387 hcchar_data_t hcchar;
1388 dwc_otg_hc_regs_t *hc_regs;
1389 dwc_otg_core_global_regs_t *global_regs;
1390 dwc_otg_host_global_regs_t *host_global_regs;
1391 dwc_otg_core_if_t *core_if = hcd->core_if;
1392
1393 hc_regs = core_if->host_if->hc_regs[hc->hc_num];
1394 global_regs = core_if->core_global_regs;
1395 host_global_regs = core_if->host_if->host_global_regs;
1396
1397 WARN_ON(halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS);
1398
1399 if (halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE ||
1400 halt_status == DWC_OTG_HC_XFER_AHB_ERR) {
1401 /*
1402 * Disable all channel interrupts except Ch Halted. The QTD
1403 * and QH state associated with this transfer has been cleared
1404 * (in the case of URB_DEQUEUE), so the channel needs to be
1405 * shut down carefully to prevent crashes.
1406 */
1407 hcintmsk_data_t hcintmsk;
1408 hcintmsk.d32 = 0;
1409 hcintmsk.b.chhltd = 1;
1410 dwc_write_reg32(&hc_regs->hcintmsk, hcintmsk.d32);
1411
1412 /*
1413 * Make sure no other interrupts besides halt are currently
1414 * pending. Handling another interrupt could cause a crash due
1415 * to the QTD and QH state.
1416 */
1417 dwc_write_reg32(&hc_regs->hcint, ~hcintmsk.d32);
1418
1419 /*
1420 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
1421 * even if the channel was already halted for some other
1422 * reason.
1423 */
1424 hc->halt_status = halt_status;
1425
1426 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1427 if (hcchar.b.chen == 0) {
1428 /*
1429 * The channel is either already halted or it hasn't
1430 * started yet. In DMA mode, the transfer may halt if
1431 * it finishes normally or a condition occurs that
1432 * requires driver intervention. Don't want to halt
1433 * the channel again. In either Slave or DMA mode,
1434 * it's possible that the transfer has been assigned
1435 * to a channel, but not started yet when an URB is
1436 * dequeued. Don't want to halt a channel that hasn't
1437 * started yet.
1438 */
1439 return;
1440 }
1441 }
1442
1443 if (hc->halt_pending) {
1444 /*
1445 * A halt has already been issued for this channel. This might
1446 * happen when a transfer is aborted by a higher level in
1447 * the stack.
1448 */
1449 #ifdef DEBUG
1450 DWC_PRINT("*** %s: Channel %d, _hc->halt_pending already set ***\n",
1451 __func__, hc->hc_num);
1452
1453 /* dwc_otg_dump_global_registers(core_if); */
1454 /* dwc_otg_dump_host_registers(core_if); */
1455 #endif
1456 return;
1457 }
1458
1459 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1460 hcchar.b.chen = 1;
1461 hcchar.b.chdis = 1;
1462
1463 if (!core_if->dma_enable) {
1464 /* Check for space in the request queue to issue the halt. */
1465 if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL ||
1466 hc->ep_type == DWC_OTG_EP_TYPE_BULK) {
1467 nptxsts.d32 = dwc_read_reg32(&global_regs->gnptxsts);
1468 if (nptxsts.b.nptxqspcavail == 0) {
1469 hcchar.b.chen = 0;
1470 }
1471 }
1472 else {
1473 hptxsts.d32 = dwc_read_reg32(&host_global_regs->hptxsts);
1474 if ((hptxsts.b.ptxqspcavail == 0) || (core_if->queuing_high_bandwidth)) {
1475 hcchar.b.chen = 0;
1476 }
1477 }
1478 }
1479
1480 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1481
1482 hc->halt_status = halt_status;
1483
1484 if (!hc->halt_on_queue && !hc->halt_pending && hc->qh->nak_frame != 0xffff)
1485 hcd->nakking_channels--;
1486
1487 if (hcchar.b.chen) {
1488 hc->halt_pending = 1;
1489 hc->halt_on_queue = 0;
1490 }
1491 else {
1492 hc->halt_on_queue = 1;
1493 }
1494
1495 DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
1496 DWC_DEBUGPL(DBG_HCDV, " hcchar: 0x%08x\n", hcchar.d32);
1497 DWC_DEBUGPL(DBG_HCDV, " halt_pending: %d\n", hc->halt_pending);
1498 DWC_DEBUGPL(DBG_HCDV, " halt_on_queue: %d\n", hc->halt_on_queue);
1499 DWC_DEBUGPL(DBG_HCDV, " halt_status: %d\n", hc->halt_status);
1500
1501 return;
1502 }
1503
1504 /**
1505 * Clears the transfer state for a host channel. This function is normally
1506 * called after a transfer is done and the host channel is being released.
1507 *
1508 * @param core_if Programming view of DWC_otg controller.
1509 * @param hc Identifies the host channel to clean up.
1510 */
1511 void dwc_otg_hc_cleanup(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
1512 {
1513 dwc_otg_hc_regs_t *hc_regs;
1514
1515 hc->xfer_started = 0;
1516
1517 /*
1518 * Clear channel interrupt enables and any unhandled channel interrupt
1519 * conditions.
1520 */
1521 hc_regs = core_if->host_if->hc_regs[hc->hc_num];
1522 dwc_write_reg32(&hc_regs->hcintmsk, 0);
1523 dwc_write_reg32(&hc_regs->hcint, 0xFFFFFFFF);
1524
1525 #ifdef DEBUG
1526 del_timer(&core_if->hc_xfer_timer[hc->hc_num]);
1527 {
1528 hcchar_data_t hcchar;
1529 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1530 if (hcchar.b.chdis) {
1531 DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n",
1532 __func__, hc->hc_num, hcchar.d32);
1533 }
1534 }
1535 #endif
1536 }
1537
1538 /**
1539 * Sets the channel property that indicates in which frame a periodic transfer
1540 * should occur. This is always set to the _next_ frame. This function has no
1541 * effect on non-periodic transfers.
1542 *
1543 * @param core_if Programming view of DWC_otg controller.
1544 * @param hc Identifies the host channel to set up and its properties.
1545 * @param hcchar Current value of the HCCHAR register for the specified host
1546 * channel.
1547 */
1548 static inline void hc_set_even_odd_frame(dwc_otg_core_if_t *core_if,
1549 dwc_hc_t *hc,
1550 hcchar_data_t *hcchar)
1551 {
1552 if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
1553 hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
1554 hfnum_data_t hfnum;
1555 hfnum.d32 = dwc_read_reg32(&core_if->host_if->host_global_regs->hfnum);
1556
1557 /* 1 if _next_ frame is odd, 0 if it's even */
1558 hcchar->b.oddfrm = (hfnum.b.frnum & 0x1) ? 0 : 1;
1559 #ifdef DEBUG
1560 if (hc->ep_type == DWC_OTG_EP_TYPE_INTR && hc->do_split && !hc->complete_split) {
1561 switch (hfnum.b.frnum & 0x7) {
1562 case 7:
1563 core_if->hfnum_7_samples++;
1564 core_if->hfnum_7_frrem_accum += hfnum.b.frrem;
1565 break;
1566 case 0:
1567 core_if->hfnum_0_samples++;
1568 core_if->hfnum_0_frrem_accum += hfnum.b.frrem;
1569 break;
1570 default:
1571 core_if->hfnum_other_samples++;
1572 core_if->hfnum_other_frrem_accum += hfnum.b.frrem;
1573 break;
1574 }
1575 }
1576 #endif
1577 }
1578 }
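
/*
 * Example (follows directly from the code above): if HFNUM.FrNum currently
 * reads an even value, say 0x2A, then the next (micro)frame 0x2B is odd, so
 * OddFrm is set to 1 and the transfer is scheduled for that odd frame; an odd
 * FrNum gives OddFrm = 0.
 */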
1579
1580 #ifdef DEBUG
1581 static void hc_xfer_timeout(unsigned long ptr)
1582 {
1583 hc_xfer_info_t *xfer_info = (hc_xfer_info_t *)ptr;
1584 int hc_num = xfer_info->hc->hc_num;
1585 DWC_WARN("%s: timeout on channel %d\n", __func__, hc_num);
1586 DWC_WARN(" start_hcchar_val 0x%08x\n", xfer_info->core_if->start_hcchar_val[hc_num]);
1587 }
1588 #endif
1589
1590 /*
1591 * This function does the setup for a data transfer for a host channel and
1592 * starts the transfer. May be called in either Slave mode or DMA mode. In
1593 * Slave mode, the caller must ensure that there is sufficient space in the
1594 * request queue and Tx Data FIFO.
1595 *
1596 * For an OUT transfer in Slave mode, it loads a data packet into the
1597 * appropriate FIFO. If necessary, additional data packets will be loaded in
1598 * the Host ISR.
1599 *
1600 * For an IN transfer in Slave mode, a data packet is requested. The data
1601 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
1602 * additional data packets are requested in the Host ISR.
1603 *
1604 * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
1605 * register along with a packet count of 1 and the channel is enabled. This
1606 * causes a single PING transaction to occur. Other fields in HCTSIZ are
1607 * simply set to 0 since no data transfer occurs in this case.
1608 *
1609 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
1610 * all the information required to perform the subsequent data transfer. In
1611 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
1612 * controller performs the entire PING protocol, then starts the data
1613 * transfer.
1614 *
1615 * @param core_if Programming view of DWC_otg controller.
1616 * @param hc Information needed to initialize the host channel. The xfer_len
1617 * value may be reduced to accommodate the max widths of the XferSize and
1618 * PktCnt fields in the HCTSIZn register. The multi_count value may be changed
1619 * to reflect the final xfer_len value.
1620 */
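/*
 * Worked example of the clamping below (illustrative numbers): a 3000-byte
 * high-speed bulk OUT with max_packet = 512 gives
 *
 *	num_packets     = (3000 + 512 - 1) / 512 = 6
 *	HCTSIZ.XferSize = 3000
 *	HCTSIZ.PktCnt   = 6
 *
 * If num_packets exceeded max_packet_count, xfer_len would instead be cut
 * back to num_packets * max_packet.
 */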
1621 void dwc_otg_hc_start_transfer(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
1622 {
1623 hcchar_data_t hcchar;
1624 hctsiz_data_t hctsiz;
1625 uint16_t num_packets;
1626 uint32_t max_hc_xfer_size = core_if->core_params->max_transfer_size;
1627 uint16_t max_hc_pkt_count = core_if->core_params->max_packet_count;
1628 dwc_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num];
1629
1630 hctsiz.d32 = 0;
1631
1632 if (hc->do_ping) {
1633 if (!core_if->dma_enable) {
1634 dwc_otg_hc_do_ping(core_if, hc);
1635 hc->xfer_started = 1;
1636 return;
1637 }
1638 else {
1639 hctsiz.b.dopng = 1;
1640 }
1641 }
1642
1643 if (hc->do_split) {
1644 num_packets = 1;
1645
1646 if (hc->complete_split && !hc->ep_is_in) {
1647 /* For CSPLIT OUT Transfer, set the size to 0 so the
1648 * core doesn't expect any data written to the FIFO */
1649 hc->xfer_len = 0;
1650 }
1651 else if (hc->ep_is_in || (hc->xfer_len > hc->max_packet)) {
1652 hc->xfer_len = hc->max_packet;
1653 }
1654 else if (!hc->ep_is_in && (hc->xfer_len > 188)) {
1655 hc->xfer_len = 188;
1656 }
1657
1658 hctsiz.b.xfersize = hc->xfer_len;
1659 }
1660 else {
1661 /*
1662 * Ensure that the transfer length and packet count will fit
1663 * in the widths allocated for them in the HCTSIZn register.
1664 */
1665 if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
1666 hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
1667 /*
1668 * Make sure the transfer size is no larger than one
1669 * (micro)frame's worth of data. (A check was done
1670 * when the periodic transfer was accepted to ensure
1671 * that a (micro)frame's worth of data can be
1672 * programmed into a channel.)
1673 */
1674 uint32_t max_periodic_len = hc->multi_count * hc->max_packet;
1675 if (hc->xfer_len > max_periodic_len) {
1676 hc->xfer_len = max_periodic_len;
1677 }
1680 }
1681 else if (hc->xfer_len > max_hc_xfer_size) {
1682 			/* Clamp xfer_len so that, after rounding up to a whole number
1683 			 * of max-packet-size packets, it still fits in XferSize. */
1683 hc->xfer_len = max_hc_xfer_size - hc->max_packet + 1;
1684 }
1685
1686 if (hc->xfer_len > 0) {
1687 num_packets = (hc->xfer_len + hc->max_packet - 1) / hc->max_packet;
1688 if (num_packets > max_hc_pkt_count) {
1689 num_packets = max_hc_pkt_count;
1690 hc->xfer_len = num_packets * hc->max_packet;
1691 }
1692 }
1693 else {
1694 /* Need 1 packet for transfer length of 0. */
1695 num_packets = 1;
1696 }
1697
1698 #if 0
1699 		//Disabled: host-side usbtest case 10 runs a series of control transfers
1700 		//with URB_SHORT_NOT_OK set in transfer_flags; rounding xfer_len up here
1701 		//makes that test fail.
1702 if (hc->ep_is_in) {
1703 /* Always program an integral # of max packets for IN transfers. */
1704 hc->xfer_len = num_packets * hc->max_packet;
1705 }
1706 #endif
1707
1708 if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
1709 hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
1710 /*
1711 * Make sure that the multi_count field matches the
1712 * actual transfer length.
1713 */
1714 hc->multi_count = num_packets;
1715 }
1716
1717 if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
1718 /* Set up the initial PID for the transfer. */
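			/*
			 * High-bandwidth isochronous endpoints run up to three
			 * transactions per microframe. The starting PID encodes the
			 * transaction count for IN (DATA0/DATA1/DATA2), while OUT
			 * uses MDATA for all but the final transaction.
			 */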
1719 if (hc->speed == DWC_OTG_EP_SPEED_HIGH) {
1720 if (hc->ep_is_in) {
1721 if (hc->multi_count == 1) {
1722 hc->data_pid_start = DWC_OTG_HC_PID_DATA0;
1723 }
1724 else if (hc->multi_count == 2) {
1725 hc->data_pid_start = DWC_OTG_HC_PID_DATA1;
1726 }
1727 else {
1728 hc->data_pid_start = DWC_OTG_HC_PID_DATA2;
1729 }
1730 }
1731 else {
1732 if (hc->multi_count == 1) {
1733 hc->data_pid_start = DWC_OTG_HC_PID_DATA0;
1734 }
1735 else {
1736 hc->data_pid_start = DWC_OTG_HC_PID_MDATA;
1737 }
1738 }
1739 }
1740 else {
1741 hc->data_pid_start = DWC_OTG_HC_PID_DATA0;
1742 }
1743 }
1744
1745 hctsiz.b.xfersize = hc->xfer_len;
1746 }
1747
1748 hc->start_pkt_count = num_packets;
1749 hctsiz.b.pktcnt = num_packets;
1750 hctsiz.b.pid = hc->data_pid_start;
1751 dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
1752
1753 DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
1754 DWC_DEBUGPL(DBG_HCDV, " Xfer Size: %d\n", hctsiz.b.xfersize);
1755 DWC_DEBUGPL(DBG_HCDV, " Num Pkts: %d\n", hctsiz.b.pktcnt);
1756 DWC_DEBUGPL(DBG_HCDV, " Start PID: %d\n", hctsiz.b.pid);
1757
1758 if (core_if->dma_enable) {
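		/*
		 * The buffer pointer is written straight to HCDMA; xfer_buff is
		 * assumed to already hold a DMA-capable bus address here.
		 */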
1759 dwc_write_reg32(&hc_regs->hcdma, (uint32_t)hc->xfer_buff);
1760 }
1761
1762 /* Start the split */
1763 if (hc->do_split) {
1764 hcsplt_data_t hcsplt;
1765 hcsplt.d32 = dwc_read_reg32 (&hc_regs->hcsplt);
1766 hcsplt.b.spltena = 1;
1767 dwc_write_reg32(&hc_regs->hcsplt, hcsplt.d32);
1768 }
1769
1770 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1771 hcchar.b.multicnt = hc->multi_count;
1772 hc_set_even_odd_frame(core_if, hc, &hcchar);
1773 #ifdef DEBUG
1774 core_if->start_hcchar_val[hc->hc_num] = hcchar.d32;
1775 if (hcchar.b.chdis) {
1776 DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n",
1777 __func__, hc->hc_num, hcchar.d32);
1778 }
1779 #endif
1780
1781 /* Set host channel enable after all other setup is complete. */
1782 hcchar.b.chen = 1;
1783 hcchar.b.chdis = 0;
1784
1785 	/* Memory barrier before enabling the channel, to ensure the channel setup writes have completed. */
1786 mb();
1787
1788 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1789
1790 hc->xfer_started = 1;
1791 hc->requests++;
1792
1793 if (!core_if->dma_enable &&
1794 !hc->ep_is_in && hc->xfer_len > 0) {
1795 /* Load OUT packet into the appropriate Tx FIFO. */
1796 dwc_otg_hc_write_packet(core_if, hc);
1797 }
1798
1799 #ifdef DEBUG
1800 /* Start a timer for this transfer */
1801 core_if->hc_xfer_timer[hc->hc_num].function = hc_xfer_timeout;
1802 core_if->hc_xfer_info[hc->hc_num].core_if = core_if;
1803 core_if->hc_xfer_info[hc->hc_num].hc = hc;
1804 core_if->hc_xfer_timer[hc->hc_num].data = (unsigned long)(&core_if->hc_xfer_info[hc->hc_num]);
1805 core_if->hc_xfer_timer[hc->hc_num].expires = jiffies + (HZ*10);
1806 add_timer(&core_if->hc_xfer_timer[hc->hc_num]);
1807 #endif
1808 }
1809
1810 /**
1811  * This function continues a data transfer that was started by a previous call
1812 * to <code>dwc_otg_hc_start_transfer</code>. The caller must ensure there is
1813 * sufficient space in the request queue and Tx Data FIFO. This function
1814 * should only be called in Slave mode. In DMA mode, the controller acts
1815 * autonomously to complete transfers programmed to a host channel.
1816 *
1817 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
1818 * if there is any data remaining to be queued. For an IN transfer, another
1819 * data packet is always requested. For the SETUP phase of a control transfer,
1820 * this function does nothing.
1821 *
1822 * @return 1 if a new request is queued, 0 if no more requests are required
1823 * for this transfer.
1824 */
1825 int dwc_otg_hc_continue_transfer(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
1826 {
1827 DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
1828
1829 if (hc->do_split) {
1830 /* SPLITs always queue just once per channel */
1831 return 0;
1832 }
1833 else if (hc->data_pid_start == DWC_OTG_HC_PID_SETUP) {
1834 /* SETUPs are queued only once since they can't be NAKed. */
1835 return 0;
1836 }
1837 else if (hc->ep_is_in) {
1838 /*
1839 * Always queue another request for other IN transfers. If
1840 * back-to-back INs are issued and NAKs are received for both,
1841 * the driver may still be processing the first NAK when the
1842 * second NAK is received. When the interrupt handler clears
1843 * the NAK interrupt for the first NAK, the second NAK will
1844 * not be seen. So we can't depend on the NAK interrupt
1845 * handler to requeue a NAKed request. Instead, IN requests
1846 * are issued each time this function is called. When the
1847 * transfer completes, the extra requests for the channel will
1848 * be flushed.
1849 */
1850 hcchar_data_t hcchar;
1851 dwc_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num];
1852
1853 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1854 hc_set_even_odd_frame(core_if, hc, &hcchar);
1855 hcchar.b.chen = 1;
1856 hcchar.b.chdis = 0;
1857 DWC_DEBUGPL(DBG_HCDV, " IN xfer: hcchar = 0x%08x\n", hcchar.d32);
1858
1859 		/* Memory barrier before enabling the channel, to ensure the channel setup writes have completed. */
1860 mb();
1861
1862 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1863 hc->requests++;
1864 return 1;
1865 }
1866 else {
1867 /* OUT transfers. */
1868 if (hc->xfer_count < hc->xfer_len) {
1869 if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
1870 hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
1871 hcchar_data_t hcchar;
1872 dwc_otg_hc_regs_t *hc_regs;
1873 hc_regs = core_if->host_if->hc_regs[hc->hc_num];
1874 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1875 hc_set_even_odd_frame(core_if, hc, &hcchar);
1876 }
1877
1878 /* Load OUT packet into the appropriate Tx FIFO. */
1879 dwc_otg_hc_write_packet(core_if, hc);
1880 hc->requests++;
1881 return 1;
1882 }
1883 else {
1884 return 0;
1885 }
1886 }
1887 }
1888
1889 /**
1890 * Starts a PING transfer. This function should only be called in Slave mode.
1891 * The Do Ping bit is set in the HCTSIZ register, then the channel is enabled.
1892 */
1893 void dwc_otg_hc_do_ping(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
1894 {
1895 hcchar_data_t hcchar;
1896 hctsiz_data_t hctsiz;
1897 dwc_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num];
1898
1899 DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
1900
1901 hctsiz.d32 = 0;
1902 hctsiz.b.dopng = 1;
1903 hctsiz.b.pktcnt = 1;
1904 dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
1905
1906 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1907 hcchar.b.chen = 1;
1908 hcchar.b.chdis = 0;
1909
1910 	/* Memory barrier before enabling the channel, to ensure the channel setup writes have completed. */
1911 mb();
1912
1913 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1914 }
1915
1916 /**
1917 * This function writes a packet into the Tx FIFO associated with the Host
1918 * Channel. For a channel associated with a non-periodic EP, the non-periodic
1919 * Tx FIFO is written. For a channel associated with a periodic EP, the
1920 * periodic Tx FIFO is written. This function should only be called in Slave
1921 * mode.
1922 *
1923  * Upon return, the xfer_buff and xfer_count fields in hc are incremented by
1924  * the number of bytes written to the Tx FIFO.
1925 */
1926 void dwc_otg_hc_write_packet(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
1927 {
1928 uint32_t i;
1929 uint32_t remaining_count;
1930 uint32_t byte_count;
1931 uint32_t dword_count;
1932
1933 uint32_t *data_buff = (uint32_t *)(hc->xfer_buff);
1934 uint32_t *data_fifo = core_if->data_fifo[hc->hc_num];
1935
1936 remaining_count = hc->xfer_len - hc->xfer_count;
1937 if (remaining_count > hc->max_packet) {
1938 byte_count = hc->max_packet;
1939 }
1940 else {
1941 byte_count = remaining_count;
1942 }
1943
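	/* The FIFO is written 32 bits at a time, so round the byte count up
	 * to a whole number of DWORDs. */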
1944 dword_count = (byte_count + 3) / 4;
1945
1946 if ((((unsigned long)data_buff) & 0x3) == 0) {
1947 /* xfer_buff is DWORD aligned. */
1948 for (i = 0; i < dword_count; i++, data_buff++)
1949 {
1950 dwc_write_reg32(data_fifo, *data_buff);
1951 }
1952 }
1953 else {
1954 /* xfer_buff is not DWORD aligned. */
1955 for (i = 0; i < dword_count; i++, data_buff++)
1956 {
1957 dwc_write_reg32(data_fifo, get_unaligned(data_buff));
1958 }
1959 }
1960
1961 hc->xfer_count += byte_count;
1962 hc->xfer_buff += byte_count;
1963 }
1964
1965 /**
1966 * Gets the current USB frame number. This is the frame number from the last
1967 * SOF packet.
1968 */
1969 uint32_t dwc_otg_get_frame_number(dwc_otg_core_if_t *core_if)
1970 {
1971 dsts_data_t dsts;
1972 dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts);
1973
1974 /* read current frame/microframe number from DSTS register */
1975 return dsts.b.soffn;
1976 }
1977
1978 /**
1979 * This function reads a setup packet from the Rx FIFO into the destination
1980 * buffer. This function is called from the Rx Status Queue Level (RxStsQLvl)
1981 * Interrupt routine when a SETUP packet has been received in Slave mode.
1982 *
1983 * @param core_if Programming view of DWC_otg controller.
1984 * @param dest Destination buffer for packet data.
1985 */
1986 void dwc_otg_read_setup_packet(dwc_otg_core_if_t *core_if, uint32_t *dest)
1987 {
1988 /* Get the 8 bytes of a setup transaction data */
1989
1990 /* Pop 2 DWORDS off the receive data FIFO into memory */
1991 dest[0] = dwc_read_reg32(core_if->data_fifo[0]);
1992 dest[1] = dwc_read_reg32(core_if->data_fifo[0]);
1993 }
1994
1995
1996 /**
1997 * This function enables EP0 OUT to receive SETUP packets and configures EP0
1998 * IN for transmitting packets. It is normally called when the
1999 * "Enumeration Done" interrupt occurs.
2000 *
2001 * @param core_if Programming view of DWC_otg controller.
2002 * @param ep The EP0 data.
2003 */
2004 void dwc_otg_ep0_activate(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2005 {
2006 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
2007 dsts_data_t dsts;
2008 depctl_data_t diepctl;
2009 depctl_data_t doepctl;
2010 dctl_data_t dctl = { .d32 = 0 };
2011
2012 /* Read the Device Status and Endpoint 0 Control registers */
2013 dsts.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dsts);
2014 diepctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl);
2015 doepctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl);
2016
2017 /* Set the MPS of the IN EP based on the enumeration speed */
2018 switch (dsts.b.enumspd) {
2019 case DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ:
2020 case DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ:
2021 case DWC_DSTS_ENUMSPD_FS_PHY_48MHZ:
2022 diepctl.b.mps = DWC_DEP0CTL_MPS_64;
2023 break;
2024 case DWC_DSTS_ENUMSPD_LS_PHY_6MHZ:
2025 diepctl.b.mps = DWC_DEP0CTL_MPS_8;
2026 break;
2027 }
2028
2029 dwc_write_reg32(&dev_if->in_ep_regs[0]->diepctl, diepctl.d32);
2030
2031 /* Enable OUT EP for receive */
2032 doepctl.b.epena = 1;
2033 dwc_write_reg32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32);
2034
2035 #ifdef VERBOSE
2036 DWC_DEBUGPL(DBG_PCDV,"doepctl0=%0x\n",
2037 dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl));
2038 DWC_DEBUGPL(DBG_PCDV,"diepctl0=%0x\n",
2039 dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl));
2040 #endif
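	/* Clear the global non-periodic IN NAK so the core resumes
	 * responding to IN tokens. */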
2041 dctl.b.cgnpinnak = 1;
2042
2043 dwc_modify_reg32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32);
2044 DWC_DEBUGPL(DBG_PCDV,"dctl=%0x\n",
2045 dwc_read_reg32(&dev_if->dev_global_regs->dctl));
2046 }
2047
2048 /**
2049 * This function activates an EP. The Device EP control register for
2050 * the EP is configured as defined in the ep structure. Note: This
2051 * function is not used for EP0.
2052 *
2053 * @param core_if Programming view of DWC_otg controller.
2054 * @param ep The EP to activate.
2055 */
2056 void dwc_otg_ep_activate(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2057 {
2058 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
2059 depctl_data_t depctl;
2060 volatile uint32_t *addr;
2061 daint_data_t daintmsk = { .d32 = 0 };
2062
2063 DWC_DEBUGPL(DBG_PCDV, "%s() EP%d-%s\n", __func__, ep->num,
2064 (ep->is_in?"IN":"OUT"));
2065
2066 /* Read DEPCTLn register */
2067 if (ep->is_in == 1) {
2068 addr = &dev_if->in_ep_regs[ep->num]->diepctl;
2069 daintmsk.ep.in = 1<<ep->num;
2070 }
2071 else {
2072 addr = &dev_if->out_ep_regs[ep->num]->doepctl;
2073 daintmsk.ep.out = 1<<ep->num;
2074 }
2075
2076 /* If the EP is already active don't change the EP Control
2077 * register. */
2078 depctl.d32 = dwc_read_reg32(addr);
2079 if (!depctl.b.usbactep) {
2080 depctl.b.mps = ep->maxpacket;
2081 depctl.b.eptype = ep->type;
2082 depctl.b.txfnum = ep->tx_fifo_num;
2083
2084 		/* Both isochronous and non-isochronous endpoints start with
2085 		 * DATA0 (for isochronous EPs this bit is the even
2086 		 * (micro)frame selector). */
2087 		depctl.b.setd0pid = 1;
2090 depctl.b.usbactep = 1;
2091
2092 dwc_write_reg32(addr, depctl.d32);
2093 DWC_DEBUGPL(DBG_PCDV,"DEPCTL(%.8x)=%08x\n",(u32)addr, dwc_read_reg32(addr));
2094 }
2095
2096 /* Enable the Interrupt for this EP */
2097 if(core_if->multiproc_int_enable) {
2098 if (ep->is_in == 1) {
2099 diepmsk_data_t diepmsk = { .d32 = 0};
2100 diepmsk.b.xfercompl = 1;
2101 diepmsk.b.timeout = 1;
2102 diepmsk.b.epdisabled = 1;
2103 diepmsk.b.ahberr = 1;
2104 diepmsk.b.intknepmis = 1;
2105 			diepmsk.b.txfifoundrn = 1;	/* Tx FIFO underrun */
2106
2107
2108 if(core_if->dma_desc_enable) {
2109 diepmsk.b.bna = 1;
2110 }
2111
2112 /*
2113 if(core_if->dma_enable) {
2114 diepmsk.b.nak = 1;
2115 }
2116 */
2117 dwc_write_reg32(&dev_if->dev_global_regs->diepeachintmsk[ep->num], diepmsk.d32);
2118
2119 } else {
2120 doepmsk_data_t doepmsk = { .d32 = 0};
2121 doepmsk.b.xfercompl = 1;
2122 doepmsk.b.ahberr = 1;
2123 doepmsk.b.epdisabled = 1;
2124
2125
2126 if(core_if->dma_desc_enable) {
2127 doepmsk.b.bna = 1;
2128 }
2129 /*
2130 doepmsk.b.babble = 1;
2131 doepmsk.b.nyet = 1;
2132 doepmsk.b.nak = 1;
2133 */
2134 dwc_write_reg32(&dev_if->dev_global_regs->doepeachintmsk[ep->num], doepmsk.d32);
2135 }
2136 dwc_modify_reg32(&dev_if->dev_global_regs->deachintmsk,
2137 0, daintmsk.d32);
2138 } else {
2139 dwc_modify_reg32(&dev_if->dev_global_regs->daintmsk,
2140 0, daintmsk.d32);
2141 }
2142
2143 DWC_DEBUGPL(DBG_PCDV,"DAINTMSK=%0x\n",
2144 dwc_read_reg32(&dev_if->dev_global_regs->daintmsk));
2145
2146 ep->stall_clear_flag = 0;
2147 return;
2148 }
2149
2150 /**
2151 * This function deactivates an EP. This is done by clearing the USB Active
2152 * EP bit in the Device EP control register. Note: This function is not used
2153 * for EP0. EP0 cannot be deactivated.
2154 *
2155 * @param core_if Programming view of DWC_otg controller.
2156 * @param ep The EP to deactivate.
2157 */
2158 void dwc_otg_ep_deactivate(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2159 {
2160 depctl_data_t depctl = { .d32 = 0 };
2161 volatile uint32_t *addr;
2162 daint_data_t daintmsk = { .d32 = 0};
2163
2164 /* Read DEPCTLn register */
2165 if (ep->is_in == 1) {
2166 addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
2167 daintmsk.ep.in = 1<<ep->num;
2168 }
2169 else {
2170 addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
2171 daintmsk.ep.out = 1<<ep->num;
2172 }
2173
2174 	//Disable the EP only when it is currently enabled; otherwise the
2175 	//NAK/disable wait loops below can hang (seen in the cv9 compliance test).
2176 depctl.d32=dwc_read_reg32(addr);
2177 if(depctl.b.epena){
2178 if (ep->is_in == 1) {
2179 diepint_data_t diepint;
2180 dwc_otg_dev_in_ep_regs_t *in_reg=core_if->dev_if->in_ep_regs[ep->num];
2181
2182 //Set ep nak
2183 depctl.d32=dwc_read_reg32(&in_reg->diepctl);
2184 depctl.b.snak=1;
2185 dwc_write_reg32(&in_reg->diepctl,depctl.d32);
2186
2187 //wait for diepint.b.inepnakeff
2188 diepint.d32=dwc_read_reg32(&in_reg->diepint);
2189 while(!diepint.b.inepnakeff){
2190 udelay(1);
2191 diepint.d32=dwc_read_reg32(&in_reg->diepint);
2192 }
2193 diepint.d32=0;
2194 diepint.b.inepnakeff=1;
2195 dwc_write_reg32(&in_reg->diepint,diepint.d32);
2196
2197 //set ep disable and snak
2198 depctl.d32=dwc_read_reg32(&in_reg->diepctl);
2199 depctl.b.snak=1;
2200 depctl.b.epdis=1;
2201 dwc_write_reg32(&in_reg->diepctl,depctl.d32);
2202
2203 //wait for diepint.b.epdisabled
2204 diepint.d32=dwc_read_reg32(&in_reg->diepint);
2205 while(!diepint.b.epdisabled){
2206 udelay(1);
2207 diepint.d32=dwc_read_reg32(&in_reg->diepint);
2208 }
2209 diepint.d32=0;
2210 diepint.b.epdisabled=1;
2211 dwc_write_reg32(&in_reg->diepint,diepint.d32);
2212
2213 //clear ep enable and disable bit
2214 depctl.d32=dwc_read_reg32(&in_reg->diepctl);
2215 depctl.b.epena=0;
2216 depctl.b.epdis=0;
2217 dwc_write_reg32(&in_reg->diepctl,depctl.d32);
2218
2219 }
2220 #if 0
2221 	//This follows DWC OTG Databook v2.72a, 6.4.2.1.3 "Disabling an OUT Endpoint",
2222 	//but it does not work here; the original code path does.
2223 else {
2224 doepint_data_t doepint;
2225 dwc_otg_dev_out_ep_regs_t *out_reg=core_if->dev_if->out_ep_regs[ep->num];
2226 dctl_data_t dctl;
2227 gintsts_data_t gintsts;
2228
2229 //set dctl global out nak
2230 dctl.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dctl);
2231 dctl.b.sgoutnak=1;
2232 dwc_write_reg32(&core_if->dev_if->dev_global_regs->dctl,dctl.d32);
2233
2234 //wait for gintsts.goutnakeff
2235 gintsts.d32=dwc_read_reg32(&core_if->core_global_regs->gintsts);
2236 while(!gintsts.b.goutnakeff){
2237 udelay(1);
2238 gintsts.d32=dwc_read_reg32(&core_if->core_global_regs->gintsts);
2239 }
2240 gintsts.d32=0;
2241 gintsts.b.goutnakeff=1;
2242 dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32);
2243
2244 //set ep disable and snak
2245 depctl.d32=dwc_read_reg32(&out_reg->doepctl);
2246 depctl.b.snak=1;
2247 depctl.b.epdis=1;
2248 dwc_write_reg32(&out_reg->doepctl,depctl.d32);
2249
2250 //wait for diepint.b.epdisabled
2251 doepint.d32=dwc_read_reg32(&out_reg->doepint);
2252 while(!doepint.b.epdisabled){
2253 udelay(1);
2254 doepint.d32=dwc_read_reg32(&out_reg->doepint);
2255 }
2256 doepint.d32=0;
2257 doepint.b.epdisabled=1;
2258 dwc_write_reg32(&out_reg->doepint,doepint.d32);
2259
2260 //clear ep enable and disable bit
2261 depctl.d32=dwc_read_reg32(&out_reg->doepctl);
2262 depctl.b.epena=0;
2263 depctl.b.epdis=0;
2264 dwc_write_reg32(&out_reg->doepctl,depctl.d32);
2265 }
2266 #endif
2267
2268 depctl.d32=0;
2269 depctl.b.usbactep = 0;
2270
2271 if (ep->is_in == 0) {
2272 if(core_if->dma_enable||core_if->dma_desc_enable)
2273 depctl.b.epdis = 1;
2274 }
2275
2276 dwc_write_reg32(addr, depctl.d32);
2277 }
2278
2279 /* Disable the Interrupt for this EP */
2280 if(core_if->multiproc_int_enable) {
2281 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->deachintmsk,
2282 daintmsk.d32, 0);
2283
2284 if (ep->is_in == 1) {
2285 dwc_write_reg32(&core_if->dev_if->dev_global_regs->diepeachintmsk[ep->num], 0);
2286 } else {
2287 dwc_write_reg32(&core_if->dev_if->dev_global_regs->doepeachintmsk[ep->num], 0);
2288 }
2289 } else {
2290 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->daintmsk,
2291 daintmsk.d32, 0);
2292 }
2293
2294 if (ep->is_in == 1) {
2295 DWC_DEBUGPL(DBG_PCD, "DIEPCTL(%.8x)=%08x DIEPTSIZ=%08x, DIEPINT=%.8x, DIEPDMA=%.8x, DTXFSTS=%.8x\n",
2296 (u32)&core_if->dev_if->in_ep_regs[ep->num]->diepctl,
2297 dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->diepctl),
2298 dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz),
2299 dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->diepint),
2300 dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->diepdma),
2301 dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dtxfsts));
2302 DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n",
2303 dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk),
2304 dwc_read_reg32(&core_if->core_global_regs->gintmsk));
2305 }
2306 else {
2307 DWC_DEBUGPL(DBG_PCD, "DOEPCTL(%.8x)=%08x DOEPTSIZ=%08x, DOEPINT=%.8x, DOEPDMA=%.8x\n",
2308 (u32)&core_if->dev_if->out_ep_regs[ep->num]->doepctl,
2309 dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doepctl),
2310 dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz),
2311 dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doepint),
2312 dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doepdma));
2313
2314 DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n",
2315 dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk),
2316 dwc_read_reg32(&core_if->core_global_regs->gintmsk));
2317 }
2318
2319 }
2320
2321 /**
2322  * This function initializes the DMA descriptor chain for a transfer in
2323  * descriptor DMA mode. One descriptor is built for each maxxfer-sized
2324  * chunk of ep->total_len; the last descriptor in the chain has the L
2325  * (last) and IOC (interrupt on completion) bits set.
2326  *
2327  * @param core_if Programming view of DWC_otg controller.
2328  * @param ep The EP whose descriptor chain is initialized.
2329  */
2330 static void init_dma_desc_chain(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2331 {
2332 dwc_otg_dma_desc_t* dma_desc;
2333 uint32_t offset;
2334 uint32_t xfer_est;
2335 int i;
2336
2337 ep->desc_cnt = ( ep->total_len / ep->maxxfer) +
2338 ((ep->total_len % ep->maxxfer) ? 1 : 0);
2339 if(!ep->desc_cnt)
2340 ep->desc_cnt = 1;
2341
2342 dma_desc = ep->desc_addr;
2343 xfer_est = ep->total_len;
2344 offset = 0;
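	/*
	 * For each descriptor, BS is toggled BUSY -> READY around the update
	 * so the core never sees a half-written entry. SP flags a short final
	 * IN packet (or a requested ZLP); OUT byte counts are rounded up to a
	 * DWORD boundary.
	 */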
2345 for( i = 0; i < ep->desc_cnt; ++i) {
2346 /** DMA Descriptor Setup */
2347 if(xfer_est > ep->maxxfer) {
2348 dma_desc->status.b.bs = BS_HOST_BUSY;
2349 dma_desc->status.b.l = 0;
2350 dma_desc->status.b.ioc = 0;
2351 dma_desc->status.b.sp = 0;
2352 dma_desc->status.b.bytes = ep->maxxfer;
2353 dma_desc->buf = ep->dma_addr + offset;
2354 dma_desc->status.b.bs = BS_HOST_READY;
2355
2356 xfer_est -= ep->maxxfer;
2357 offset += ep->maxxfer;
2358 } else {
2359 dma_desc->status.b.bs = BS_HOST_BUSY;
2360 dma_desc->status.b.l = 1;
2361 dma_desc->status.b.ioc = 1;
2362 if(ep->is_in) {
2363 dma_desc->status.b.sp = (xfer_est % ep->maxpacket) ?
2364 1 : ((ep->sent_zlp) ? 1 : 0);
2365 dma_desc->status.b.bytes = xfer_est;
2366 } else {
2367 dma_desc->status.b.bytes = xfer_est + ((4 - (xfer_est & 0x3)) & 0x3) ;
2368 }
2369
2370 dma_desc->buf = ep->dma_addr + offset;
2371 dma_desc->status.b.bs = BS_HOST_READY;
2372 }
2373 dma_desc ++;
2374 }
2375 }
2376
2377 /**
2378 * This function does the setup for a data transfer for an EP and
2379 * starts the transfer. For an IN transfer, the packets will be
2380 * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers,
2381  * the packets are unloaded from the Rx FIFO in the ISR.
2382 *
2383 * @param core_if Programming view of DWC_otg controller.
2384 * @param ep The EP to start the transfer on.
2385 */
2386
2387 void dwc_otg_ep_start_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2388 {
2389 depctl_data_t depctl;
2390 deptsiz_data_t deptsiz;
2391 gintmsk_data_t intr_mask = { .d32 = 0};
2392
2393 DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__);
2394
2395 DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d "
2396 "xfer_buff=%p start_xfer_buff=%p\n",
2397 ep->num, (ep->is_in?"IN":"OUT"), ep->xfer_len,
2398 ep->xfer_count, ep->xfer_buff, ep->start_xfer_buff);
2399
2400 /* IN endpoint */
2401 if (ep->is_in == 1) {
2402 dwc_otg_dev_in_ep_regs_t *in_regs =
2403 core_if->dev_if->in_ep_regs[ep->num];
2404
2405 gnptxsts_data_t gtxstatus;
2406
2407 gtxstatus.d32 =
2408 dwc_read_reg32(&core_if->core_global_regs->gnptxsts);
2409
2410 if(core_if->en_multiple_tx_fifo == 0 && gtxstatus.b.nptxqspcavail == 0) {
2411 #ifdef DEBUG
2412 DWC_PRINT("TX Queue Full (0x%0x)\n", gtxstatus.d32);
2413 #endif
2414 return;
2415 }
2416
2417 depctl.d32 = dwc_read_reg32(&(in_regs->diepctl));
2418 deptsiz.d32 = dwc_read_reg32(&(in_regs->dieptsiz));
2419
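		/* Program at most maxxfer bytes of the remaining request in this
		 * pass. */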
2420 ep->xfer_len += (ep->maxxfer < (ep->total_len - ep->xfer_len)) ?
2421 ep->maxxfer : (ep->total_len - ep->xfer_len);
2422
2423 /* Zero Length Packet? */
2424 if ((ep->xfer_len - ep->xfer_count) == 0) {
2425 deptsiz.b.xfersize = 0;
2426 deptsiz.b.pktcnt = 1;
2427 }
2428 else {
2429 			/* Program the transfer size and packet count as follows:
2430 			 *   xfersize = N * maxpacket + short_packet
2431 			 *   pktcnt = N + (short_packet exist ? 1 : 0)
2432 			 */
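			/* e.g. 1000 bytes remaining with maxpacket = 64:
			 * xfersize = 1000, pktcnt = 16 (15 full packets plus one
			 * 40-byte short packet). */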
2434 deptsiz.b.xfersize = ep->xfer_len - ep->xfer_count;
2435 deptsiz.b.pktcnt =
2436 (ep->xfer_len - ep->xfer_count - 1 + ep->maxpacket) /
2437 ep->maxpacket;
2438 }
2439
2440
2441 /* Write the DMA register */
2442 if (core_if->dma_enable) {
2443 if (/*(core_if->dma_enable)&&*/(ep->dma_addr==DMA_ADDR_INVALID)) {
2444 ep->dma_addr=dma_map_single(NULL,(void *)(ep->xfer_buff),(ep->xfer_len),DMA_TO_DEVICE);
2445 }
2446 DWC_DEBUGPL(DBG_PCDV, "ep%d dma_addr=%.8x\n", ep->num, ep->dma_addr);
2447
2448 if (core_if->dma_desc_enable == 0) {
2449 dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
2450
2451 VERIFY_PCD_DMA_ADDR(ep->dma_addr);
2452 dwc_write_reg32 (&(in_regs->diepdma),
2453 (uint32_t)ep->dma_addr);
2454 }
2455 else {
2456 init_dma_desc_chain(core_if, ep);
2457 /** DIEPDMAn Register write */
2458
2459 VERIFY_PCD_DMA_ADDR(ep->dma_desc_addr);
2460 dwc_write_reg32(&in_regs->diepdma, ep->dma_desc_addr);
2461 }
2462 }
2463 else
2464 {
2465 dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
2466 if(ep->type != DWC_OTG_EP_TYPE_ISOC) {
2467 /**
2468 * Enable the Non-Periodic Tx FIFO empty interrupt,
2469 			 * or the Tx FIFO empty interrupt in dedicated Tx FIFO mode;
2470 			 * the data will be written into the FIFO by the ISR.
2471 */
2472 if(core_if->en_multiple_tx_fifo == 0) {
2473 intr_mask.b.nptxfempty = 1;
2474 dwc_modify_reg32(&core_if->core_global_regs->gintmsk,
2475 intr_mask.d32, intr_mask.d32);
2476 }
2477 else {
2478 /* Enable the Tx FIFO Empty Interrupt for this EP */
2479 if(ep->xfer_len > 0) {
2480 uint32_t fifoemptymsk = 0;
2481 fifoemptymsk = 1 << ep->num;
2482 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
2483 0, fifoemptymsk);
2484
2485 }
2486 }
2487 }
2488 }
2489
2490 /* EP enable, IN data in FIFO */
2491 depctl.b.cnak = 1;
2492 depctl.b.epena = 1;
2493 dwc_write_reg32(&in_regs->diepctl, depctl.d32);
2494
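		/* Update EP0's NextEp field so this endpoint is next in the IN
		 * endpoint sequence (relevant when the core uses a shared Tx
		 * FIFO). */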
2495 depctl.d32 = dwc_read_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl);
2496 depctl.b.nextep = ep->num;
2497 dwc_write_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl, depctl.d32);
2498
2499 DWC_DEBUGPL(DBG_PCD, "DIEPCTL(%.8x)=%08x DIEPTSIZ=%08x, DIEPINT=%.8x, DIEPDMA=%.8x, DTXFSTS=%.8x\n",
2500 (u32)&in_regs->diepctl,
2501 dwc_read_reg32(&in_regs->diepctl),
2502 dwc_read_reg32(&in_regs->dieptsiz),
2503 dwc_read_reg32(&in_regs->diepint),
2504 dwc_read_reg32(&in_regs->diepdma),
2505 dwc_read_reg32(&in_regs->dtxfsts));
2506 DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n",
2507 dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk),
2508 dwc_read_reg32(&core_if->core_global_regs->gintmsk));
2509
2510 }
2511 else {
2512 /* OUT endpoint */
2513 dwc_otg_dev_out_ep_regs_t *out_regs =
2514 core_if->dev_if->out_ep_regs[ep->num];
2515
2516 depctl.d32 = dwc_read_reg32(&(out_regs->doepctl));
2517 deptsiz.d32 = dwc_read_reg32(&(out_regs->doeptsiz));
2518
2519 ep->xfer_len += (ep->maxxfer < (ep->total_len - ep->xfer_len)) ?
2520 ep->maxxfer : (ep->total_len - ep->xfer_len);
2521
2522 /* Program the transfer size and packet count as follows:
2523 *
2524 * pktcnt = N
2525 * xfersize = N * maxpacket
2526 */
2527 if ((ep->xfer_len - ep->xfer_count) == 0) {
2528 /* Zero Length Packet */
2529 deptsiz.b.xfersize = ep->maxpacket;
2530 deptsiz.b.pktcnt = 1;
2531 }
2532 else {
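			/* OUT transfer sizes are programmed in whole max-packet-size
			 * packets, so round xfer_len up to a packet boundary; a short
			 * packet is accounted for when the transfer completes. */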
2533 deptsiz.b.pktcnt =
2534 (ep->xfer_len - ep->xfer_count + (ep->maxpacket - 1)) /
2535 ep->maxpacket;
2536 ep->xfer_len = deptsiz.b.pktcnt * ep->maxpacket + ep->xfer_count;
2537 deptsiz.b.xfersize = ep->xfer_len - ep->xfer_count;
2538 }
2539
2540 DWC_DEBUGPL(DBG_PCDV, "ep%d xfersize=%d pktcnt=%d\n",
2541 ep->num,
2542 deptsiz.b.xfersize, deptsiz.b.pktcnt);
2543
2544 if (core_if->dma_enable) {
2545 if (/*(core_if->dma_enable)&&*/(ep->dma_addr==DMA_ADDR_INVALID)) {
2546 				ep->dma_addr=dma_map_single(NULL,(void *)(ep->xfer_buff),(ep->xfer_len),DMA_FROM_DEVICE);	/* OUT EP: the core writes into this buffer */
2547 }
2548 DWC_DEBUGPL(DBG_PCDV, "ep%d dma_addr=%.8x\n",
2549 ep->num,
2550 ep->dma_addr);
2551 if (!core_if->dma_desc_enable) {
2552 dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
2553
2554 VERIFY_PCD_DMA_ADDR(ep->dma_addr);
2555 dwc_write_reg32 (&(out_regs->doepdma),
2556 (uint32_t)ep->dma_addr);
2557 }
2558 else {
2559 init_dma_desc_chain(core_if, ep);
2560
2561 /** DOEPDMAn Register write */
2562
2563 VERIFY_PCD_DMA_ADDR(ep->dma_desc_addr);
2564 dwc_write_reg32(&out_regs->doepdma, ep->dma_desc_addr);
2565 }
2566 }
2567 else {
2568 dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
2569 }
2570
2571 /* EP enable */
2572 depctl.b.cnak = 1;
2573 depctl.b.epena = 1;
2574
2575 dwc_write_reg32(&out_regs->doepctl, depctl.d32);
2576
2577 DWC_DEBUGPL(DBG_PCD, "DOEPCTL(%.8x)=%08x DOEPTSIZ=%08x, DOEPINT=%.8x, DOEPDMA=%.8x\n",
2578 (u32)&out_regs->doepctl,
2579 dwc_read_reg32(&out_regs->doepctl),
2580 dwc_read_reg32(&out_regs->doeptsiz),
2581 dwc_read_reg32(&out_regs->doepint),
2582 dwc_read_reg32(&out_regs->doepdma));
2583
2584 DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n",
2585 dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk),
2586 dwc_read_reg32(&core_if->core_global_regs->gintmsk));
2587 }
2588 }
2589
2590 /**
2591  * This function sets up a zero-length transfer in Buffer DMA and
2592  * Slave modes for USB requests that have the zero flag set.
2593 *
2594 * @param core_if Programming view of DWC_otg controller.
2595 * @param ep The EP to start the transfer on.
2596 *
2597 */
2598 void dwc_otg_ep_start_zl_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2599 {
2600
2601 depctl_data_t depctl;
2602 deptsiz_data_t deptsiz;
2603 gintmsk_data_t intr_mask = { .d32 = 0};
2604
2605 DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__);
2606
2607 /* IN endpoint */
2608 if (ep->is_in == 1) {
2609 dwc_otg_dev_in_ep_regs_t *in_regs =
2610 core_if->dev_if->in_ep_regs[ep->num];
2611
2612 depctl.d32 = dwc_read_reg32(&(in_regs->diepctl));
2613 deptsiz.d32 = dwc_read_reg32(&(in_regs->dieptsiz));
2614
2615 deptsiz.b.xfersize = 0;
2616 deptsiz.b.pktcnt = 1;
2617
2618
2619 /* Write the DMA register */
2620 if (core_if->dma_enable) {
2621 if (/*(core_if->dma_enable)&&*/(ep->dma_addr==DMA_ADDR_INVALID)) {
2622 ep->dma_addr=dma_map_single(NULL,(void *)(ep->xfer_buff),(ep->xfer_len),DMA_TO_DEVICE);
2623 }
2624 if (core_if->dma_desc_enable == 0) {
2625 dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
2626
2627 VERIFY_PCD_DMA_ADDR(ep->dma_addr);
2628 dwc_write_reg32 (&(in_regs->diepdma),
2629 (uint32_t)ep->dma_addr);
2630 }
2631 }
2632 else {
2633 dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
2634 /**
2635 * Enable the Non-Periodic Tx FIFO empty interrupt,
2636 			 * or the Tx FIFO empty interrupt in dedicated Tx FIFO mode;
2637 			 * the data will be written into the FIFO by the ISR.
2638 */
2639 if(core_if->en_multiple_tx_fifo == 0) {
2640 intr_mask.b.nptxfempty = 1;
2641 dwc_modify_reg32(&core_if->core_global_regs->gintmsk,
2642 intr_mask.d32, intr_mask.d32);
2643 }
2644 else {
2645 /* Enable the Tx FIFO Empty Interrupt for this EP */
2646 if(ep->xfer_len > 0) {
2647 uint32_t fifoemptymsk = 0;
2648 fifoemptymsk = 1 << ep->num;
2649 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
2650 0, fifoemptymsk);
2651 }
2652 }
2653 }
2654
2655 /* EP enable, IN data in FIFO */
2656 depctl.b.cnak = 1;
2657 depctl.b.epena = 1;
2658 dwc_write_reg32(&in_regs->diepctl, depctl.d32);
2659
2660 depctl.d32 = dwc_read_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl);
2661 depctl.b.nextep = ep->num;
2662 dwc_write_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl, depctl.d32);
2663
2664 }
2665 else {
2666 /* OUT endpoint */
2667 dwc_otg_dev_out_ep_regs_t *out_regs =
2668 core_if->dev_if->out_ep_regs[ep->num];
2669
2670 depctl.d32 = dwc_read_reg32(&(out_regs->doepctl));
2671 deptsiz.d32 = dwc_read_reg32(&(out_regs->doeptsiz));
2672
2673 /* Zero Length Packet */
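		/* Even for a zero-length OUT, the transfer size is programmed as
		 * one full max-packet-size packet; the actual received length is
		 * reported at completion. */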
2674 deptsiz.b.xfersize = ep->maxpacket;
2675 deptsiz.b.pktcnt = 1;
2676
2677 if (core_if->dma_enable) {
2678 if (/*(core_if->dma_enable)&&*/(ep->dma_addr==DMA_ADDR_INVALID)) {
2679 				ep->dma_addr=dma_map_single(NULL,(void *)(ep->xfer_buff),(ep->xfer_len),DMA_FROM_DEVICE);	/* OUT EP: the core writes into this buffer */
2680 }
2681 if (!core_if->dma_desc_enable) {
2682 dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
2683
2684
2685 VERIFY_PCD_DMA_ADDR(ep->dma_addr);
2686 dwc_write_reg32 (&(out_regs->doepdma),
2687 (uint32_t)ep->dma_addr);
2688 }
2689 }
2690 else {
2691 dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
2692 }
2693
2694 /* EP enable */
2695 depctl.b.cnak = 1;
2696 depctl.b.epena = 1;
2697
2698 dwc_write_reg32(&out_regs->doepctl, depctl.d32);
2699
2700 }
2701 }
2702
2703 /**
2704 * This function does the setup for a data transfer for EP0 and starts
2705 * the transfer. For an IN transfer, the packets will be loaded into
2706 * the appropriate Tx FIFO in the ISR. For OUT transfers, the packets are
2707 * unloaded from the Rx FIFO in the ISR.
2708 *
2709 * @param core_if Programming view of DWC_otg controller.
2710 * @param ep The EP0 data.
2711 */
2712 void dwc_otg_ep0_start_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2713 {
2714 depctl_data_t depctl;
2715 deptsiz0_data_t deptsiz;
2716 gintmsk_data_t intr_mask = { .d32 = 0};
2717 dwc_otg_dma_desc_t* dma_desc;
2718
2719 DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d "
2720 "xfer_buff=%p start_xfer_buff=%p, dma_addr=%.8x\n",
2721 ep->num, (ep->is_in?"IN":"OUT"), ep->xfer_len,
2722 ep->xfer_count, ep->xfer_buff, ep->start_xfer_buff,ep->dma_addr);
2723
2724 ep->total_len = ep->xfer_len;
2725
2726 /* IN endpoint */
2727 if (ep->is_in == 1) {
2728 dwc_otg_dev_in_ep_regs_t *in_regs =
2729 core_if->dev_if->in_ep_regs[0];
2730
2731 gnptxsts_data_t gtxstatus;
2732
2733 gtxstatus.d32 =
2734 dwc_read_reg32(&core_if->core_global_regs->gnptxsts);
2735
2736 if(core_if->en_multiple_tx_fifo == 0 && gtxstatus.b.nptxqspcavail == 0) {
2737 #ifdef DEBUG
2738 deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz);
2739 DWC_DEBUGPL(DBG_PCD,"DIEPCTL0=%0x\n",
2740 dwc_read_reg32(&in_regs->diepctl));
2741 DWC_DEBUGPL(DBG_PCD, "DIEPTSIZ0=%0x (sz=%d, pcnt=%d)\n",
2742 deptsiz.d32,
2743 deptsiz.b.xfersize, deptsiz.b.pktcnt);
2744 DWC_PRINT("TX Queue or FIFO Full (0x%0x)\n",
2745 gtxstatus.d32);
2746 #endif
2747 return;
2748 }
2749
2750
2751 depctl.d32 = dwc_read_reg32(&in_regs->diepctl);
2752 deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz);
2753
2754 /* Zero Length Packet? */
2755 if (ep->xfer_len == 0) {
2756 deptsiz.b.xfersize = 0;
2757 deptsiz.b.pktcnt = 1;
2758 }
2759 else {
2760 			/* Program the transfer size and packet count as follows:
2761 			 *   xfersize = N * maxpacket + short_packet
2762 			 *   pktcnt = N + (short_packet exist ? 1 : 0)
2763 			 */
2765 if (ep->xfer_len > ep->maxpacket) {
2766 ep->xfer_len = ep->maxpacket;
2767 deptsiz.b.xfersize = ep->maxpacket;
2768 }
2769 else {
2770 deptsiz.b.xfersize = ep->xfer_len;
2771 }
2772 deptsiz.b.pktcnt = 1;
2773
2774 }
2775 DWC_DEBUGPL(DBG_PCDV, "IN len=%d xfersize=%d pktcnt=%d [%08x]\n",
2776 ep->xfer_len,
2777 deptsiz.b.xfersize, deptsiz.b.pktcnt, deptsiz.d32);
2778 /* Write the DMA register */
2779 if (core_if->dma_enable) {
2780 if (/*(core_if->dma_enable)&&*/(ep->dma_addr==DMA_ADDR_INVALID)) {
2781 ep->dma_addr=dma_map_single(NULL,(void *)(ep->xfer_buff),(ep->xfer_len),DMA_TO_DEVICE);
2782 }
2783 if(core_if->dma_desc_enable == 0) {
2784 dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
2785
2786 VERIFY_PCD_DMA_ADDR(ep->dma_addr);
2787 dwc_write_reg32 (&(in_regs->diepdma),
2788 (uint32_t)ep->dma_addr);
2789 }
2790 else {
2791 dma_desc = core_if->dev_if->in_desc_addr;
2792
2793 /** DMA Descriptor Setup */
2794 dma_desc->status.b.bs = BS_HOST_BUSY;
2795 dma_desc->status.b.l = 1;
2796 dma_desc->status.b.ioc = 1;
2797 dma_desc->status.b.sp = (ep->xfer_len == ep->maxpacket) ? 0 : 1;
2798 dma_desc->status.b.bytes = ep->xfer_len;
2799 dma_desc->buf = ep->dma_addr;
2800 dma_desc->status.b.bs = BS_HOST_READY;
2801
2802 /** DIEPDMA0 Register write */
2803
2804 VERIFY_PCD_DMA_ADDR(core_if->dev_if->dma_in_desc_addr);
2805 dwc_write_reg32(&in_regs->diepdma, core_if->dev_if->dma_in_desc_addr);
2806 }
2807 }
2808 else {
2809 dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
2810 }
2811
2812 /* EP enable, IN data in FIFO */
2813 depctl.b.cnak = 1;
2814 depctl.b.epena = 1;
2815 dwc_write_reg32(&in_regs->diepctl, depctl.d32);
2816
2817 /**
2818 * Enable the Non-Periodic Tx FIFO empty interrupt, the
2819 * data will be written into the fifo by the ISR.
2820 */
2821 if (!core_if->dma_enable) {
2822 if(core_if->en_multiple_tx_fifo == 0) {
2823 intr_mask.b.nptxfempty = 1;
2824 dwc_modify_reg32(&core_if->core_global_regs->gintmsk,
2825 intr_mask.d32, intr_mask.d32);
2826 }
2827 else {
2828 /* Enable the Tx FIFO Empty Interrupt for this EP */
2829 if(ep->xfer_len > 0) {
2830 uint32_t fifoemptymsk = 0;
2831 fifoemptymsk |= 1 << ep->num;
2832 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
2833 0, fifoemptymsk);
2834 }
2835 }
2836 }
2837 }
2838 else {
2839 /* OUT endpoint */
2840 dwc_otg_dev_out_ep_regs_t *out_regs =
2841 core_if->dev_if->out_ep_regs[0];
2842
2843 depctl.d32 = dwc_read_reg32(&out_regs->doepctl);
2844 deptsiz.d32 = dwc_read_reg32(&out_regs->doeptsiz);
2845
2846 /* Program the transfer size and packet count as follows:
2847 * xfersize = N * (maxpacket + 4 - (maxpacket % 4))
2848 * pktcnt = N */
2849 /* Zero Length Packet */
2850 deptsiz.b.xfersize = ep->maxpacket;
2851 deptsiz.b.pktcnt = 1;
2852
2853 DWC_DEBUGPL(DBG_PCDV, "len=%d xfersize=%d pktcnt=%d\n",
2854 ep->xfer_len,
2855 deptsiz.b.xfersize, deptsiz.b.pktcnt);
2856
2857 if (core_if->dma_enable) {
2858 if (/*(core_if->dma_enable)&&*/(ep->dma_addr==DMA_ADDR_INVALID)) {
2859 				ep->dma_addr=dma_map_single(NULL,(void *)(ep->xfer_buff),(ep->xfer_len),DMA_FROM_DEVICE);	/* OUT EP: the core writes into this buffer */
2860 }
2861 if(!core_if->dma_desc_enable) {
2862 dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
2863
2864
2865 VERIFY_PCD_DMA_ADDR(ep->dma_addr);
2866 dwc_write_reg32 (&(out_regs->doepdma),
2867 (uint32_t)ep->dma_addr);
2868 }
2869 else {
2870 dma_desc = core_if->dev_if->out_desc_addr;
2871
2872 /** DMA Descriptor Setup */
2873 dma_desc->status.b.bs = BS_HOST_BUSY;
2874 dma_desc->status.b.l = 1;
2875 dma_desc->status.b.ioc = 1;
2876 dma_desc->status.b.bytes = ep->maxpacket;
2877 dma_desc->buf = ep->dma_addr;
2878 dma_desc->status.b.bs = BS_HOST_READY;
2879
2880 /** DOEPDMA0 Register write */
2881 VERIFY_PCD_DMA_ADDR(core_if->dev_if->dma_out_desc_addr);
2882 dwc_write_reg32(&out_regs->doepdma, core_if->dev_if->dma_out_desc_addr);
2883 }
2884 }
2885 else {
2886 dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
2887 }
2888
2889 /* EP enable */
2890 depctl.b.cnak = 1;
2891 depctl.b.epena = 1;
2892 dwc_write_reg32 (&(out_regs->doepctl), depctl.d32);
2893 }
2894 }
2895
2896 /**
2897 * This function continues control IN transfers started by
2898 * dwc_otg_ep0_start_transfer, when the transfer does not fit in a
2899  * single packet. NOTE: the EP0 transfer size registers (DIEPTSIZ0/
2900  * DOEPTSIZ0) only have a small packet count field, so the transfer is
2901  * continued one packet at a time.
2901 *
2902 * @param core_if Programming view of DWC_otg controller.
2903 * @param ep The EP0 data.
2904 */
2905 void dwc_otg_ep0_continue_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2906 {
2907 depctl_data_t depctl;
2908 deptsiz0_data_t deptsiz;
2909 gintmsk_data_t intr_mask = { .d32 = 0};
2910 dwc_otg_dma_desc_t* dma_desc;
2911
2912 if (ep->is_in == 1) {
2913 dwc_otg_dev_in_ep_regs_t *in_regs =
2914 core_if->dev_if->in_ep_regs[0];
2915 gnptxsts_data_t tx_status = { .d32 = 0 };
2916
2917 tx_status.d32 = dwc_read_reg32(&core_if->core_global_regs->gnptxsts);
2918 		/** @todo Should there be a check for room in the Tx
2919 		 * Status Queue? If not, remove the read above this comment. */
2920
2921 depctl.d32 = dwc_read_reg32(&in_regs->diepctl);
2922 deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz);
2923
2924 		/* Program the transfer size and packet count as follows:
2925 		 *   xfersize = N * maxpacket + short_packet
2926 		 *   pktcnt = N + (short_packet exist ? 1 : 0)
2927 		 */
2929
2930
2931 if(core_if->dma_desc_enable == 0) {
2932 deptsiz.b.xfersize = (ep->total_len - ep->xfer_count) > ep->maxpacket ? ep->maxpacket :
2933 (ep->total_len - ep->xfer_count);
2934 deptsiz.b.pktcnt = 1;
2935 if(core_if->dma_enable == 0) {
2936 ep->xfer_len += deptsiz.b.xfersize;
2937 } else {
2938 ep->xfer_len = deptsiz.b.xfersize;
2939 }
2940 dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
2941 }
2942 else {
2943 ep->xfer_len = (ep->total_len - ep->xfer_count) > ep->maxpacket ? ep->maxpacket :
2944 (ep->total_len - ep->xfer_count);
2945
2946 dma_desc = core_if->dev_if->in_desc_addr;
2947
2948 /** DMA Descriptor Setup */
2949 dma_desc->status.b.bs = BS_HOST_BUSY;
2950 dma_desc->status.b.l = 1;
2951 dma_desc->status.b.ioc = 1;
2952 dma_desc->status.b.sp = (ep->xfer_len == ep->maxpacket) ? 0 : 1;
2953 dma_desc->status.b.bytes = ep->xfer_len;
2954 dma_desc->buf = ep->dma_addr;
2955 dma_desc->status.b.bs = BS_HOST_READY;
2956
2957
2958 /** DIEPDMA0 Register write */
2959 VERIFY_PCD_DMA_ADDR(core_if->dev_if->dma_in_desc_addr);
2960 dwc_write_reg32(&in_regs->diepdma, core_if->dev_if->dma_in_desc_addr);
2961 }
2962
2963
2964 DWC_DEBUGPL(DBG_PCDV, "IN len=%d xfersize=%d pktcnt=%d [%08x]\n",
2965 ep->xfer_len,
2966 deptsiz.b.xfersize, deptsiz.b.pktcnt, deptsiz.d32);
2967
2968 /* Write the DMA register */
2969 if (core_if->hwcfg2.b.architecture == DWC_INT_DMA_ARCH) {
2970 if(core_if->dma_desc_enable == 0){
2971
2972 VERIFY_PCD_DMA_ADDR(ep->dma_addr);
2973 dwc_write_reg32 (&(in_regs->diepdma), (uint32_t)ep->dma_addr);
2974 }
2975 }
2976
2977 /* EP enable, IN data in FIFO */
2978 depctl.b.cnak = 1;
2979 depctl.b.epena = 1;
2980 dwc_write_reg32(&in_regs->diepctl, depctl.d32);
2981
2982 /**
2983 * Enable the Non-Periodic Tx FIFO empty interrupt, the
2984 * data will be written into the fifo by the ISR.
2985 */
2986 if (!core_if->dma_enable) {
2987 if(core_if->en_multiple_tx_fifo == 0) {
2988 /* First clear it from GINTSTS */
2989 intr_mask.b.nptxfempty = 1;
2990 dwc_modify_reg32(&core_if->core_global_regs->gintmsk,
2991 intr_mask.d32, intr_mask.d32);
2992
2993 }
2994 else {
2995 /* Enable the Tx FIFO Empty Interrupt for this EP */
2996 if(ep->xfer_len > 0) {
2997 uint32_t fifoemptymsk = 0;
2998 fifoemptymsk |= 1 << ep->num;
2999 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
3000 0, fifoemptymsk);
3001 }
3002 }
3003 }
3004 }
3005 else {
3006 dwc_otg_dev_out_ep_regs_t *out_regs =
3007 core_if->dev_if->out_ep_regs[0];
3008
3009
3010 depctl.d32 = dwc_read_reg32(&out_regs->doepctl);
3011 deptsiz.d32 = dwc_read_reg32(&out_regs->doeptsiz);
3012
3013 		/* Program the transfer size and packet count as follows:
3014 		 *   xfersize = N * maxpacket + short_packet
3015 		 *   pktcnt = N + (short_packet exist ? 1 : 0)
3016 		 */
3018 deptsiz.b.xfersize = ep->maxpacket;
3019 deptsiz.b.pktcnt = 1;
3020
3021
3022 if(core_if->dma_desc_enable == 0) {
3023 dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
3024 }
3025 else {
3026 dma_desc = core_if->dev_if->out_desc_addr;
3027
3028 /** DMA Descriptor Setup */
3029 dma_desc->status.b.bs = BS_HOST_BUSY;
3030 dma_desc->status.b.l = 1;
3031 dma_desc->status.b.ioc = 1;
3032 dma_desc->status.b.bytes = ep->maxpacket;
3033 dma_desc->buf = ep->dma_addr;
3034 dma_desc->status.b.bs = BS_HOST_READY;
3035
3036 /** DOEPDMA0 Register write */
3037 VERIFY_PCD_DMA_ADDR(core_if->dev_if->dma_out_desc_addr);
3038 dwc_write_reg32(&out_regs->doepdma, core_if->dev_if->dma_out_desc_addr);
3039 }
3040
3041
3042 		DWC_DEBUGPL(DBG_PCDV, "OUT len=%d xfersize=%d pktcnt=%d [%08x]\n",
3043 ep->xfer_len,
3044 deptsiz.b.xfersize, deptsiz.b.pktcnt, deptsiz.d32);
3045
3046 /* Write the DMA register */
3047 if (core_if->hwcfg2.b.architecture == DWC_INT_DMA_ARCH) {
3048 if(core_if->dma_desc_enable == 0){
3049
3050 VERIFY_PCD_DMA_ADDR(ep->dma_addr);
3051 dwc_write_reg32 (&(out_regs->doepdma), (uint32_t)ep->dma_addr);
3052 }
3053 }
3054
3055 		/* EP enable */
3056 depctl.b.cnak = 1;
3057 depctl.b.epena = 1;
3058 dwc_write_reg32(&out_regs->doepctl, depctl.d32);
3059
3060 }
3061 }
3062
3063 #ifdef DEBUG
3064 void dump_msg(const u8 *buf, unsigned int length)
3065 {
3066 unsigned int start, num, i;
3067 char line[52], *p;
3068
3069 if (length >= 512)
3070 return;
3071 start = 0;
3072 while (length > 0) {
3073 num = min(length, 16u);
3074 p = line;
3075 for (i = 0; i < num; ++i)
3076 {
3077 if (i == 8)
3078 *p++ = ' ';
3079 sprintf(p, " %02x", buf[i]);
3080 p += 3;
3081 }
3082 *p = 0;
3083 DWC_PRINT("%6x: %s\n", start, line);
3084 buf += num;
3085 start += num;
3086 length -= num;
3087 }
3088 }
3089 #else
3090 static inline void dump_msg(const u8 *buf, unsigned int length)
3091 {
3092 }
3093 #endif
3094
3095 /**
3096 * This function writes a packet into the Tx FIFO associated with the
3097 * EP. For non-periodic EPs the non-periodic Tx FIFO is written. For
3098 * periodic EPs the periodic Tx FIFO associated with the EP is written
3099 * with all packets for the next micro-frame.
3100 *
3101 * @param core_if Programming view of DWC_otg controller.
3102 * @param ep The EP to write packet for.
3103 * @param dma Indicates if DMA is being used.
3104 */
3105 void dwc_otg_ep_write_packet(dwc_otg_core_if_t *core_if, dwc_ep_t *ep, int dma)
3106 {
3107 /**
3108 * The buffer is padded to DWORD on a per packet basis in
3109 * slave/dma mode if the MPS is not DWORD aligned. The last
3110 * packet, if short, is also padded to a multiple of DWORD.
3111 *
3112 * ep->xfer_buff always starts DWORD aligned in memory and is a
3113 * multiple of DWORD in length
3114 *
3115 * ep->xfer_len can be any number of bytes
3116 *
3117 * ep->xfer_count is a multiple of ep->maxpacket until the last
3118 * packet
3119 *
3120 * FIFO access is DWORD */
3121
3122 uint32_t i;
3123 uint32_t byte_count;
3124 uint32_t dword_count;
3125 uint32_t *fifo;
3126 uint32_t *data_buff = (uint32_t *)ep->xfer_buff;
3127
3128 DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p)\n", __func__, core_if, ep);
3129 if (ep->xfer_count >= ep->xfer_len) {
3130 DWC_WARN("%s() No data for EP%d!!!\n", __func__, ep->num);
3131 return;
3132 }
3133
3134 /* Find the byte length of the packet either short packet or MPS */
3135 if ((ep->xfer_len - ep->xfer_count) < ep->maxpacket) {
3136 byte_count = ep->xfer_len - ep->xfer_count;
3137 }
3138 else {
3139 byte_count = ep->maxpacket;
3140 }
3141
3142 	/* Find the DWORD length, padded by extra bytes as necessary if MPS
3143 * is not a multiple of DWORD */
3144 dword_count = (byte_count + 3) / 4;
3145
3146 #ifdef VERBOSE
3147 dump_msg(ep->xfer_buff, byte_count);
3148 #endif
3149
3150 /**@todo NGS Where are the Periodic Tx FIFO addresses
3151 	 * initialized? What should this be? */
3152
3153 fifo = core_if->data_fifo[ep->num];
3154
3155
3156 DWC_DEBUGPL((DBG_PCDV|DBG_CILV), "fifo=%p buff=%p *p=%08x bc=%d\n", fifo, data_buff, *data_buff, byte_count);
3157
3158 if (!dma) {
3159 for (i=0; i<dword_count; i++, data_buff++) {
3160 dwc_write_reg32(fifo, *data_buff);
3161 }
3162 }
3163
3164 ep->xfer_count += byte_count;
3165 ep->xfer_buff += byte_count;
3166 ep->dma_addr += byte_count;
3167 }
3168
3169 /**
3170 * Set the EP STALL.
3171 *
3172 * @param core_if Programming view of DWC_otg controller.
3173 * @param ep The EP to set the stall on.
3174 */
3175 void dwc_otg_ep_set_stall(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
3176 {
3177 depctl_data_t depctl;
3178 volatile uint32_t *depctl_addr;
3179
3180 	DWC_DEBUGPL(DBG_PCDV, "%s ep%d-%s\n", __func__, ep->num,
3181 (ep->is_in?"IN":"OUT"));
3182
3183 DWC_PRINT("%s ep%d-%s\n", __func__, ep->num,
3184 (ep->is_in?"in":"out"));
3185
3186 if (ep->is_in == 1) {
3187 depctl_addr = &(core_if->dev_if->in_ep_regs[ep->num]->diepctl);
3188 depctl.d32 = dwc_read_reg32(depctl_addr);
3189
3190 /* set the disable and stall bits */
3191 #if 0
3192 	//epdis was set here but never cleared in the later dwc_otg_ep_clear_stall,
3193 	//which made testusb case 13 fail (host: PC, device: OTG device)
3194 if (depctl.b.epena) {
3195 depctl.b.epdis = 1;
3196 }
3197 #endif
3198 depctl.b.stall = 1;
3199 dwc_write_reg32(depctl_addr, depctl.d32);
3200 }
3201 else {
3202 depctl_addr = &(core_if->dev_if->out_ep_regs[ep->num]->doepctl);
3203 depctl.d32 = dwc_read_reg32(depctl_addr);
3204
3205 /* set the stall bit */
3206 depctl.b.stall = 1;
3207 dwc_write_reg32(depctl_addr, depctl.d32);
3208 }
3209
3210 DWC_DEBUGPL(DBG_PCDV,"%s: DEPCTL(%.8x)=%0x\n",__func__,(u32)depctl_addr,dwc_read_reg32(depctl_addr));
3211
3212 return;
3213 }
3214
3215 /**
3216 * Clear the EP STALL.
3217 *
3218 * @param core_if Programming view of DWC_otg controller.
3219 * @param ep The EP to clear stall from.
3220 */
3221 void dwc_otg_ep_clear_stall(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
3222 {
3223 depctl_data_t depctl;
3224 volatile uint32_t *depctl_addr;
3225
3226 DWC_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, ep->num,
3227 (ep->is_in?"IN":"OUT"));
3228
3229 if (ep->is_in == 1) {
3230 depctl_addr = &(core_if->dev_if->in_ep_regs[ep->num]->diepctl);
3231 }
3232 else {
3233 depctl_addr = &(core_if->dev_if->out_ep_regs[ep->num]->doepctl);
3234 }
3235
3236 depctl.d32 = dwc_read_reg32(depctl_addr);
3237
3238 /* clear the stall bits */
3239 depctl.b.stall = 0;
3240
3241 /*
3242 * USB Spec 9.4.5: For endpoints using data toggle, regardless
3243 * of whether an endpoint has the Halt feature set, a
3244 * ClearFeature(ENDPOINT_HALT) request always results in the
3245 * data toggle being reinitialized to DATA0.
3246 */
3247 if (ep->type == DWC_OTG_EP_TYPE_INTR ||
3248 ep->type == DWC_OTG_EP_TYPE_BULK) {
3249 depctl.b.setd0pid = 1; /* DATA0 */
3250 }
3251
3252 dwc_write_reg32(depctl_addr, depctl.d32);
3253 DWC_DEBUGPL(DBG_PCD,"DEPCTL=%0x\n",dwc_read_reg32(depctl_addr));
3254 return;
3255 }
3256
3257 /**
3258 * This function reads a packet from the Rx FIFO into the destination
3259 * buffer. To read SETUP data use dwc_otg_read_setup_packet.
3260 *
3261 * @param core_if Programming view of DWC_otg controller.
3262 * @param dest Destination buffer for the packet.
3263 * @param bytes Number of bytes to copy to the destination.
3264 */
3265 void dwc_otg_read_packet(dwc_otg_core_if_t *core_if,
3266 uint8_t *dest,
3267 uint16_t bytes)
3268 {
3269 int i;
3270 int word_count = (bytes + 3) / 4;
3271
3272 volatile uint32_t *fifo = core_if->data_fifo[0];
3273 uint32_t *data_buff = (uint32_t *)dest;
3274
3275 /**
3276 * @todo Account for the case where _dest is not dword aligned. This
3277 * requires reading data from the FIFO into a uint32_t temp buffer,
3278 * then moving it into the data buffer.
3279 */
3280
3281 DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p,%d)\n", __func__,
3282 core_if, dest, bytes);
3283
3284 for (i=0; i<word_count; i++, data_buff++)
3285 {
3286 *data_buff = dwc_read_reg32(fifo);
3287 }
3288
3289 return;
3290 }
3291
3292
3293
3294 /**
3295  * This function reads the device registers and prints them
3296 *
3297 * @param core_if Programming view of DWC_otg controller.
3298 */
3299 void dwc_otg_dump_dev_registers(dwc_otg_core_if_t *core_if)
3300 {
3301 int i;
3302 volatile uint32_t *addr;
3303
3304 DWC_PRINT("Device Global Registers\n");
3305 addr=&core_if->dev_if->dev_global_regs->dcfg;
3306 DWC_PRINT("DCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3307 addr=&core_if->dev_if->dev_global_regs->dctl;
3308 DWC_PRINT("DCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3309 addr=&core_if->dev_if->dev_global_regs->dsts;
3310 DWC_PRINT("DSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3311 addr=&core_if->dev_if->dev_global_regs->diepmsk;
3312 DWC_PRINT("DIEPMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3313 addr=&core_if->dev_if->dev_global_regs->doepmsk;
3314 DWC_PRINT("DOEPMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3315 addr=&core_if->dev_if->dev_global_regs->daint;
3316 DWC_PRINT("DAINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3317 addr=&core_if->dev_if->dev_global_regs->daintmsk;
3318 DWC_PRINT("DAINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3319 addr=&core_if->dev_if->dev_global_regs->dtknqr1;
3320 DWC_PRINT("DTKNQR1 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3321 if (core_if->hwcfg2.b.dev_token_q_depth > 6) {
3322 addr=&core_if->dev_if->dev_global_regs->dtknqr2;
3323 DWC_PRINT("DTKNQR2 @0x%08X : 0x%08X\n",
3324 (uint32_t)addr,dwc_read_reg32(addr));
3325 }
3326
3327 addr=&core_if->dev_if->dev_global_regs->dvbusdis;
3328 	DWC_PRINT("DVBUSDIS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3329
3330 addr=&core_if->dev_if->dev_global_regs->dvbuspulse;
3331 DWC_PRINT("DVBUSPULSE @0x%08X : 0x%08X\n",
3332 (uint32_t)addr,dwc_read_reg32(addr));
3333
3334 if (core_if->hwcfg2.b.dev_token_q_depth > 14) {
3335 addr=&core_if->dev_if->dev_global_regs->dtknqr3_dthrctl;
3336 DWC_PRINT("DTKNQR3_DTHRCTL @0x%08X : 0x%08X\n",
3337 (uint32_t)addr, dwc_read_reg32(addr));
3338 }
3339 /*
3340 if (core_if->hwcfg2.b.dev_token_q_depth > 22) {
3341 addr=&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk;
3342 DWC_PRINT("DTKNQR4 @0x%08X : 0x%08X\n",
3343 (uint32_t)addr, dwc_read_reg32(addr));
3344 }
3345 */
3346 addr=&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk;
3347 DWC_PRINT("FIFOEMPMSK @0x%08X : 0x%08X\n", (uint32_t)addr, dwc_read_reg32(addr));
3348
3349 addr=&core_if->dev_if->dev_global_regs->deachint;
3350 DWC_PRINT("DEACHINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3351 addr=&core_if->dev_if->dev_global_regs->deachintmsk;
3352 DWC_PRINT("DEACHINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3353
3354 for (i=0; i<= core_if->dev_if->num_in_eps; i++) {
3355 addr=&core_if->dev_if->dev_global_regs->diepeachintmsk[i];
3356 DWC_PRINT("DIEPEACHINTMSK[%d] @0x%08X : 0x%08X\n", i, (uint32_t)addr, dwc_read_reg32(addr));
3357 }
3358
3359
3360 for (i=0; i<= core_if->dev_if->num_out_eps; i++) {
3361 addr=&core_if->dev_if->dev_global_regs->doepeachintmsk[i];
3362 DWC_PRINT("DOEPEACHINTMSK[%d] @0x%08X : 0x%08X\n", i, (uint32_t)addr, dwc_read_reg32(addr));
3363 }
3364
3365 for (i=0; i<= core_if->dev_if->num_in_eps; i++) {
3366 DWC_PRINT("Device IN EP %d Registers\n", i);
3367 addr=&core_if->dev_if->in_ep_regs[i]->diepctl;
3368 DWC_PRINT("DIEPCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3369 addr=&core_if->dev_if->in_ep_regs[i]->diepint;
3370 DWC_PRINT("DIEPINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3371 addr=&core_if->dev_if->in_ep_regs[i]->dieptsiz;
3372 		DWC_PRINT("DIEPTSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3373 addr=&core_if->dev_if->in_ep_regs[i]->diepdma;
3374 DWC_PRINT("DIEPDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3375 addr=&core_if->dev_if->in_ep_regs[i]->dtxfsts;
3376 DWC_PRINT("DTXFSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3377 		//reading DIEPDMAB in non-descriptor DMA mode would hang the AHB bus
3378 if(core_if->dma_desc_enable){
3379 addr=&core_if->dev_if->in_ep_regs[i]->diepdmab;
3380 DWC_PRINT("DIEPDMAB @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3381 }
3382 }
3383
3384
3385 for (i=0; i<= core_if->dev_if->num_out_eps; i++) {
3386 DWC_PRINT("Device OUT EP %d Registers\n", i);
3387 addr=&core_if->dev_if->out_ep_regs[i]->doepctl;
3388 DWC_PRINT("DOEPCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3389 addr=&core_if->dev_if->out_ep_regs[i]->doepfn;
3390 DWC_PRINT("DOEPFN @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3391 addr=&core_if->dev_if->out_ep_regs[i]->doepint;
3392 DWC_PRINT("DOEPINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3393 addr=&core_if->dev_if->out_ep_regs[i]->doeptsiz;
3394 		DWC_PRINT("DOEPTSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3395 addr=&core_if->dev_if->out_ep_regs[i]->doepdma;
3396 DWC_PRINT("DOEPDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3397
3398 		//reading DOEPDMAB in non-descriptor DMA mode would hang the AHB bus
3399 if(core_if->dma_desc_enable){
3400 addr=&core_if->dev_if->out_ep_regs[i]->doepdmab;
3401 DWC_PRINT("DOEPDMAB @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3402 }
3403
3404 }
3405
3406
3407
3408 return;
3409 }
3410
3411 /**
3412  * This function reads the SPRAM and prints its contents
3413 *
3414 * @param core_if Programming view of DWC_otg controller.
3415 */
3416 void dwc_otg_dump_spram(dwc_otg_core_if_t *core_if)
3417 {
3418 volatile uint8_t *addr, *start_addr, *end_addr;
3419
3420 DWC_PRINT("SPRAM Data:\n");
3421 start_addr = (void*)core_if->core_global_regs;
3422 DWC_PRINT("Base Address: 0x%8X\n", (uint32_t)start_addr);
3423 start_addr += 0x00028000;
3424 end_addr=(void*)core_if->core_global_regs;
3425 end_addr += 0x000280e0;
3426
3427 for(addr = start_addr; addr < end_addr; addr+=16)
3428 {
3429 DWC_PRINT("0x%8X:\t%2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X\n", (uint32_t)addr,
3430 addr[0],
3431 addr[1],
3432 addr[2],
3433 addr[3],
3434 addr[4],
3435 addr[5],
3436 addr[6],
3437 addr[7],
3438 addr[8],
3439 addr[9],
3440 addr[10],
3441 addr[11],
3442 addr[12],
3443 addr[13],
3444 addr[14],
3445 addr[15]
3446 );
3447 }
3448
3449 return;
3450 }
3451 /**
3452 * This function reads the host registers and prints them
3453 *
3454 * @param core_if Programming view of DWC_otg controller.
3455 */
3456 void dwc_otg_dump_host_registers(dwc_otg_core_if_t *core_if)
3457 {
3458 int i;
3459 volatile uint32_t *addr;
3460
3461 DWC_PRINT("Host Global Registers\n");
3462 addr=&core_if->host_if->host_global_regs->hcfg;
3463 DWC_PRINT("HCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3464 addr=&core_if->host_if->host_global_regs->hfir;
3465 DWC_PRINT("HFIR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3466 addr=&core_if->host_if->host_global_regs->hfnum;
3467 DWC_PRINT("HFNUM @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3468 addr=&core_if->host_if->host_global_regs->hptxsts;
3469 DWC_PRINT("HPTXSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3470 addr=&core_if->host_if->host_global_regs->haint;
3471 DWC_PRINT("HAINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3472 addr=&core_if->host_if->host_global_regs->haintmsk;
3473 DWC_PRINT("HAINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3474 addr=core_if->host_if->hprt0;
3475 DWC_PRINT("HPRT0 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3476
3477 for (i=0; i<core_if->core_params->host_channels; i++)
3478 {
3479 DWC_PRINT("Host Channel %d Specific Registers\n", i);
3480 addr=&core_if->host_if->hc_regs[i]->hcchar;
3481 DWC_PRINT("HCCHAR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3482 addr=&core_if->host_if->hc_regs[i]->hcsplt;
3483 DWC_PRINT("HCSPLT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3484 addr=&core_if->host_if->hc_regs[i]->hcint;
3485 DWC_PRINT("HCINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3486 addr=&core_if->host_if->hc_regs[i]->hcintmsk;
3487 DWC_PRINT("HCINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3488 addr=&core_if->host_if->hc_regs[i]->hctsiz;
3489 DWC_PRINT("HCTSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3490 addr=&core_if->host_if->hc_regs[i]->hcdma;
3491 DWC_PRINT("HCDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3492 }
3493 return;
3494 }
3495
3496 /**
3497 * This function reads the core global registers and prints them
3498 *
3499 * @param core_if Programming view of DWC_otg controller.
3500 */
3501 void dwc_otg_dump_global_registers(dwc_otg_core_if_t *core_if)
3502 {
3503 int i,size;
3504 char* str;
3505 volatile uint32_t *addr;
3506
3507 DWC_PRINT("Core Global Registers\n");
3508 addr=&core_if->core_global_regs->gotgctl;
3509 DWC_PRINT("GOTGCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3510 addr=&core_if->core_global_regs->gotgint;
3511 DWC_PRINT("GOTGINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3512 addr=&core_if->core_global_regs->gahbcfg;
3513 DWC_PRINT("GAHBCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3514 addr=&core_if->core_global_regs->gusbcfg;
3515 DWC_PRINT("GUSBCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3516 addr=&core_if->core_global_regs->grstctl;
3517 DWC_PRINT("GRSTCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3518 addr=&core_if->core_global_regs->gintsts;
3519 DWC_PRINT("GINTSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3520 addr=&core_if->core_global_regs->gintmsk;
3521 DWC_PRINT("GINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3522 addr=&core_if->core_global_regs->grxstsr;
3523 DWC_PRINT("GRXSTSR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3524 //addr=&core_if->core_global_regs->grxstsp;
3525 //DWC_PRINT("GRXSTSP @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3526 addr=&core_if->core_global_regs->grxfsiz;
3527 DWC_PRINT("GRXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3528 addr=&core_if->core_global_regs->gnptxfsiz;
3529 DWC_PRINT("GNPTXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3530 addr=&core_if->core_global_regs->gnptxsts;
3531 DWC_PRINT("GNPTXSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3532 addr=&core_if->core_global_regs->gi2cctl;
3533 DWC_PRINT("GI2CCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3534 addr=&core_if->core_global_regs->gpvndctl;
3535 DWC_PRINT("GPVNDCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3536 addr=&core_if->core_global_regs->ggpio;
3537 DWC_PRINT("GGPIO @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3538 addr=&core_if->core_global_regs->guid;
3539 DWC_PRINT("GUID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3540 addr=&core_if->core_global_regs->gsnpsid;
3541 DWC_PRINT("GSNPSID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3542 addr=&core_if->core_global_regs->ghwcfg1;
3543 DWC_PRINT("GHWCFG1 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3544 addr=&core_if->core_global_regs->ghwcfg2;
3545 DWC_PRINT("GHWCFG2 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3546 addr=&core_if->core_global_regs->ghwcfg3;
3547 DWC_PRINT("GHWCFG3 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3548 addr=&core_if->core_global_regs->ghwcfg4;
3549 DWC_PRINT("GHWCFG4 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3550 addr=&core_if->core_global_regs->hptxfsiz;
3551 DWC_PRINT("HPTXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3552
3553 size=(core_if->hwcfg4.b.ded_fifo_en)?
3554 core_if->hwcfg4.b.num_in_eps:core_if->hwcfg4.b.num_dev_perio_in_ep;
3555 str=(core_if->hwcfg4.b.ded_fifo_en)?"DIEPTXF":"DPTXFSIZ";
3556 for (i=0; i<size; i++)
3557 {
3558 addr=&core_if->core_global_regs->dptxfsiz_dieptxf[i];
3559 DWC_PRINT("%s[%d] @0x%08X : 0x%08X\n",str,i,(uint32_t)addr,dwc_read_reg32(addr));
3560 }
3561 }
3562
3563 /**
3564 * Flush a Tx FIFO.
3565 *
3566 * @param core_if Programming view of DWC_otg controller.
3567 * @param num Tx FIFO to flush.
3568 */
3569 void dwc_otg_flush_tx_fifo(dwc_otg_core_if_t *core_if,
3570 const int num)
3571 {
3572 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
3573 volatile grstctl_t greset = { .d32 = 0};
3574 int count = 0;
3575
3576 DWC_DEBUGPL((DBG_CIL|DBG_PCDV), "Flush Tx FIFO %d\n", num);
3577
3578 greset.b.txfflsh = 1;
3579 greset.b.txfnum = num;
3580 dwc_write_reg32(&global_regs->grstctl, greset.d32);
3581
3582 do {
3583 greset.d32 = dwc_read_reg32(&global_regs->grstctl);
3584 if (++count > 10000) {
3585 DWC_WARN("%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
3586 __func__, greset.d32,
3587 dwc_read_reg32(&global_regs->gnptxsts));
3588 break;
3589 }
3590 udelay(1);
3591 }
3592 while (greset.b.txfflsh == 1);
3593
3594 /* Wait for 3 PHY clocks */
3595 UDELAY(1);
3596 }
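
/*
 * Usage sketch (illustrative, based on how this driver calls the flush
 * helpers elsewhere): after reconfiguring the FIFO layout a caller
 * typically flushes the Rx FIFO and either one Tx FIFO by number or all
 * of them at once.  The 0x10 "flush all Tx FIFOs" encoding is an
 * assumption carried over from the core's GRSTCTL.TxFNum definition.
 *
 *	dwc_otg_flush_tx_fifo(core_if, 0);	// non-periodic Tx FIFO
 *	dwc_otg_flush_tx_fifo(core_if, 0x10);	// all Tx FIFOs (assumed encoding)
 *	dwc_otg_flush_rx_fifo(core_if);
 */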
3597
3598 /**
3599 * Flush Rx FIFO.
3600 *
3601 * @param core_if Programming view of DWC_otg controller.
3602 */
3603 void dwc_otg_flush_rx_fifo(dwc_otg_core_if_t *core_if)
3604 {
3605 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
3606 volatile grstctl_t greset = { .d32 = 0};
3607 int count = 0;
3608
3609 DWC_DEBUGPL((DBG_CIL|DBG_PCDV), "%s\n", __func__);
3613 greset.b.rxfflsh = 1;
3614 dwc_write_reg32(&global_regs->grstctl, greset.d32);
3615
3616 do {
3617 greset.d32 = dwc_read_reg32(&global_regs->grstctl);
3618 if (++count > 10000) {
3619 DWC_WARN("%s() HANG! GRSTCTL=%0x\n", __func__,
3620 greset.d32);
3621 break;
3622 }
3623 udelay(1);
3624 }
3625 while (greset.b.rxfflsh == 1);
3626
3627 /* Wait for 3 PHY clocks */
3628 UDELAY(1);
3629 }
3630
3631 /**
3632 * Do a soft reset of the core. Be careful with this because it
3633 * resets all the internal state machines of the core.
3634 */
3635 void dwc_otg_core_reset(dwc_otg_core_if_t *core_if)
3636 {
3637 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
3638 volatile grstctl_t greset = { .d32 = 0};
3639 int count = 0;
3640
3641 DWC_DEBUGPL(DBG_CILV, "%s\n", __func__);
3642 /* Wait for AHB master IDLE state. */
3643 do {
3644 UDELAY(10);
3645 greset.d32 = dwc_read_reg32(&global_regs->grstctl);
3646 if (++count > 100000) {
3647 DWC_WARN("%s() HANG! AHB Idle GRSTCTL=%0x\n", __func__,
3648 greset.d32);
3649 return;
3650 }
3651 }
3652 while (greset.b.ahbidle == 0);
3653
3654 /* Core Soft Reset */
3655 count = 0;
3656 greset.b.csftrst = 1;
3657 dwc_write_reg32(&global_regs->grstctl, greset.d32);
3658 do {
3659 greset.d32 = dwc_read_reg32(&global_regs->grstctl);
3660 if (++count > 10000) {
3661 DWC_WARN("%s() HANG! Soft Reset GRSTCTL=%0x\n", __func__,
3662 greset.d32);
3663 break;
3664 }
3665 udelay(1);
3666 }
3667 while (greset.b.csftrst == 1);
3668
3669 /* Wait at least 3 PHY clocks after the soft reset; a conservative 100 ms delay is used here. */
3670 MDELAY(100);
3671
3672 DWC_DEBUGPL(DBG_CILV, "GINTSTS=%.8x\n", dwc_read_reg32(&global_regs->gintsts));
3673 DWC_DEBUGPL(DBG_CILV, "GINTSTS=%.8x\n", dwc_read_reg32(&global_regs->gintsts));
3674 DWC_DEBUGPL(DBG_CILV, "GINTSTS=%.8x\n", dwc_read_reg32(&global_regs->gintsts));
3675
3676 }
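
/*
 * Re-initialization sketch (illustrative only, never built): the soft reset
 * above wipes all internal state machines, so the core configuration has to
 * be reprogrammed afterwards.  A minimal sequence, assuming the caller holds
 * whatever locking the surrounding driver requires, might look like this;
 * the helper name and the 0x10 "flush all" value are assumptions.
 */
#if 0
static void dwc_otg_reset_and_flush(dwc_otg_core_if_t *core_if)
{
	dwc_otg_core_reset(core_if);		/* soft reset; waits for AHB idle first */
	dwc_otg_flush_tx_fifo(core_if, 0x10);	/* 0x10: assumed "all Tx FIFOs" encoding */
	dwc_otg_flush_rx_fifo(core_if);
	/* ...followed by core/mode re-initialization elsewhere in the driver. */
}
#endif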
3677
3678
3679
3680 /**
3681 * Register HCD callbacks. The callbacks are used to start and stop
3682 * the HCD for interrupt processing.
3683 *
3684 * @param core_if Programming view of DWC_otg controller.
3685 * @param cb the HCD callback structure.
3686 * @param p pointer to be passed to callback function (usb_hcd*).
3687 */
3688 void dwc_otg_cil_register_hcd_callbacks(dwc_otg_core_if_t *core_if,
3689 dwc_otg_cil_callbacks_t *cb,
3690 void *p)
3691 {
3692 core_if->hcd_cb = cb;
3693 cb->p = p;
3694 }
3695
3696 /**
3697 * Register PCD callbacks. The callbacks are used to start and stop
3698 * the PCD for interrupt processing.
3699 *
3700 * @param core_if Programming view of DWC_otg controller.
3701 * @param cb the PCD callback structure.
3702 * @param p pointer to be passed to callback function (pcd*).
3703 */
3704 void dwc_otg_cil_register_pcd_callbacks(dwc_otg_core_if_t *core_if,
3705 dwc_otg_cil_callbacks_t *cb,
3706 void *p)
3707 {
3708 core_if->pcd_cb = cb;
3709 cb->p = p;
3710 }
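
/*
 * Registration sketch (illustrative only): the HCD/PCD layers hand the CIL
 * a statically allocated callback table plus an opaque pointer that is
 * passed back on every callback.  Only the .p member is visible in this
 * file; the .start/.stop member names below are an assumption based on the
 * comments above, and the handler names are hypothetical.
 *
 *	static dwc_otg_cil_callbacks_t pcd_callbacks = {
 *		.start = my_pcd_start,
 *		.stop  = my_pcd_stop,
 *	};
 *
 *	dwc_otg_cil_register_pcd_callbacks(core_if, &pcd_callbacks, pcd);
 */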
3711
3712 #ifdef DWC_EN_ISOC
3713
3714 /**
3715 * This function writes one (micro)frame's worth of isochronous data into the Tx FIFO
3716 *
3717 * @param core_if Programming view of DWC_otg controller.
3718 * @param ep The EP to start the transfer on.
3719 *
3720 */
3721 void write_isoc_frame_data(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
3722 {
3723 dwc_otg_dev_in_ep_regs_t *ep_regs;
3724 dtxfsts_data_t txstatus = {.d32 = 0};
3725 uint32_t len = 0;
3726 uint32_t dwords;
3727
3728 ep->xfer_len = ep->data_per_frame;
3729 ep->xfer_count = 0;
3730
3731 ep_regs = core_if->dev_if->in_ep_regs[ep->num];
3732
3733 len = ep->xfer_len - ep->xfer_count;
3734
3735 if (len > ep->maxpacket) {
3736 len = ep->maxpacket;
3737 }
3738
3739 dwords = (len + 3)/4;
3740
3741 /* While there is space in the queue and space in the FIFO, and
3742 * more data to transfer, write packets to the Tx FIFO. */
3743 txstatus.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dtxfsts);
3744 DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n",ep->num,txstatus.d32);
3745
3746 while (txstatus.b.txfspcavail > dwords &&
3747 ep->xfer_count < ep->xfer_len &&
3748 ep->xfer_len != 0) {
3749 /* Write the FIFO */
3750 dwc_otg_ep_write_packet(core_if, ep, 0);
3751
3752 len = ep->xfer_len - ep->xfer_count;
3753 if (len > ep->maxpacket) {
3754 len = ep->maxpacket;
3755 }
3756
3757 dwords = (len + 3)/4;
3758 txstatus.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dtxfsts);
3759 DWC_DEBUGPL(DBG_PCDV,"dtxfsts[%d]=0x%08x\n", ep->num, txstatus.d32);
3760 }
3761 }
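
/*
 * Worked example for the space check above (numbers are illustrative): with
 * maxpacket = 188 bytes, dwords = (188 + 3) / 4 = 47, so a packet is only
 * written while DTXFSTS.txfspcavail reports more than 47 free 32-bit words.
 * Requiring strictly more than one packet's worth of space mirrors the loop
 * condition txfspcavail > dwords used in write_isoc_frame_data().
 */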
3762
3763
3764 /**
3765 * This function sets up and starts the transfer of one isochronous (micro)frame on an EP
3766 *
3767 * @param core_if Programming view of DWC_otg controller.
3768 * @param ep The EP to start the transfer on.
3769 *
3770 */
3771 void dwc_otg_iso_ep_start_frm_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
3772 {
3773 deptsiz_data_t deptsiz = { .d32 = 0 };
3774 depctl_data_t depctl = { .d32 = 0 };
3775 dsts_data_t dsts = { .d32 = 0 };
3776 volatile uint32_t *addr;
3777
3778 if(ep->is_in) {
3779 addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
3780 } else {
3781 addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
3782 }
3783
3784 ep->xfer_len = ep->data_per_frame;
3785 ep->xfer_count = 0;
3786 ep->xfer_buff = ep->cur_pkt_addr;
3787 ep->dma_addr = ep->cur_pkt_dma_addr;
3788
3789 if(ep->is_in) {
3790 /* Program the transfer size and packet count
3791 * as follows:
3792 *	xfersize = N * maxpacket + short_packet
3793 *	pktcnt   = N + (short_packet exists ? 1 : 0)
3794 */
3795 deptsiz.b.xfersize = ep->xfer_len;
3796 deptsiz.b.pktcnt =
3797 (ep->xfer_len - 1 + ep->maxpacket) /
3798 ep->maxpacket;
3799 deptsiz.b.mc = deptsiz.b.pktcnt;
3800 dwc_write_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz, deptsiz.d32);
3801
3802 /* Write the DMA register */
3803 if (core_if->dma_enable) {
3804 dwc_write_reg32 (&(core_if->dev_if->in_ep_regs[ep->num]->diepdma), (uint32_t)ep->dma_addr);
3805 }
3806 } else {
3807 deptsiz.b.pktcnt =
3808 (ep->xfer_len + (ep->maxpacket - 1)) /
3809 ep->maxpacket;
3810 deptsiz.b.xfersize = deptsiz.b.pktcnt * ep->maxpacket;
3811
3812 dwc_write_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz, deptsiz.d32);
3813
3814 if (core_if->dma_enable) {
3815 dwc_write_reg32 (&(core_if->dev_if->out_ep_regs[ep->num]->doepdma),
3816 (uint32_t)ep->dma_addr);
3817 }
3818 }
3819
3820
3821 /* Enable the endpoint and clear NAK */
3822
3823 depctl.d32 = 0;
3824 if(ep->bInterval == 1) {
3825 dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts);
3826 ep->next_frame = dsts.b.soffn + ep->bInterval;
3827
3828 if(ep->next_frame & 0x1) {
3829 depctl.b.setd1pid = 1;
3830 } else {
3831 depctl.b.setd0pid = 1;
3832 }
3833 } else {
3834 ep->next_frame += ep->bInterval;
3835
3836 if(ep->next_frame & 0x1) {
3837 depctl.b.setd1pid = 1;
3838 } else {
3839 depctl.b.setd0pid = 1;
3840 }
3841 }
3842 depctl.b.epena = 1;
3843 depctl.b.cnak = 1;
3844
3845 dwc_modify_reg32(addr, 0, depctl.d32);
3846 depctl.d32 = dwc_read_reg32(addr);
3847
3848 if(ep->is_in && core_if->dma_enable == 0) {
3849 write_isoc_frame_data(core_if, ep);
3850 }
3851
3852 }
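
/*
 * Worked example of the size/packet-count programming above (values are
 * illustrative): for xfer_len = 700 and maxpacket = 512,
 *
 *	IN:  pktcnt = (700 - 1 + 512) / 512 = 2, xfersize = 700, mc = 2
 *	OUT: pktcnt = (700 + 511) / 512     = 2, xfersize = 2 * 512 = 1024
 *
 * and the DATA PID is chosen from the parity of the target (micro)frame:
 * setd1pid for an odd next_frame, setd0pid for an even one.
 */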
3853
3854 #endif //DWC_EN_ISOC