[ramips] add feature gpio
target/linux/ramips/files/drivers/usb/dwc_otg/dwc_otg_cil.c
1 /* ==========================================================================
2 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_cil.c $
3 * $Revision: 1.7 $
4 * $Date: 2008-12-22 11:43:05 $
5 * $Change: 1117667 $
6 *
7 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
8 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
9 * otherwise expressly agreed to in writing between Synopsys and you.
10 *
11 * The Software IS NOT an item of Licensed Software or Licensed Product under
12 * any End User Software License Agreement or Agreement for Licensed Product
13 * with Synopsys or any supplement thereto. You are permitted to use and
14 * redistribute this Software in source and binary forms, with or without
15 * modification, provided that redistributions of source code must retain this
16 * notice. You may not view, use, disclose, copy or distribute this file or
17 * any information contained herein except pursuant to this license grant from
18 * Synopsys. If you do not agree with this notice, including the disclaimer
19 * below, then you are not authorized to use the Software.
20 *
21 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
25 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31 * DAMAGE.
32 * ========================================================================== */
33
34 /** @file
35 *
36 * The Core Interface Layer provides basic services for accessing and
37 * managing the DWC_otg hardware. These services are used by both the
38 * Host Controller Driver and the Peripheral Controller Driver.
39 *
40 * The CIL manages the memory map for the core so that the HCD and PCD
41 * don't have to do this separately. It also handles basic tasks like
42 * reading/writing the registers and data FIFOs in the controller.
43 * Some of the data access functions provide encapsulation of several
44 * operations required to perform a task, such as writing multiple
45 * registers to start a transfer. Finally, the CIL performs basic
46 * services that are not specific to either the host or device modes
47 * of operation. These services include management of the OTG Host
48 * Negotiation Protocol (HNP) and Session Request Protocol (SRP). A
49 * Diagnostic API is also provided to allow testing of the controller
50 * hardware.
51 *
52 * The Core Interface Layer has the following requirements:
53 * - Provides basic controller operations.
54 * - Minimal use of OS services.
55 * - The OS services used will be abstracted by using inline functions
56 * or macros.
57 *
58 */
59 #include <asm/unaligned.h>
60 #include <linux/dma-mapping.h>
61 #ifdef DEBUG
62 #include <linux/jiffies.h>
63 #endif
64
65 #include "linux/dwc_otg_plat.h"
66 #include "dwc_otg_regs.h"
67 #include "dwc_otg_cil.h"
68
69 /* Included only to access hc->qh for non-DWORD-aligned buffer handling.
70  * TODO: account for this properly.
71  */
72 #include "dwc_otg_hcd.h"
73
74 /**
75 * This function is called to initialize the DWC_otg CSR data
76 * structures. The register addresses in the device and host
77 * structures are initialized from the base address supplied by the
78 * caller. The calling function must make the OS calls to get the
79 * base address of the DWC_otg controller registers. The core_params
80 * argument holds the parameters that specify how the core should be
81 * configured.
82 *
83 * @param[in] reg_base_addr Base address of DWC_otg core registers
84 * @param[in] core_params Pointer to the core configuration parameters
85 *
86 */
87 dwc_otg_core_if_t *dwc_otg_cil_init(const uint32_t *reg_base_addr,
88 dwc_otg_core_params_t *core_params)
89 {
90 dwc_otg_core_if_t *core_if = 0;
91 dwc_otg_dev_if_t *dev_if = 0;
92 dwc_otg_host_if_t *host_if = 0;
93 uint8_t *reg_base = (uint8_t *)reg_base_addr;
94 int i = 0;
95
96 DWC_DEBUGPL(DBG_CILV, "%s(%p,%p)\n", __func__, reg_base_addr, core_params);
97
98 core_if = kmalloc(sizeof(dwc_otg_core_if_t), GFP_KERNEL);
99
100 if (core_if == 0) {
101 DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_core_if_t failed\n");
102 return 0;
103 }
104
105 memset(core_if, 0, sizeof(dwc_otg_core_if_t));
106
107 core_if->core_params = core_params;
108 core_if->core_global_regs = (dwc_otg_core_global_regs_t *)reg_base;
109
110 /*
111 * Allocate the Device Mode structures.
112 */
113 dev_if = kmalloc(sizeof(dwc_otg_dev_if_t), GFP_KERNEL);
114
115 if (dev_if == 0) {
116 DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_dev_if_t failed\n");
117 kfree(core_if);
118 return 0;
119 }
120
121 dev_if->dev_global_regs =
122 (dwc_otg_device_global_regs_t *)(reg_base + DWC_DEV_GLOBAL_REG_OFFSET);
123
124 for (i=0; i<MAX_EPS_CHANNELS; i++)
125 {
126 dev_if->in_ep_regs[i] = (dwc_otg_dev_in_ep_regs_t *)
127 (reg_base + DWC_DEV_IN_EP_REG_OFFSET +
128 (i * DWC_EP_REG_OFFSET));
129
130 dev_if->out_ep_regs[i] = (dwc_otg_dev_out_ep_regs_t *)
131 (reg_base + DWC_DEV_OUT_EP_REG_OFFSET +
132 (i * DWC_EP_REG_OFFSET));
133 DWC_DEBUGPL(DBG_CILV, "in_ep_regs[%d]->diepctl=%p\n",
134 i, &dev_if->in_ep_regs[i]->diepctl);
135 DWC_DEBUGPL(DBG_CILV, "out_ep_regs[%d]->doepctl=%p\n",
136 i, &dev_if->out_ep_regs[i]->doepctl);
137 }
138
139 dev_if->speed = 0; // unknown
140
141 core_if->dev_if = dev_if;
142
143 /*
144 * Allocate the Host Mode structures.
145 */
146 host_if = kmalloc(sizeof(dwc_otg_host_if_t), GFP_KERNEL);
147
148 if (host_if == 0) {
149 DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_host_if_t failed\n");
150 kfree(dev_if);
151 kfree(core_if);
152 return 0;
153 }
154
155 host_if->host_global_regs = (dwc_otg_host_global_regs_t *)
156 (reg_base + DWC_OTG_HOST_GLOBAL_REG_OFFSET);
157
158 host_if->hprt0 = (uint32_t*)(reg_base + DWC_OTG_HOST_PORT_REGS_OFFSET);
159
160 for (i=0; i<MAX_EPS_CHANNELS; i++)
161 {
162 host_if->hc_regs[i] = (dwc_otg_hc_regs_t *)
163 (reg_base + DWC_OTG_HOST_CHAN_REGS_OFFSET +
164 (i * DWC_OTG_CHAN_REGS_OFFSET));
165 DWC_DEBUGPL(DBG_CILV, "hc_reg[%d]->hcchar=%p\n",
166 i, &host_if->hc_regs[i]->hcchar);
167 }
168
169 host_if->num_host_channels = MAX_EPS_CHANNELS;
170 core_if->host_if = host_if;
171
172 for (i=0; i<MAX_EPS_CHANNELS; i++)
173 {
174 core_if->data_fifo[i] =
175 (uint32_t *)(reg_base + DWC_OTG_DATA_FIFO_OFFSET +
176 (i * DWC_OTG_DATA_FIFO_SIZE));
177 DWC_DEBUGPL(DBG_CILV, "data_fifo[%d]=0x%08x\n",
178 i, (unsigned)core_if->data_fifo[i]);
179 }
180
181 core_if->pcgcctl = (uint32_t*)(reg_base + DWC_OTG_PCGCCTL_OFFSET);
182
183 /*
184 * Store the contents of the hardware configuration registers here for
185 * easy access later.
186 */
187 core_if->hwcfg1.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg1);
188 core_if->hwcfg2.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg2);
189 core_if->hwcfg3.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg3);
190 core_if->hwcfg4.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg4);
191
192 DWC_DEBUGPL(DBG_CILV,"hwcfg1=%08x\n",core_if->hwcfg1.d32);
193 DWC_DEBUGPL(DBG_CILV,"hwcfg2=%08x\n",core_if->hwcfg2.d32);
194 DWC_DEBUGPL(DBG_CILV,"hwcfg3=%08x\n",core_if->hwcfg3.d32);
195 DWC_DEBUGPL(DBG_CILV,"hwcfg4=%08x\n",core_if->hwcfg4.d32);
196
197 core_if->hcfg.d32 = dwc_read_reg32(&core_if->host_if->host_global_regs->hcfg);
198 core_if->dcfg.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dcfg);
199
200 DWC_DEBUGPL(DBG_CILV,"hcfg=%08x\n",core_if->hcfg.d32);
201 DWC_DEBUGPL(DBG_CILV,"dcfg=%08x\n",core_if->dcfg.d32);
202
203 DWC_DEBUGPL(DBG_CILV,"op_mode=%0x\n",core_if->hwcfg2.b.op_mode);
204 DWC_DEBUGPL(DBG_CILV,"arch=%0x\n",core_if->hwcfg2.b.architecture);
205 DWC_DEBUGPL(DBG_CILV,"num_dev_ep=%d\n",core_if->hwcfg2.b.num_dev_ep);
206 DWC_DEBUGPL(DBG_CILV,"num_host_chan=%d\n",core_if->hwcfg2.b.num_host_chan);
207 DWC_DEBUGPL(DBG_CILV,"nonperio_tx_q_depth=0x%0x\n",core_if->hwcfg2.b.nonperio_tx_q_depth);
208 DWC_DEBUGPL(DBG_CILV,"host_perio_tx_q_depth=0x%0x\n",core_if->hwcfg2.b.host_perio_tx_q_depth);
209 DWC_DEBUGPL(DBG_CILV,"dev_token_q_depth=0x%0x\n",core_if->hwcfg2.b.dev_token_q_depth);
210
211 DWC_DEBUGPL(DBG_CILV,"Total FIFO SZ=%d\n", core_if->hwcfg3.b.dfifo_depth);
212 DWC_DEBUGPL(DBG_CILV,"xfer_size_cntr_width=%0x\n", core_if->hwcfg3.b.xfer_size_cntr_width);
213
214 /*
215 	 * Set the SRP success bit for FS-I2C
216 */
217 core_if->srp_success = 0;
218 core_if->srp_timer_started = 0;
219
220
221 /*
222 * Create new workqueue and init works
223 */
224 core_if->wq_otg = create_singlethread_workqueue("dwc_otg");
225 if(core_if->wq_otg == 0) {
226 DWC_DEBUGPL(DBG_CIL, "Creation of wq_otg failed\n");
227 kfree(host_if);
228 kfree(dev_if);
229 kfree(core_if);
230 		return 0;
231 }
232
233
234
235 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
236
237 INIT_WORK(&core_if->w_conn_id, w_conn_id_status_change, core_if);
238 INIT_WORK(&core_if->w_wkp, w_wakeup_detected, core_if);
239
240 #else
241
242 INIT_WORK(&core_if->w_conn_id, w_conn_id_status_change);
243 INIT_DELAYED_WORK(&core_if->w_wkp, w_wakeup_detected);
244
245 #endif
246 return core_if;
247 }
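
/*
 * Example (illustrative sketch, not part of the driver): the platform glue
 * is expected to map the controller registers and then drive the CIL
 * roughly as follows. The helper names below are hypothetical.
 */
#if 0
static dwc_otg_core_if_t *example_cil_bringup(const uint32_t *reg_base,
					      dwc_otg_core_params_t *params)
{
	dwc_otg_core_if_t *core_if;

	core_if = dwc_otg_cil_init(reg_base, params);
	if (!core_if)
		return 0;	/* allocation or workqueue creation failed */

	dwc_otg_core_init(core_if);		/* program the core registers */
	dwc_otg_enable_global_interrupts(core_if);
	return core_if;
}

static void example_cil_teardown(dwc_otg_core_if_t *core_if)
{
	dwc_otg_disable_global_interrupts(core_if);
	dwc_otg_cil_remove(core_if);		/* frees dev_if, host_if, core_if */
}
#endif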
248
249 /**
250 * This function frees the structures allocated by dwc_otg_cil_init().
251 *
252 * @param[in] core_if The core interface pointer returned from
253 * dwc_otg_cil_init().
254 *
255 */
256 void dwc_otg_cil_remove(dwc_otg_core_if_t *core_if)
257 {
258 /* Disable all interrupts */
259 dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, 1, 0);
260 dwc_write_reg32(&core_if->core_global_regs->gintmsk, 0);
261
262 if (core_if->wq_otg) {
263 destroy_workqueue(core_if->wq_otg);
264 }
265 if (core_if->dev_if) {
266 kfree(core_if->dev_if);
267 }
268 if (core_if->host_if) {
269 kfree(core_if->host_if);
270 }
271 kfree(core_if);
272 }
273
274 /**
275 * This function enables the controller's Global Interrupt in the AHB Config
276 * register.
277 *
278 * @param[in] core_if Programming view of DWC_otg controller.
279 */
280 void dwc_otg_enable_global_interrupts(dwc_otg_core_if_t *core_if)
281 {
282 gahbcfg_data_t ahbcfg = { .d32 = 0};
283 ahbcfg.b.glblintrmsk = 1; /* Enable interrupts */
284 dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, 0, ahbcfg.d32);
285 }
286
287 /**
288 * This function disables the controller's Global Interrupt in the AHB Config
289 * register.
290 *
291 * @param[in] core_if Programming view of DWC_otg controller.
292 */
293 void dwc_otg_disable_global_interrupts(dwc_otg_core_if_t *core_if)
294 {
295 gahbcfg_data_t ahbcfg = { .d32 = 0};
296 	ahbcfg.b.glblintrmsk = 1; /* Disable interrupts */
297 dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, ahbcfg.d32, 0);
298 }
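
/*
 * Note (illustrative fragment): dwc_modify_reg32(reg, clear_mask, set_mask)
 * is a read-modify-write helper, i.e. roughly
 * reg = (reg & ~clear_mask) | set_mask. The two functions above therefore
 * only toggle GAHBCFG.GlblIntrMsk and leave the rest of the AHB
 * configuration untouched:
 */
#if 0
	gahbcfg_data_t ahbcfg = { .d32 = 0 };

	ahbcfg.b.glblintrmsk = 1;
	/* set the bit -> unmask (enable) the global interrupt line */
	dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, 0, ahbcfg.d32);
	/* clear the bit -> mask (disable) the global interrupt line */
	dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, ahbcfg.d32, 0);
#endif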
299
300 /**
301  * This function initializes the common interrupts, used in both
302 * device and host modes.
303 *
304 * @param[in] core_if Programming view of the DWC_otg controller
305 *
306 */
307 static void dwc_otg_enable_common_interrupts(dwc_otg_core_if_t *core_if)
308 {
309 dwc_otg_core_global_regs_t *global_regs =
310 core_if->core_global_regs;
311 gintmsk_data_t intr_mask = { .d32 = 0};
312
313 /* Clear any pending OTG Interrupts */
314 dwc_write_reg32(&global_regs->gotgint, 0xFFFFFFFF);
315
316 /* Clear any pending interrupts */
317 dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF);
318
319 /*
320 * Enable the interrupts in the GINTMSK.
321 */
322 intr_mask.b.modemismatch = 1;
323 intr_mask.b.otgintr = 1;
324
325 if (!core_if->dma_enable) {
326 intr_mask.b.rxstsqlvl = 1;
327 }
328
329 intr_mask.b.conidstschng = 1;
330 intr_mask.b.wkupintr = 1;
331 intr_mask.b.disconnect = 1;
332 intr_mask.b.usbsuspend = 1;
333 intr_mask.b.sessreqintr = 1;
334 dwc_write_reg32(&global_regs->gintmsk, intr_mask.d32);
335 }
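
/*
 * Note (illustrative fragment): the *_data_t register types used throughout
 * the CIL are unions of a raw 32-bit value (d32) and a bitfield view (b),
 * so a mask can be assembled field by field and written with a single
 * register access:
 */
#if 0
	gintmsk_data_t mask = { .d32 = 0 };

	mask.b.modemismatch = 1;	/* build the mask via the bitfield view */
	mask.b.otgintr = 1;
	dwc_write_reg32(&global_regs->gintmsk, mask.d32);	/* write raw value */
#endif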
336
337 /**
338 * Initializes the FSLSPClkSel field of the HCFG register depending on the PHY
339 * type.
340 */
341 static void init_fslspclksel(dwc_otg_core_if_t *core_if)
342 {
343 uint32_t val;
344 hcfg_data_t hcfg;
345
346 if (((core_if->hwcfg2.b.hs_phy_type == 2) &&
347 (core_if->hwcfg2.b.fs_phy_type == 1) &&
348 (core_if->core_params->ulpi_fs_ls)) ||
349 (core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) {
350 /* Full speed PHY */
351 val = DWC_HCFG_48_MHZ;
352 }
353 else {
354 /* High speed PHY running at full speed or high speed */
355 val = DWC_HCFG_30_60_MHZ;
356 }
357
358 DWC_DEBUGPL(DBG_CIL, "Initializing HCFG.FSLSPClkSel to 0x%1x\n", val);
359 hcfg.d32 = dwc_read_reg32(&core_if->host_if->host_global_regs->hcfg);
360 hcfg.b.fslspclksel = val;
361 dwc_write_reg32(&core_if->host_if->host_global_regs->hcfg, hcfg.d32);
362 }
363
364 /**
365 * Initializes the DevSpd field of the DCFG register depending on the PHY type
366 * and the enumeration speed of the device.
367 */
368 static void init_devspd(dwc_otg_core_if_t *core_if)
369 {
370 uint32_t val;
371 dcfg_data_t dcfg;
372
373 if (((core_if->hwcfg2.b.hs_phy_type == 2) &&
374 (core_if->hwcfg2.b.fs_phy_type == 1) &&
375 (core_if->core_params->ulpi_fs_ls)) ||
376 (core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) {
377 /* Full speed PHY */
378 val = 0x3;
379 }
380 else if (core_if->core_params->speed == DWC_SPEED_PARAM_FULL) {
381 /* High speed PHY running at full speed */
382 val = 0x1;
383 }
384 else {
385 /* High speed PHY running at high speed */
386 val = 0x0;
387 }
388
389 DWC_DEBUGPL(DBG_CIL, "Initializing DCFG.DevSpd to 0x%1x\n", val);
390
391 dcfg.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dcfg);
392 dcfg.b.devspd = val;
393 dwc_write_reg32(&core_if->dev_if->dev_global_regs->dcfg, dcfg.d32);
394 }
395
396 /**
397 * This function calculates the number of IN EPS
398 * using GHWCFG1 and GHWCFG2 registers values
399 *
400 * @param core_if Programming view of the DWC_otg controller
401 */
402 static uint32_t calc_num_in_eps(dwc_otg_core_if_t *core_if)
403 {
404 uint32_t num_in_eps = 0;
405 uint32_t num_eps = core_if->hwcfg2.b.num_dev_ep;
406 uint32_t hwcfg1 = core_if->hwcfg1.d32 >> 3;
407 uint32_t num_tx_fifos = core_if->hwcfg4.b.num_in_eps;
408 int i;
409
410
411 for(i = 0; i < num_eps; ++i)
412 {
413 if(!(hwcfg1 & 0x1))
414 num_in_eps++;
415
416 hwcfg1 >>= 2;
417 }
418
419 if(core_if->hwcfg4.b.ded_fifo_en) {
420 num_in_eps = (num_in_eps > num_tx_fifos) ? num_tx_fifos : num_in_eps;
421 }
422
423 return num_in_eps;
424 }
425
426
427 /**
428 * This function calculates the number of OUT EPS
429 * using GHWCFG1 and GHWCFG2 registers values
430 *
431 * @param core_if Programming view of the DWC_otg controller
432 */
433 static uint32_t calc_num_out_eps(dwc_otg_core_if_t *core_if)
434 {
435 uint32_t num_out_eps = 0;
436 uint32_t num_eps = core_if->hwcfg2.b.num_dev_ep;
437 uint32_t hwcfg1 = core_if->hwcfg1.d32 >> 2;
438 int i;
439
440 for(i = 0; i < num_eps; ++i)
441 {
442 		if(!(hwcfg1 & 0x1))
443 num_out_eps++;
444
445 hwcfg1 >>= 2;
446 }
447 return num_out_eps;
448 }
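
/*
 * Worked example (hypothetical GHWCFG1 value): GHWCFG1 encodes the direction
 * of each endpoint in 2-bit fields (EP0 in bits [1:0], EP1 in bits [3:2],
 * and so on), with 0 = bidirectional, 1 = IN only, 2 = OUT only. For
 * hwcfg1 = 0x00000024 (EP1 = 01b IN only, EP2 = 10b OUT only) and
 * num_dev_ep = 2, the intended result of the helpers above is
 * num_in_eps = 1 and num_out_eps = 1, with num_in_eps further capped by
 * GHWCFG4.NumInEps when dedicated Tx FIFOs are enabled.
 */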
449 /**
450 * This function initializes the DWC_otg controller registers and
451 * prepares the core for device mode or host mode operation.
452 *
453 * @param core_if Programming view of the DWC_otg controller
454 *
455 */
456 void dwc_otg_core_init(dwc_otg_core_if_t *core_if)
457 {
458 int i = 0;
459 dwc_otg_core_global_regs_t *global_regs =
460 core_if->core_global_regs;
461 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
462 gahbcfg_data_t ahbcfg = { .d32 = 0 };
463 gusbcfg_data_t usbcfg = { .d32 = 0 };
464 gi2cctl_data_t i2cctl = { .d32 = 0 };
465
466 DWC_DEBUGPL(DBG_CILV, "dwc_otg_core_init(%p)\n", core_if);
467
468 /* Common Initialization */
469
470 usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
471
472 // usbcfg.b.tx_end_delay = 1;
473 /* Program the ULPI External VBUS bit if needed */
474 usbcfg.b.ulpi_ext_vbus_drv =
475 (core_if->core_params->phy_ulpi_ext_vbus == DWC_PHY_ULPI_EXTERNAL_VBUS) ? 1 : 0;
476
477 /* Set external TS Dline pulsing */
478 usbcfg.b.term_sel_dl_pulse = (core_if->core_params->ts_dline == 1) ? 1 : 0;
479 dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32);
480
481
482 /* Reset the Controller */
483 dwc_otg_core_reset(core_if);
484
485 /* Initialize parameters from Hardware configuration registers. */
486 dev_if->num_in_eps = calc_num_in_eps(core_if);
487 dev_if->num_out_eps = calc_num_out_eps(core_if);
488
489
490 DWC_DEBUGPL(DBG_CIL, "num_dev_perio_in_ep=%d\n", core_if->hwcfg4.b.num_dev_perio_in_ep);
491
492 for (i=0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; i++)
493 {
494 dev_if->perio_tx_fifo_size[i] =
495 dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]) >> 16;
496 DWC_DEBUGPL(DBG_CIL, "Periodic Tx FIFO SZ #%d=0x%0x\n",
497 i, dev_if->perio_tx_fifo_size[i]);
498 }
499
500 for (i=0; i < core_if->hwcfg4.b.num_in_eps; i++)
501 {
502 dev_if->tx_fifo_size[i] =
503 dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]) >> 16;
504 DWC_DEBUGPL(DBG_CIL, "Tx FIFO SZ #%d=0x%0x\n",
505 			    i, dev_if->tx_fifo_size[i]);
506 }
507
508 core_if->total_fifo_size = core_if->hwcfg3.b.dfifo_depth;
509 core_if->rx_fifo_size =
510 dwc_read_reg32(&global_regs->grxfsiz);
511 core_if->nperio_tx_fifo_size =
512 dwc_read_reg32(&global_regs->gnptxfsiz) >> 16;
513
514 DWC_DEBUGPL(DBG_CIL, "Total FIFO SZ=%d\n", core_if->total_fifo_size);
515 DWC_DEBUGPL(DBG_CIL, "Rx FIFO SZ=%d\n", core_if->rx_fifo_size);
516 DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO SZ=%d\n", core_if->nperio_tx_fifo_size);
517
518 /* This programming sequence needs to happen in FS mode before any other
519 * programming occurs */
520 if ((core_if->core_params->speed == DWC_SPEED_PARAM_FULL) &&
521 (core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) {
522 /* If FS mode with FS PHY */
523
524 /* core_init() is now called on every switch so only call the
525 * following for the first time through. */
526 if (!core_if->phy_init_done) {
527 core_if->phy_init_done = 1;
528 DWC_DEBUGPL(DBG_CIL, "FS_PHY detected\n");
529 usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
530 usbcfg.b.physel = 1;
531 dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32);
532
533 /* Reset after a PHY select */
534 dwc_otg_core_reset(core_if);
535 }
536
537 		/* Program DCFG.DevSpd or HCFG.FSLSPClkSel to 48 MHz in FS. Also
538 * do this on HNP Dev/Host mode switches (done in dev_init and
539 * host_init). */
540 if (dwc_otg_is_host_mode(core_if)) {
541 init_fslspclksel(core_if);
542 }
543 else {
544 init_devspd(core_if);
545 }
546
547 if (core_if->core_params->i2c_enable) {
548 DWC_DEBUGPL(DBG_CIL, "FS_PHY Enabling I2c\n");
549 /* Program GUSBCFG.OtgUtmifsSel to I2C */
550 usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
551 usbcfg.b.otgutmifssel = 1;
552 dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32);
553
554 /* Program GI2CCTL.I2CEn */
555 i2cctl.d32 = dwc_read_reg32(&global_regs->gi2cctl);
556 i2cctl.b.i2cdevaddr = 1;
557 i2cctl.b.i2cen = 0;
558 dwc_write_reg32 (&global_regs->gi2cctl, i2cctl.d32);
559 i2cctl.b.i2cen = 1;
560 dwc_write_reg32 (&global_regs->gi2cctl, i2cctl.d32);
561 }
562
563 } /* endif speed == DWC_SPEED_PARAM_FULL */
564
565 else {
566 /* High speed PHY. */
567 if (!core_if->phy_init_done) {
568 core_if->phy_init_done = 1;
569 /* HS PHY parameters. These parameters are preserved
570 * during soft reset so only program the first time. Do
571 * a soft reset immediately after setting phyif. */
572 usbcfg.b.ulpi_utmi_sel = core_if->core_params->phy_type;
573 if (usbcfg.b.ulpi_utmi_sel == 1) {
574 /* ULPI interface */
575 usbcfg.b.phyif = 0;
576 usbcfg.b.ddrsel = core_if->core_params->phy_ulpi_ddr;
577 }
578 else {
579 /* UTMI+ interface */
580 if (core_if->core_params->phy_utmi_width == 16) {
581 usbcfg.b.phyif = 1;
582 }
583 else {
584 usbcfg.b.phyif = 0;
585 }
586 }
587
588 dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
589
590 /* Reset after setting the PHY parameters */
591 dwc_otg_core_reset(core_if);
592 }
593 }
594
595 if ((core_if->hwcfg2.b.hs_phy_type == 2) &&
596 (core_if->hwcfg2.b.fs_phy_type == 1) &&
597 (core_if->core_params->ulpi_fs_ls)) {
598 DWC_DEBUGPL(DBG_CIL, "Setting ULPI FSLS\n");
599 usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
600 usbcfg.b.ulpi_fsls = 1;
601 usbcfg.b.ulpi_clk_sus_m = 1;
602 dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
603 }
604 else {
605 usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
606 usbcfg.b.ulpi_fsls = 0;
607 usbcfg.b.ulpi_clk_sus_m = 0;
608 dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
609 }
610
611 /* Program the GAHBCFG Register.*/
612 switch (core_if->hwcfg2.b.architecture) {
613
614 case DWC_SLAVE_ONLY_ARCH:
615 DWC_DEBUGPL(DBG_CIL, "Slave Only Mode\n");
616 ahbcfg.b.nptxfemplvl_txfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY;
617 ahbcfg.b.ptxfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY;
618 core_if->dma_enable = 0;
619 core_if->dma_desc_enable = 0;
620 break;
621
622 case DWC_EXT_DMA_ARCH:
623 DWC_DEBUGPL(DBG_CIL, "External DMA Mode\n");
624 ahbcfg.b.hburstlen = core_if->core_params->dma_burst_size;
625 core_if->dma_enable = (core_if->core_params->dma_enable != 0);
626 core_if->dma_desc_enable = (core_if->core_params->dma_desc_enable != 0);
627 break;
628
629 case DWC_INT_DMA_ARCH:
630 DWC_DEBUGPL(DBG_CIL, "Internal DMA Mode\n");
631 ahbcfg.b.hburstlen = DWC_GAHBCFG_INT_DMA_BURST_INCR;
632 core_if->dma_enable = (core_if->core_params->dma_enable != 0);
633 core_if->dma_desc_enable = (core_if->core_params->dma_desc_enable != 0);
634 break;
635
636 }
637 ahbcfg.b.dmaenable = core_if->dma_enable;
638 dwc_write_reg32(&global_regs->gahbcfg, ahbcfg.d32);
639
640 core_if->en_multiple_tx_fifo = core_if->hwcfg4.b.ded_fifo_en;
641
642 core_if->pti_enh_enable = core_if->core_params->pti_enable != 0;
643 core_if->multiproc_int_enable = core_if->core_params->mpi_enable;
644 DWC_PRINT("Periodic Transfer Interrupt Enhancement - %s\n", ((core_if->pti_enh_enable) ? "enabled": "disabled"));
645 DWC_PRINT("Multiprocessor Interrupt Enhancement - %s\n", ((core_if->multiproc_int_enable) ? "enabled": "disabled"));
646
647 /*
648 * Program the GUSBCFG register.
649 */
650 usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
651
652 switch (core_if->hwcfg2.b.op_mode) {
653 case DWC_MODE_HNP_SRP_CAPABLE:
654 usbcfg.b.hnpcap = (core_if->core_params->otg_cap ==
655 DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE);
656 usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
657 DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
658 break;
659
660 case DWC_MODE_SRP_ONLY_CAPABLE:
661 usbcfg.b.hnpcap = 0;
662 usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
663 DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
664 break;
665
666 case DWC_MODE_NO_HNP_SRP_CAPABLE:
667 usbcfg.b.hnpcap = 0;
668 usbcfg.b.srpcap = 0;
669 break;
670
671 case DWC_MODE_SRP_CAPABLE_DEVICE:
672 usbcfg.b.hnpcap = 0;
673 usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
674 DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
675 break;
676
677 case DWC_MODE_NO_SRP_CAPABLE_DEVICE:
678 usbcfg.b.hnpcap = 0;
679 usbcfg.b.srpcap = 0;
680 break;
681
682 case DWC_MODE_SRP_CAPABLE_HOST:
683 usbcfg.b.hnpcap = 0;
684 usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
685 DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
686 break;
687
688 case DWC_MODE_NO_SRP_CAPABLE_HOST:
689 usbcfg.b.hnpcap = 0;
690 usbcfg.b.srpcap = 0;
691 break;
692 }
693
694 dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
695
696 /* Enable common interrupts */
697 dwc_otg_enable_common_interrupts(core_if);
698
699 	/* Do device or host initialization based on mode during PCD
700 * and HCD initialization */
701 if (dwc_otg_is_host_mode(core_if)) {
702 DWC_DEBUGPL(DBG_ANY, "Host Mode\n");
703 core_if->op_state = A_HOST;
704 }
705 else {
706 DWC_DEBUGPL(DBG_ANY, "Device Mode\n");
707 core_if->op_state = B_PERIPHERAL;
708 #ifdef DWC_DEVICE_ONLY
709 dwc_otg_core_dev_init(core_if);
710 #endif
711 }
712 }
713
714
715 /**
716 * This function enables the Device mode interrupts.
717 *
718 * @param core_if Programming view of DWC_otg controller
719 */
720 void dwc_otg_enable_device_interrupts(dwc_otg_core_if_t *core_if)
721 {
722 gintmsk_data_t intr_mask = { .d32 = 0};
723 dwc_otg_core_global_regs_t *global_regs =
724 core_if->core_global_regs;
725
726 DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__);
727
728 /* Disable all interrupts. */
729 dwc_write_reg32(&global_regs->gintmsk, 0);
730
731 /* Clear any pending interrupts */
732 dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF);
733
734 /* Enable the common interrupts */
735 dwc_otg_enable_common_interrupts(core_if);
736
737 /* Enable interrupts */
738 intr_mask.b.usbreset = 1;
739 intr_mask.b.enumdone = 1;
740
741 if(!core_if->multiproc_int_enable) {
742 intr_mask.b.inepintr = 1;
743 intr_mask.b.outepintr = 1;
744 }
745
746 intr_mask.b.erlysuspend = 1;
747
748 if(core_if->en_multiple_tx_fifo == 0) {
749 intr_mask.b.epmismatch = 1;
750 }
751
752
753 #ifdef DWC_EN_ISOC
754 if(core_if->dma_enable) {
755 if(core_if->dma_desc_enable == 0) {
756 if(core_if->pti_enh_enable) {
757 dctl_data_t dctl = { .d32 = 0 };
758 dctl.b.ifrmnum = 1;
759 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl, 0, dctl.d32);
760 } else {
761 intr_mask.b.incomplisoin = 1;
762 intr_mask.b.incomplisoout = 1;
763 }
764 }
765 } else {
766 intr_mask.b.incomplisoin = 1;
767 intr_mask.b.incomplisoout = 1;
768 }
769 #endif // DWC_EN_ISOC
770
771 /** @todo NGS: Should this be a module parameter? */
772 #ifdef USE_PERIODIC_EP
773 intr_mask.b.isooutdrop = 1;
774 intr_mask.b.eopframe = 1;
775 intr_mask.b.incomplisoin = 1;
776 intr_mask.b.incomplisoout = 1;
777 #endif
778
779 dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32);
780
781 DWC_DEBUGPL(DBG_CIL, "%s() gintmsk=%0x\n", __func__,
782 dwc_read_reg32(&global_regs->gintmsk));
783 }
784
785 /**
786 * This function initializes the DWC_otg controller registers for
787 * device mode.
788 *
789 * @param core_if Programming view of DWC_otg controller
790 *
791 */
792 void dwc_otg_core_dev_init(dwc_otg_core_if_t *core_if)
793 {
794 int i;
795 dwc_otg_core_global_regs_t *global_regs =
796 core_if->core_global_regs;
797 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
798 dwc_otg_core_params_t *params = core_if->core_params;
799 dcfg_data_t dcfg = { .d32 = 0};
800 grstctl_t resetctl = { .d32 = 0 };
801 uint32_t rx_fifo_size;
802 fifosize_data_t nptxfifosize;
803 fifosize_data_t txfifosize;
804 dthrctl_data_t dthrctl;
805 fifosize_data_t ptxfifosize;
806
807 /* Restart the Phy Clock */
808 dwc_write_reg32(core_if->pcgcctl, 0);
809
810 /* Device configuration register */
811 init_devspd(core_if);
812 dcfg.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dcfg);
813 dcfg.b.descdma = (core_if->dma_desc_enable) ? 1 : 0;
814 dcfg.b.perfrint = DWC_DCFG_FRAME_INTERVAL_80;
815
816 dwc_write_reg32(&dev_if->dev_global_regs->dcfg, dcfg.d32);
817
818 /* Configure data FIFO sizes */
819 if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) {
820 DWC_DEBUGPL(DBG_CIL, "Total FIFO Size=%d\n", core_if->total_fifo_size);
821 DWC_DEBUGPL(DBG_CIL, "Rx FIFO Size=%d\n", params->dev_rx_fifo_size);
822 DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO Size=%d\n", params->dev_nperio_tx_fifo_size);
823
824 /* Rx FIFO */
825 DWC_DEBUGPL(DBG_CIL, "initial grxfsiz=%08x\n",
826 dwc_read_reg32(&global_regs->grxfsiz));
827
828 rx_fifo_size = params->dev_rx_fifo_size;
829 dwc_write_reg32(&global_regs->grxfsiz, rx_fifo_size);
830
831 DWC_DEBUGPL(DBG_CIL, "new grxfsiz=%08x\n",
832 dwc_read_reg32(&global_regs->grxfsiz));
833
834 		/** Clear the Periodic Tx FIFO mask (all bits 0) */
835 core_if->p_tx_msk = 0;
836
837 		/** Clear the Tx FIFO mask (all bits 0) */
838 core_if->tx_msk = 0;
839
840 if(core_if->en_multiple_tx_fifo == 0) {
841 /* Non-periodic Tx FIFO */
842 DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n",
843 dwc_read_reg32(&global_regs->gnptxfsiz));
844
845 nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size;
846 nptxfifosize.b.startaddr = params->dev_rx_fifo_size;
847
848 dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32);
849
850 DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n",
851 dwc_read_reg32(&global_regs->gnptxfsiz));
852
853 /**@todo NGS: Fix Periodic FIFO Sizing! */
854 /*
855 			 * Periodic Tx FIFOs. These FIFOs are numbered from 1 to 15.
856 * Indexes of the FIFO size module parameters in the
857 * dev_perio_tx_fifo_size array and the FIFO size registers in
858 * the dptxfsiz array run from 0 to 14.
859 */
860 /** @todo Finish debug of this */
861 ptxfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth;
862 for (i=0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; i++)
863 {
864 ptxfifosize.b.depth = params->dev_perio_tx_fifo_size[i];
865 DWC_DEBUGPL(DBG_CIL, "initial dptxfsiz_dieptxf[%d]=%08x\n", i,
866 dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]));
867 dwc_write_reg32(&global_regs->dptxfsiz_dieptxf[i],
868 ptxfifosize.d32);
869 DWC_DEBUGPL(DBG_CIL, "new dptxfsiz_dieptxf[%d]=%08x\n", i,
870 dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]));
871 ptxfifosize.b.startaddr += ptxfifosize.b.depth;
872 }
873 }
874 else {
875 /*
876 			 * Tx FIFOs. These FIFOs are numbered from 1 to 15.
877 * Indexes of the FIFO size module parameters in the
878 * dev_tx_fifo_size array and the FIFO size registers in
879 * the dptxfsiz_dieptxf array run from 0 to 14.
880 */
881
882
883 /* Non-periodic Tx FIFO */
884 DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n",
885 dwc_read_reg32(&global_regs->gnptxfsiz));
886
887 nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size;
888 nptxfifosize.b.startaddr = params->dev_rx_fifo_size;
889
890 dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32);
891
892 DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n",
893 dwc_read_reg32(&global_regs->gnptxfsiz));
894
895 txfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth;
896 /*
897  * Modified by kaiker for RT3052 device-mode configuration.
898  *
899  * On the RT3052, _core_if->hwcfg4.b.num_dev_perio_in_ep is configured
900  * as 0, so these Tx FIFOs would never be set up and no more than one
901  * IN endpoint could be used without this change.
902  *
903  */
904 #if 1
905 for (i=1 ; i <= dev_if->num_in_eps; i++)
906 #else
907 for (i=1; i < _core_if->hwcfg4.b.num_dev_perio_in_ep; i++)
908 #endif
909 {
910
911 txfifosize.b.depth = params->dev_tx_fifo_size[i];
912
913 DWC_DEBUGPL(DBG_CIL, "initial dptxfsiz_dieptxf[%d]=%08x\n", i,
914 dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]));
915
916 dwc_write_reg32(&global_regs->dptxfsiz_dieptxf[i-1],
917 txfifosize.d32);
918
919 DWC_DEBUGPL(DBG_CIL, "new dptxfsiz_dieptxf[%d]=%08x\n", i,
920 dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i-1]));
921
922 txfifosize.b.startaddr += txfifosize.b.depth;
923 }
924 }
925 }
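
	/*
	 * Worked example (hypothetical sizes, dedicated-Tx-FIFO case): with
	 * dev_rx_fifo_size = 1024 words, dev_nperio_tx_fifo_size = 256 and
	 * two dedicated Tx FIFOs of 256 words each, the code above programs
	 *
	 *   grxfsiz              = 1024                       words    0..1023
	 *   gnptxfsiz            = startaddr 1024, depth 256  words 1024..1279
	 *   dptxfsiz_dieptxf[0]  = startaddr 1280, depth 256  words 1280..1535
	 *   dptxfsiz_dieptxf[1]  = startaddr 1536, depth 256  words 1536..1791
	 *
	 * i.e. each FIFO starts where the previous one ends, and the sum must
	 * not exceed GHWCFG3.DfifoDepth (core_if->total_fifo_size).
	 */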
926 /* Flush the FIFOs */
927 dwc_otg_flush_tx_fifo(core_if, 0x10); /* all Tx FIFOs */
928 dwc_otg_flush_rx_fifo(core_if);
929
930 /* Flush the Learning Queue. */
931 resetctl.b.intknqflsh = 1;
932 dwc_write_reg32(&core_if->core_global_regs->grstctl, resetctl.d32);
933
934 	/* Clear all pending Device Interrupts */
935 
936 	/** @todo Does this condition need to be checked, or should all
937 	 * pending interrupts be cleared unconditionally?
938 	 */
942 if(core_if->multiproc_int_enable) {
943 for(i = 0; i < core_if->dev_if->num_in_eps; ++i) {
944 dwc_write_reg32(&dev_if->dev_global_regs->diepeachintmsk[i], 0);
945 }
946
947 for(i = 0; i < core_if->dev_if->num_out_eps; ++i) {
948 dwc_write_reg32(&dev_if->dev_global_regs->doepeachintmsk[i], 0);
949 }
950
951 dwc_write_reg32(&dev_if->dev_global_regs->deachint, 0xFFFFFFFF);
952 dwc_write_reg32(&dev_if->dev_global_regs->deachintmsk, 0);
953 } else {
954 dwc_write_reg32(&dev_if->dev_global_regs->diepmsk, 0);
955 dwc_write_reg32(&dev_if->dev_global_regs->doepmsk, 0);
956 dwc_write_reg32(&dev_if->dev_global_regs->daint, 0xFFFFFFFF);
957 dwc_write_reg32(&dev_if->dev_global_regs->daintmsk, 0);
958 }
959
960 for (i=0; i <= dev_if->num_in_eps; i++)
961 {
962 depctl_data_t depctl;
963 depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl);
964 if (depctl.b.epena) {
965 depctl.d32 = 0;
966 depctl.b.epdis = 1;
967 depctl.b.snak = 1;
968 }
969 else {
970 depctl.d32 = 0;
971 }
972
973 dwc_write_reg32(&dev_if->in_ep_regs[i]->diepctl, depctl.d32);
974
975
976 dwc_write_reg32(&dev_if->in_ep_regs[i]->dieptsiz, 0);
977 dwc_write_reg32(&dev_if->in_ep_regs[i]->diepdma, 0);
978 dwc_write_reg32(&dev_if->in_ep_regs[i]->diepint, 0xFF);
979 }
980
981 for (i=0; i <= dev_if->num_out_eps; i++)
982 {
983 depctl_data_t depctl;
984 depctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[i]->doepctl);
985 if (depctl.b.epena) {
986 depctl.d32 = 0;
987 depctl.b.epdis = 1;
988 depctl.b.snak = 1;
989 }
990 else {
991 depctl.d32 = 0;
992 }
993
994 dwc_write_reg32(&dev_if->out_ep_regs[i]->doepctl, depctl.d32);
995
996 dwc_write_reg32(&dev_if->out_ep_regs[i]->doeptsiz, 0);
997 dwc_write_reg32(&dev_if->out_ep_regs[i]->doepdma, 0);
998 dwc_write_reg32(&dev_if->out_ep_regs[i]->doepint, 0xFF);
999 }
1000
1001 if(core_if->en_multiple_tx_fifo && core_if->dma_enable) {
1002 dev_if->non_iso_tx_thr_en = params->thr_ctl & 0x1;
1003 dev_if->iso_tx_thr_en = (params->thr_ctl >> 1) & 0x1;
1004 dev_if->rx_thr_en = (params->thr_ctl >> 2) & 0x1;
1005
1006 dev_if->rx_thr_length = params->rx_thr_length;
1007 dev_if->tx_thr_length = params->tx_thr_length;
1008
1009 dev_if->setup_desc_index = 0;
1010
1011 dthrctl.d32 = 0;
1012 dthrctl.b.non_iso_thr_en = dev_if->non_iso_tx_thr_en;
1013 dthrctl.b.iso_thr_en = dev_if->iso_tx_thr_en;
1014 dthrctl.b.tx_thr_len = dev_if->tx_thr_length;
1015 dthrctl.b.rx_thr_en = dev_if->rx_thr_en;
1016 dthrctl.b.rx_thr_len = dev_if->rx_thr_length;
1017
1018 dwc_write_reg32(&dev_if->dev_global_regs->dtknqr3_dthrctl, dthrctl.d32);
1019
1020 DWC_DEBUGPL(DBG_CIL, "Non ISO Tx Thr - %d\nISO Tx Thr - %d\nRx Thr - %d\nTx Thr Len - %d\nRx Thr Len - %d\n",
1021 dthrctl.b.non_iso_thr_en, dthrctl.b.iso_thr_en, dthrctl.b.rx_thr_en, dthrctl.b.tx_thr_len, dthrctl.b.rx_thr_len);
1022
1023 }
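
	/*
	 * Worked example (hypothetical value): thr_ctl is used here as a
	 * 3-bit enable field, bit 0 = non-ISO Tx thresholding, bit 1 = ISO Tx
	 * thresholding, bit 2 = Rx thresholding. thr_ctl = 0x5 therefore
	 * enables non-ISO Tx and Rx thresholding only, with the lengths taken
	 * from tx_thr_length and rx_thr_length above.
	 */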
1024
1025 dwc_otg_enable_device_interrupts(core_if);
1026
1027 {
1028 diepmsk_data_t msk = { .d32 = 0 };
1029 msk.b.txfifoundrn = 1;
1030 if(core_if->multiproc_int_enable) {
1031 dwc_modify_reg32(&dev_if->dev_global_regs->diepeachintmsk[0], msk.d32, msk.d32);
1032 } else {
1033 dwc_modify_reg32(&dev_if->dev_global_regs->diepmsk, msk.d32, msk.d32);
1034 }
1035 }
1036
1037
1038 if(core_if->multiproc_int_enable) {
1039 /* Set NAK on Babble */
1040 dctl_data_t dctl = { .d32 = 0};
1041 dctl.b.nakonbble = 1;
1042 dwc_modify_reg32(&dev_if->dev_global_regs->dctl, 0, dctl.d32);
1043 }
1044 }
1045
1046 /**
1047 * This function enables the Host mode interrupts.
1048 *
1049 * @param core_if Programming view of DWC_otg controller
1050 */
1051 void dwc_otg_enable_host_interrupts(dwc_otg_core_if_t *core_if)
1052 {
1053 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
1054 gintmsk_data_t intr_mask = { .d32 = 0 };
1055
1056 DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__);
1057
1058 /* Disable all interrupts. */
1059 dwc_write_reg32(&global_regs->gintmsk, 0);
1060
1061 /* Clear any pending interrupts. */
1062 dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF);
1063
1064 /* Enable the common interrupts */
1065 dwc_otg_enable_common_interrupts(core_if);
1066
1067 /*
1068 * Enable host mode interrupts without disturbing common
1069 * interrupts.
1070 */
1071 intr_mask.b.sofintr = 1;
1072 intr_mask.b.portintr = 1;
1073 intr_mask.b.hcintr = 1;
1074
1075 dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32);
1076 }
1077
1078 /**
1079 * This function disables the Host Mode interrupts.
1080 *
1081 * @param core_if Programming view of DWC_otg controller
1082 */
1083 void dwc_otg_disable_host_interrupts(dwc_otg_core_if_t *core_if)
1084 {
1085 dwc_otg_core_global_regs_t *global_regs =
1086 core_if->core_global_regs;
1087 gintmsk_data_t intr_mask = { .d32 = 0 };
1088
1089 DWC_DEBUGPL(DBG_CILV, "%s()\n", __func__);
1090
1091 /*
1092 * Disable host mode interrupts without disturbing common
1093 * interrupts.
1094 */
1095 intr_mask.b.sofintr = 1;
1096 intr_mask.b.portintr = 1;
1097 intr_mask.b.hcintr = 1;
1098 intr_mask.b.ptxfempty = 1;
1099 intr_mask.b.nptxfempty = 1;
1100
1101 dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0);
1102 }
1103
1104 /**
1105 * This function initializes the DWC_otg controller registers for
1106 * host mode.
1107 *
1108 * This function flushes the Tx and Rx FIFOs and it flushes any entries in the
1109 * request queues. Host channels are reset to ensure that they are ready for
1110 * performing transfers.
1111 *
1112 * @param core_if Programming view of DWC_otg controller
1113 *
1114 */
1115 void dwc_otg_core_host_init(dwc_otg_core_if_t *core_if)
1116 {
1117 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
1118 dwc_otg_host_if_t *host_if = core_if->host_if;
1119 dwc_otg_core_params_t *params = core_if->core_params;
1120 hprt0_data_t hprt0 = { .d32 = 0 };
1121 fifosize_data_t nptxfifosize;
1122 fifosize_data_t ptxfifosize;
1123 int i;
1124 hcchar_data_t hcchar;
1125 hcfg_data_t hcfg;
1126 dwc_otg_hc_regs_t *hc_regs;
1127 int num_channels;
1128 gotgctl_data_t gotgctl = { .d32 = 0 };
1129
1130 DWC_DEBUGPL(DBG_CILV,"%s(%p)\n", __func__, core_if);
1131
1132 /* Restart the Phy Clock */
1133 dwc_write_reg32(core_if->pcgcctl, 0);
1134
1135 /* Initialize Host Configuration Register */
1136 init_fslspclksel(core_if);
1137 if (core_if->core_params->speed == DWC_SPEED_PARAM_FULL)
1138 {
1139 hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg);
1140 hcfg.b.fslssupp = 1;
1141 dwc_write_reg32(&host_if->host_global_regs->hcfg, hcfg.d32);
1142 }
1143
1144 /* Configure data FIFO sizes */
1145 if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) {
1146 DWC_DEBUGPL(DBG_CIL,"Total FIFO Size=%d\n", core_if->total_fifo_size);
1147 DWC_DEBUGPL(DBG_CIL,"Rx FIFO Size=%d\n", params->host_rx_fifo_size);
1148 DWC_DEBUGPL(DBG_CIL,"NP Tx FIFO Size=%d\n", params->host_nperio_tx_fifo_size);
1149 DWC_DEBUGPL(DBG_CIL,"P Tx FIFO Size=%d\n", params->host_perio_tx_fifo_size);
1150
1151 /* Rx FIFO */
1152 DWC_DEBUGPL(DBG_CIL,"initial grxfsiz=%08x\n", dwc_read_reg32(&global_regs->grxfsiz));
1153 dwc_write_reg32(&global_regs->grxfsiz, params->host_rx_fifo_size);
1154 DWC_DEBUGPL(DBG_CIL,"new grxfsiz=%08x\n", dwc_read_reg32(&global_regs->grxfsiz));
1155
1156 /* Non-periodic Tx FIFO */
1157 DWC_DEBUGPL(DBG_CIL,"initial gnptxfsiz=%08x\n", dwc_read_reg32(&global_regs->gnptxfsiz));
1158 nptxfifosize.b.depth = params->host_nperio_tx_fifo_size;
1159 nptxfifosize.b.startaddr = params->host_rx_fifo_size;
1160 dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32);
1161 DWC_DEBUGPL(DBG_CIL,"new gnptxfsiz=%08x\n", dwc_read_reg32(&global_regs->gnptxfsiz));
1162
1163 /* Periodic Tx FIFO */
1164 DWC_DEBUGPL(DBG_CIL,"initial hptxfsiz=%08x\n", dwc_read_reg32(&global_regs->hptxfsiz));
1165 ptxfifosize.b.depth = params->host_perio_tx_fifo_size;
1166 ptxfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth;
1167 dwc_write_reg32(&global_regs->hptxfsiz, ptxfifosize.d32);
1168 DWC_DEBUGPL(DBG_CIL,"new hptxfsiz=%08x\n", dwc_read_reg32(&global_regs->hptxfsiz));
1169 }
1170
1171 /* Clear Host Set HNP Enable in the OTG Control Register */
1172 gotgctl.b.hstsethnpen = 1;
1173 dwc_modify_reg32(&global_regs->gotgctl, gotgctl.d32, 0);
1174
1175 /* Make sure the FIFOs are flushed. */
1176 dwc_otg_flush_tx_fifo(core_if, 0x10 /* all Tx FIFOs */);
1177 dwc_otg_flush_rx_fifo(core_if);
1178
1179 /* Flush out any leftover queued requests. */
1180 num_channels = core_if->core_params->host_channels;
1181 for (i = 0; i < num_channels; i++)
1182 {
1183 hc_regs = core_if->host_if->hc_regs[i];
1184 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1185 hcchar.b.chen = 0;
1186 hcchar.b.chdis = 1;
1187 hcchar.b.epdir = 0;
1188 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1189 }
1190
1191 /* Halt all channels to put them into a known state. */
1192 for (i = 0; i < num_channels; i++)
1193 {
1194 int count = 0;
1195 hc_regs = core_if->host_if->hc_regs[i];
1196 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1197 hcchar.b.chen = 1;
1198 hcchar.b.chdis = 1;
1199 hcchar.b.epdir = 0;
1200 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1201 DWC_DEBUGPL(DBG_HCDV, "%s: Halt channel %d\n", __func__, i);
1202 do {
1203 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1204 if (++count > 1000)
1205 {
1206 DWC_ERROR("%s: Unable to clear halt on channel %d\n",
1207 __func__, i);
1208 break;
1209 }
1210 }
1211 while (hcchar.b.chen);
1212 }
1213
1214 /* Turn on the vbus power. */
1215 DWC_PRINT("Init: Port Power? op_state=%d\n", core_if->op_state);
1216 if (core_if->op_state == A_HOST) {
1217 hprt0.d32 = dwc_otg_read_hprt0(core_if);
1218 DWC_PRINT("Init: Power Port (%d)\n", hprt0.b.prtpwr);
1219 if (hprt0.b.prtpwr == 0) {
1220 hprt0.b.prtpwr = 1;
1221 dwc_write_reg32(host_if->hprt0, hprt0.d32);
1222 }
1223 }
1224
1225 dwc_otg_enable_host_interrupts(core_if);
1226 }
1227
1228 /**
1229 * Prepares a host channel for transferring packets to/from a specific
1230 * endpoint. The HCCHARn register is set up with the characteristics specified
1231 * in _hc. Host channel interrupts that may need to be serviced while this
1232 * transfer is in progress are enabled.
1233 *
1234 * @param core_if Programming view of DWC_otg controller
1235 * @param hc Information needed to initialize the host channel
1236 */
1237 void dwc_otg_hc_init(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
1238 {
1239 uint32_t intr_enable;
1240 hcintmsk_data_t hc_intr_mask;
1241 gintmsk_data_t gintmsk = { .d32 = 0 };
1242 hcchar_data_t hcchar;
1243 hcsplt_data_t hcsplt;
1244
1245 uint8_t hc_num = hc->hc_num;
1246 dwc_otg_host_if_t *host_if = core_if->host_if;
1247 dwc_otg_hc_regs_t *hc_regs = host_if->hc_regs[hc_num];
1248
1249 /* Clear old interrupt conditions for this host channel. */
1250 hc_intr_mask.d32 = 0xFFFFFFFF;
1251 hc_intr_mask.b.reserved = 0;
1252 dwc_write_reg32(&hc_regs->hcint, hc_intr_mask.d32);
1253
1254 /* Enable channel interrupts required for this transfer. */
1255 hc_intr_mask.d32 = 0;
1256 hc_intr_mask.b.chhltd = 1;
1257 if (core_if->dma_enable) {
1258 hc_intr_mask.b.ahberr = 1;
1259 if (hc->error_state && !hc->do_split &&
1260 hc->ep_type != DWC_OTG_EP_TYPE_ISOC) {
1261 hc_intr_mask.b.ack = 1;
1262 if (hc->ep_is_in) {
1263 hc_intr_mask.b.datatglerr = 1;
1264 if (hc->ep_type != DWC_OTG_EP_TYPE_INTR) {
1265 hc_intr_mask.b.nak = 1;
1266 }
1267 }
1268 }
1269 }
1270 else {
1271 switch (hc->ep_type) {
1272 case DWC_OTG_EP_TYPE_CONTROL:
1273 case DWC_OTG_EP_TYPE_BULK:
1274 hc_intr_mask.b.xfercompl = 1;
1275 hc_intr_mask.b.stall = 1;
1276 hc_intr_mask.b.xacterr = 1;
1277 hc_intr_mask.b.datatglerr = 1;
1278 if (hc->ep_is_in) {
1279 hc_intr_mask.b.bblerr = 1;
1280 }
1281 else {
1282 hc_intr_mask.b.nak = 1;
1283 hc_intr_mask.b.nyet = 1;
1284 if (hc->do_ping) {
1285 hc_intr_mask.b.ack = 1;
1286 }
1287 }
1288
1289 if (hc->do_split) {
1290 hc_intr_mask.b.nak = 1;
1291 if (hc->complete_split) {
1292 hc_intr_mask.b.nyet = 1;
1293 }
1294 else {
1295 hc_intr_mask.b.ack = 1;
1296 }
1297 }
1298
1299 if (hc->error_state) {
1300 hc_intr_mask.b.ack = 1;
1301 }
1302 break;
1303 case DWC_OTG_EP_TYPE_INTR:
1304 hc_intr_mask.b.xfercompl = 1;
1305 hc_intr_mask.b.nak = 1;
1306 hc_intr_mask.b.stall = 1;
1307 hc_intr_mask.b.xacterr = 1;
1308 hc_intr_mask.b.datatglerr = 1;
1309 hc_intr_mask.b.frmovrun = 1;
1310
1311 if (hc->ep_is_in) {
1312 hc_intr_mask.b.bblerr = 1;
1313 }
1314 if (hc->error_state) {
1315 hc_intr_mask.b.ack = 1;
1316 }
1317 if (hc->do_split) {
1318 if (hc->complete_split) {
1319 hc_intr_mask.b.nyet = 1;
1320 }
1321 else {
1322 hc_intr_mask.b.ack = 1;
1323 }
1324 }
1325 break;
1326 case DWC_OTG_EP_TYPE_ISOC:
1327 hc_intr_mask.b.xfercompl = 1;
1328 hc_intr_mask.b.frmovrun = 1;
1329 hc_intr_mask.b.ack = 1;
1330
1331 if (hc->ep_is_in) {
1332 hc_intr_mask.b.xacterr = 1;
1333 hc_intr_mask.b.bblerr = 1;
1334 }
1335 break;
1336 }
1337 }
1338 dwc_write_reg32(&hc_regs->hcintmsk, hc_intr_mask.d32);
1339
1340 // if(hc->ep_type == DWC_OTG_EP_TYPE_BULK && !hc->ep_is_in)
1341 // hc->max_packet = 512;
1342 /* Enable the top level host channel interrupt. */
1343 intr_enable = (1 << hc_num);
1344 dwc_modify_reg32(&host_if->host_global_regs->haintmsk, 0, intr_enable);
1345
1346 /* Make sure host channel interrupts are enabled. */
1347 gintmsk.b.hcintr = 1;
1348 dwc_modify_reg32(&core_if->core_global_regs->gintmsk, 0, gintmsk.d32);
1349
1350 /*
1351 * Program the HCCHARn register with the endpoint characteristics for
1352 * the current transfer.
1353 */
1354 hcchar.d32 = 0;
1355 hcchar.b.devaddr = hc->dev_addr;
1356 hcchar.b.epnum = hc->ep_num;
1357 hcchar.b.epdir = hc->ep_is_in;
1358 hcchar.b.lspddev = (hc->speed == DWC_OTG_EP_SPEED_LOW);
1359 hcchar.b.eptype = hc->ep_type;
1360 hcchar.b.mps = hc->max_packet;
1361
1362 dwc_write_reg32(&host_if->hc_regs[hc_num]->hcchar, hcchar.d32);
1363
1364 DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
1365 DWC_DEBUGPL(DBG_HCDV, " Dev Addr: %d\n", hcchar.b.devaddr);
1366 DWC_DEBUGPL(DBG_HCDV, " Ep Num: %d\n", hcchar.b.epnum);
1367 DWC_DEBUGPL(DBG_HCDV, " Is In: %d\n", hcchar.b.epdir);
1368 DWC_DEBUGPL(DBG_HCDV, " Is Low Speed: %d\n", hcchar.b.lspddev);
1369 DWC_DEBUGPL(DBG_HCDV, " Ep Type: %d\n", hcchar.b.eptype);
1370 DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps);
1371 DWC_DEBUGPL(DBG_HCDV, " Multi Cnt: %d\n", hcchar.b.multicnt);
1372
1373 /*
1374 * Program the HCSPLIT register for SPLITs
1375 */
1376 hcsplt.d32 = 0;
1377 if (hc->do_split) {
1378 DWC_DEBUGPL(DBG_HCDV, "Programming HC %d with split --> %s\n", hc->hc_num,
1379 hc->complete_split ? "CSPLIT" : "SSPLIT");
1380 hcsplt.b.compsplt = hc->complete_split;
1381 hcsplt.b.xactpos = hc->xact_pos;
1382 hcsplt.b.hubaddr = hc->hub_addr;
1383 hcsplt.b.prtaddr = hc->port_addr;
1384 DWC_DEBUGPL(DBG_HCDV, " comp split %d\n", hc->complete_split);
1385 DWC_DEBUGPL(DBG_HCDV, " xact pos %d\n", hc->xact_pos);
1386 DWC_DEBUGPL(DBG_HCDV, " hub addr %d\n", hc->hub_addr);
1387 DWC_DEBUGPL(DBG_HCDV, " port addr %d\n", hc->port_addr);
1388 DWC_DEBUGPL(DBG_HCDV, " is_in %d\n", hc->ep_is_in);
1389 DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps);
1390 DWC_DEBUGPL(DBG_HCDV, " xferlen: %d\n", hc->xfer_len);
1391 }
1392 dwc_write_reg32(&host_if->hc_regs[hc_num]->hcsplt, hcsplt.d32);
1393
1394 }
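
/*
 * Example (illustrative sketch, not part of the driver): how the HCD might
 * program a channel for a simple bulk OUT transfer. The dwc_hc_t is assumed
 * to be zero-initialized by the caller; the values are hypothetical.
 */
#if 0
static void example_start_bulk_out(dwc_otg_core_if_t *core_if, dwc_hc_t *hc,
				   uint8_t *buf, uint32_t len)
{
	hc->hc_num	= 0;			/* a free host channel */
	hc->dev_addr	= 1;
	hc->ep_num	= 2;
	hc->ep_is_in	= 0;			/* OUT */
	hc->ep_type	= DWC_OTG_EP_TYPE_BULK;
	hc->speed	= DWC_OTG_EP_SPEED_HIGH;
	hc->max_packet	= 512;
	hc->xfer_buff	= buf;			/* DMA-able in DMA mode */
	hc->xfer_len	= len;
	hc->multi_count	= 1;
	hc->data_pid_start = DWC_OTG_HC_PID_DATA0;

	dwc_otg_hc_init(core_if, hc);		/* HCCHARn/HCSPLT + channel ints */
	dwc_otg_hc_start_transfer(core_if, hc);	/* HCTSIZn, enable the channel */
}
#endif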
1395
1396 /**
1397 * Attempts to halt a host channel. This function should only be called in
1398 * Slave mode or to abort a transfer in either Slave mode or DMA mode. Under
1399 * normal circumstances in DMA mode, the controller halts the channel when the
1400 * transfer is complete or a condition occurs that requires application
1401 * intervention.
1402 *
1403 * In slave mode, checks for a free request queue entry, then sets the Channel
1404 * Enable and Channel Disable bits of the Host Channel Characteristics
1405  * register of the specified channel to initiate the halt. If there is no free
1406 * request queue entry, sets only the Channel Disable bit of the HCCHARn
1407 * register to flush requests for this channel. In the latter case, sets a
1408 * flag to indicate that the host channel needs to be halted when a request
1409 * queue slot is open.
1410 *
1411 * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
1412 * HCCHARn register. The controller ensures there is space in the request
1413 * queue before submitting the halt request.
1414 *
1415 * Some time may elapse before the core flushes any posted requests for this
1416 * host channel and halts. The Channel Halted interrupt handler completes the
1417 * deactivation of the host channel.
1418 *
1419 * @param core_if Controller register interface.
1420 * @param hc Host channel to halt.
1421 * @param halt_status Reason for halting the channel.
1422 */
1423 void dwc_otg_hc_halt(dwc_otg_core_if_t *core_if,
1424 dwc_hc_t *hc,
1425 dwc_otg_halt_status_e halt_status)
1426 {
1427 gnptxsts_data_t nptxsts;
1428 hptxsts_data_t hptxsts;
1429 hcchar_data_t hcchar;
1430 dwc_otg_hc_regs_t *hc_regs;
1431 dwc_otg_core_global_regs_t *global_regs;
1432 dwc_otg_host_global_regs_t *host_global_regs;
1433
1434 hc_regs = core_if->host_if->hc_regs[hc->hc_num];
1435 global_regs = core_if->core_global_regs;
1436 host_global_regs = core_if->host_if->host_global_regs;
1437
1438 WARN_ON(halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS);
1439
1440 if (halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE ||
1441 halt_status == DWC_OTG_HC_XFER_AHB_ERR) {
1442 /*
1443 * Disable all channel interrupts except Ch Halted. The QTD
1444 * and QH state associated with this transfer has been cleared
1445 * (in the case of URB_DEQUEUE), so the channel needs to be
1446 * shut down carefully to prevent crashes.
1447 */
1448 hcintmsk_data_t hcintmsk;
1449 hcintmsk.d32 = 0;
1450 hcintmsk.b.chhltd = 1;
1451 dwc_write_reg32(&hc_regs->hcintmsk, hcintmsk.d32);
1452
1453 /*
1454 * Make sure no other interrupts besides halt are currently
1455 * pending. Handling another interrupt could cause a crash due
1456 * to the QTD and QH state.
1457 */
1458 dwc_write_reg32(&hc_regs->hcint, ~hcintmsk.d32);
1459
1460 /*
1461 * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
1462 * even if the channel was already halted for some other
1463 * reason.
1464 */
1465 hc->halt_status = halt_status;
1466
1467 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1468 if (hcchar.b.chen == 0) {
1469 /*
1470 * The channel is either already halted or it hasn't
1471 * started yet. In DMA mode, the transfer may halt if
1472 * it finishes normally or a condition occurs that
1473 * requires driver intervention. Don't want to halt
1474 * the channel again. In either Slave or DMA mode,
1475 * it's possible that the transfer has been assigned
1476 * to a channel, but not started yet when an URB is
1477 * dequeued. Don't want to halt a channel that hasn't
1478 * started yet.
1479 */
1480 return;
1481 }
1482 }
1483
1484 if (hc->halt_pending) {
1485 /*
1486 * A halt has already been issued for this channel. This might
1487 * happen when a transfer is aborted by a higher level in
1488 * the stack.
1489 */
1490 #ifdef DEBUG
1491 DWC_PRINT("*** %s: Channel %d, _hc->halt_pending already set ***\n",
1492 __func__, hc->hc_num);
1493
1494 /* dwc_otg_dump_global_registers(core_if); */
1495 /* dwc_otg_dump_host_registers(core_if); */
1496 #endif
1497 return;
1498 }
1499
1500 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1501 hcchar.b.chen = 1;
1502 hcchar.b.chdis = 1;
1503
1504 if (!core_if->dma_enable) {
1505 /* Check for space in the request queue to issue the halt. */
1506 if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL ||
1507 hc->ep_type == DWC_OTG_EP_TYPE_BULK) {
1508 nptxsts.d32 = dwc_read_reg32(&global_regs->gnptxsts);
1509 if (nptxsts.b.nptxqspcavail == 0) {
1510 hcchar.b.chen = 0;
1511 }
1512 }
1513 else {
1514 hptxsts.d32 = dwc_read_reg32(&host_global_regs->hptxsts);
1515 if ((hptxsts.b.ptxqspcavail == 0) || (core_if->queuing_high_bandwidth)) {
1516 hcchar.b.chen = 0;
1517 }
1518 }
1519 }
1520
1521 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1522
1523 hc->halt_status = halt_status;
1524
1525 if (hcchar.b.chen) {
1526 hc->halt_pending = 1;
1527 hc->halt_on_queue = 0;
1528 }
1529 else {
1530 hc->halt_on_queue = 1;
1531 }
1532
1533 DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
1534 DWC_DEBUGPL(DBG_HCDV, " hcchar: 0x%08x\n", hcchar.d32);
1535 DWC_DEBUGPL(DBG_HCDV, " halt_pending: %d\n", hc->halt_pending);
1536 DWC_DEBUGPL(DBG_HCDV, " halt_on_queue: %d\n", hc->halt_on_queue);
1537 DWC_DEBUGPL(DBG_HCDV, " halt_status: %d\n", hc->halt_status);
1538
1539 return;
1540 }
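
/*
 * Example (illustrative fragment): aborting an in-progress transfer from the
 * HCD. The halt completes asynchronously; the Channel Halted interrupt
 * handler is expected to finish the teardown and call dwc_otg_hc_cleanup().
 */
#if 0
	dwc_otg_hc_halt(core_if, hc, DWC_OTG_HC_XFER_URB_DEQUEUE);
	/* ... later, from the HCINTn handler, once chhltd is asserted ... */
	dwc_otg_hc_cleanup(core_if, hc);
#endif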
1541
1542 /**
1543 * Clears the transfer state for a host channel. This function is normally
1544 * called after a transfer is done and the host channel is being released.
1545 *
1546 * @param core_if Programming view of DWC_otg controller.
1547 * @param hc Identifies the host channel to clean up.
1548 */
1549 void dwc_otg_hc_cleanup(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
1550 {
1551 dwc_otg_hc_regs_t *hc_regs;
1552
1553 hc->xfer_started = 0;
1554
1555 /*
1556 * Clear channel interrupt enables and any unhandled channel interrupt
1557 * conditions.
1558 */
1559 hc_regs = core_if->host_if->hc_regs[hc->hc_num];
1560 dwc_write_reg32(&hc_regs->hcintmsk, 0);
1561 dwc_write_reg32(&hc_regs->hcint, 0xFFFFFFFF);
1562
1563 #ifdef DEBUG
1564 del_timer(&core_if->hc_xfer_timer[hc->hc_num]);
1565 {
1566 hcchar_data_t hcchar;
1567 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1568 if (hcchar.b.chdis) {
1569 DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n",
1570 __func__, hc->hc_num, hcchar.d32);
1571 }
1572 }
1573 #endif
1574 }
1575
1576 /**
1577 * Sets the channel property that indicates in which frame a periodic transfer
1578 * should occur. This is always set to the _next_ frame. This function has no
1579 * effect on non-periodic transfers.
1580 *
1581 * @param core_if Programming view of DWC_otg controller.
1582 * @param hc Identifies the host channel to set up and its properties.
1583 * @param hcchar Current value of the HCCHAR register for the specified host
1584 * channel.
1585 */
1586 static inline void hc_set_even_odd_frame(dwc_otg_core_if_t *core_if,
1587 dwc_hc_t *hc,
1588 hcchar_data_t *hcchar)
1589 {
1590 if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
1591 hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
1592 hfnum_data_t hfnum;
1593 hfnum.d32 = dwc_read_reg32(&core_if->host_if->host_global_regs->hfnum);
1594
1595 /* 1 if _next_ frame is odd, 0 if it's even */
1596 hcchar->b.oddfrm = (hfnum.b.frnum & 0x1) ? 0 : 1;
1597 #ifdef DEBUG
1598 if (hc->ep_type == DWC_OTG_EP_TYPE_INTR && hc->do_split && !hc->complete_split) {
1599 switch (hfnum.b.frnum & 0x7) {
1600 case 7:
1601 core_if->hfnum_7_samples++;
1602 core_if->hfnum_7_frrem_accum += hfnum.b.frrem;
1603 break;
1604 case 0:
1605 core_if->hfnum_0_samples++;
1606 core_if->hfnum_0_frrem_accum += hfnum.b.frrem;
1607 break;
1608 default:
1609 core_if->hfnum_other_samples++;
1610 core_if->hfnum_other_frrem_accum += hfnum.b.frrem;
1611 break;
1612 }
1613 }
1614 #endif
1615 }
1616 }
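
/*
 * Worked example: if HFNUM.FrNum currently reads 0x2A (even), the next
 * (micro)frame is odd, so OddFrm is set to 1; for an odd FrNum the next
 * frame is even and OddFrm is cleared. This is why the ternary above looks
 * inverted with respect to the current frame number.
 */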
1617
1618 #ifdef DEBUG
1619 static void hc_xfer_timeout(unsigned long ptr)
1620 {
1621 hc_xfer_info_t *xfer_info = (hc_xfer_info_t *)ptr;
1622 int hc_num = xfer_info->hc->hc_num;
1623 DWC_WARN("%s: timeout on channel %d\n", __func__, hc_num);
1624 DWC_WARN(" start_hcchar_val 0x%08x\n", xfer_info->core_if->start_hcchar_val[hc_num]);
1625 }
1626 #endif
1627
1628 /*
1629 * This function does the setup for a data transfer for a host channel and
1630 * starts the transfer. May be called in either Slave mode or DMA mode. In
1631 * Slave mode, the caller must ensure that there is sufficient space in the
1632 * request queue and Tx Data FIFO.
1633 *
1634 * For an OUT transfer in Slave mode, it loads a data packet into the
1635 * appropriate FIFO. If necessary, additional data packets will be loaded in
1636 * the Host ISR.
1637 *
1638 * For an IN transfer in Slave mode, a data packet is requested. The data
1639 * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
1640 * additional data packets are requested in the Host ISR.
1641 *
1642  * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
1646 * register along with a packet count of 1 and the channel is enabled. This
1647 * causes a single PING transaction to occur. Other fields in HCTSIZ are
1648 * simply set to 0 since no data transfer occurs in this case.
1649 *
1650 * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
1651 * all the information required to perform the subsequent data transfer. In
1652 * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
1653 * controller performs the entire PING protocol, then starts the data
1654 * transfer.
1655 *
1656 * @param core_if Programming view of DWC_otg controller.
1657 * @param hc Information needed to initialize the host channel. The xfer_len
1658 * value may be reduced to accommodate the max widths of the XferSize and
1659 * PktCnt fields in the HCTSIZn register. The multi_count value may be changed
1660 * to reflect the final xfer_len value.
1661 */
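
/*
 * Worked example (hypothetical values): for a 3000-byte bulk IN transfer
 * with max_packet = 512, num_packets = (3000 + 511) / 512 = 6, and because
 * IN transfers are always programmed as an integral number of max-size
 * packets, xfer_len is rounded up to 6 * 512 = 3072 before being written
 * to HCTSIZn.XferSize.
 */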
1662 void dwc_otg_hc_start_transfer(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
1663 {
1664 hcchar_data_t hcchar;
1665 hctsiz_data_t hctsiz;
1666 uint16_t num_packets;
1667 uint32_t max_hc_xfer_size = core_if->core_params->max_transfer_size;
1668 uint16_t max_hc_pkt_count = core_if->core_params->max_packet_count;
1669 dwc_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num];
1670
1671 hctsiz.d32 = 0;
1672
1673 if (hc->do_ping) {
1674 if (!core_if->dma_enable) {
1675 dwc_otg_hc_do_ping(core_if, hc);
1676 hc->xfer_started = 1;
1677 return;
1678 }
1679 else {
1680 hctsiz.b.dopng = 1;
1681 }
1682 }
1683
1684 if (hc->do_split) {
1685 num_packets = 1;
1686
1687 if (hc->complete_split && !hc->ep_is_in) {
1688 /* For CSPLIT OUT Transfer, set the size to 0 so the
1689 * core doesn't expect any data written to the FIFO */
1690 hc->xfer_len = 0;
1691 }
1692 else if (hc->ep_is_in || (hc->xfer_len > hc->max_packet)) {
1693 hc->xfer_len = hc->max_packet;
1694 }
1695 else if (!hc->ep_is_in && (hc->xfer_len > 188)) {
1696 hc->xfer_len = 188;
1697 }
1698
1699 hctsiz.b.xfersize = hc->xfer_len;
1700 }
1701 else {
1702 /*
1703 * Ensure that the transfer length and packet count will fit
1704 * in the widths allocated for them in the HCTSIZn register.
1705 */
1706 if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
1707 hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
1708 /*
1709 * Make sure the transfer size is no larger than one
1710 * (micro)frame's worth of data. (A check was done
1711 * when the periodic transfer was accepted to ensure
1712 * that a (micro)frame's worth of data can be
1713 * programmed into a channel.)
1714 */
1715 uint32_t max_periodic_len = hc->multi_count * hc->max_packet;
1716 if (hc->xfer_len > max_periodic_len) {
1717 hc->xfer_len = max_periodic_len;
1718 }
1721
1722 }
1723 else if (hc->xfer_len > max_hc_xfer_size) {
1724 /* Make sure that xfer_len is a multiple of max packet size. */
1725 hc->xfer_len = max_hc_xfer_size - hc->max_packet + 1;
1726 }
1727
1728 if (hc->xfer_len > 0) {
1729 num_packets = (hc->xfer_len + hc->max_packet - 1) / hc->max_packet;
1730 if (num_packets > max_hc_pkt_count) {
1731 num_packets = max_hc_pkt_count;
1732 hc->xfer_len = num_packets * hc->max_packet;
1733 }
1734 }
1735 else {
1736 /* Need 1 packet for transfer length of 0. */
1737 num_packets = 1;
1738 }
1739
1740 if (hc->ep_is_in) {
1741 /* Always program an integral # of max packets for IN transfers. */
1742 hc->xfer_len = num_packets * hc->max_packet;
1743 }
1744
1745 if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
1746 hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
1747 /*
1748 * Make sure that the multi_count field matches the
1749 * actual transfer length.
1750 */
1751 hc->multi_count = num_packets;
1752 }
1753
1754 if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
1755 /* Set up the initial PID for the transfer. */
1756 if (hc->speed == DWC_OTG_EP_SPEED_HIGH) {
1757 if (hc->ep_is_in) {
1758 if (hc->multi_count == 1) {
1759 hc->data_pid_start = DWC_OTG_HC_PID_DATA0;
1760 }
1761 else if (hc->multi_count == 2) {
1762 hc->data_pid_start = DWC_OTG_HC_PID_DATA1;
1763 }
1764 else {
1765 hc->data_pid_start = DWC_OTG_HC_PID_DATA2;
1766 }
1767 }
1768 else {
1769 if (hc->multi_count == 1) {
1770 hc->data_pid_start = DWC_OTG_HC_PID_DATA0;
1771 }
1772 else {
1773 hc->data_pid_start = DWC_OTG_HC_PID_MDATA;
1774 }
1775 }
1776 }
1777 else {
1778 hc->data_pid_start = DWC_OTG_HC_PID_DATA0;
1779 }
1780 }
1781
1782 hctsiz.b.xfersize = hc->xfer_len;
1783 }
1784
1785 hc->start_pkt_count = num_packets;
1786 hctsiz.b.pktcnt = num_packets;
1787 hctsiz.b.pid = hc->data_pid_start;
1788 dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
1789
1790 DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
1791 DWC_DEBUGPL(DBG_HCDV, " Xfer Size: %d\n", hctsiz.b.xfersize);
1792 DWC_DEBUGPL(DBG_HCDV, " Num Pkts: %d\n", hctsiz.b.pktcnt);
1793 DWC_DEBUGPL(DBG_HCDV, " Start PID: %d\n", hctsiz.b.pid);
1794
1795 if (core_if->dma_enable) {
1796 #if defined (CONFIG_DWC_OTG_HOST_ONLY)
1797 if ((uint32_t)hc->xfer_buff & 0x3) {
1798 /* non DWORD-aligned buffer case*/
1799 if(!hc->qh->dw_align_buf) {
1800 hc->qh->dw_align_buf =
1801 dma_alloc_coherent(NULL,
1802 core_if->core_params->max_transfer_size,
1803 &hc->qh->dw_align_buf_dma,
1804 GFP_ATOMIC | GFP_DMA);
1805 if (!hc->qh->dw_align_buf) {
1806
1807 DWC_ERROR("%s: Failed to allocate memory to handle "
1808 "non-dword aligned buffer case\n", __func__);
1809 return;
1810 }
1811
1812 }
1813 if (!hc->ep_is_in) {
1814 memcpy(hc->qh->dw_align_buf, phys_to_virt((uint32_t)hc->xfer_buff), hc->xfer_len);
1815 }
1816
1817 dwc_write_reg32(&hc_regs->hcdma, hc->qh->dw_align_buf_dma);
1818 }
1819 else
1820 #endif
1821 dwc_write_reg32(&hc_regs->hcdma, (uint32_t)hc->xfer_buff);
1822 }
1823
1824 /* Start the split */
1825 if (hc->do_split) {
1826 hcsplt_data_t hcsplt;
1827 hcsplt.d32 = dwc_read_reg32 (&hc_regs->hcsplt);
1828 hcsplt.b.spltena = 1;
1829 dwc_write_reg32(&hc_regs->hcsplt, hcsplt.d32);
1830 }
1831
1832 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1833 hcchar.b.multicnt = hc->multi_count;
1834 hc_set_even_odd_frame(core_if, hc, &hcchar);
1835 #ifdef DEBUG
1836 core_if->start_hcchar_val[hc->hc_num] = hcchar.d32;
1837 if (hcchar.b.chdis) {
1838 DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n",
1839 __func__, hc->hc_num, hcchar.d32);
1840 }
1841 #endif
1842
1843 /* Set host channel enable after all other setup is complete. */
1844 hcchar.b.chen = 1;
1845 hcchar.b.chdis = 0;
1846 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1847
1848 hc->xfer_started = 1;
1849 hc->requests++;
1850
1851 if (!core_if->dma_enable &&
1852 !hc->ep_is_in && hc->xfer_len > 0) {
1853 /* Load OUT packet into the appropriate Tx FIFO. */
1854 dwc_otg_hc_write_packet(core_if, hc);
1855 }
1856
1857 #ifdef DEBUG
1858 /* Start a timer for this transfer. */
1859 core_if->hc_xfer_timer[hc->hc_num].function = hc_xfer_timeout;
1860 core_if->hc_xfer_info[hc->hc_num].core_if = core_if;
1861 core_if->hc_xfer_info[hc->hc_num].hc = hc;
1862 core_if->hc_xfer_timer[hc->hc_num].data = (unsigned long)(&core_if->hc_xfer_info[hc->hc_num]);
1863 core_if->hc_xfer_timer[hc->hc_num].expires = jiffies + (HZ*10);
1864 add_timer(&core_if->hc_xfer_timer[hc->hc_num]);
1865 #endif
1866 }
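/*
 * Illustrative sketch, not part of the driver: the HCTSIZ sizing rules
 * applied by dwc_otg_hc_start_transfer(), worked through with concrete
 * numbers. The function name and the values below are hypothetical; a
 * non-split bulk IN channel with max_packet = 512 and a 3000-byte request
 * is assumed.
 */
#if 0
static void hc_sizing_example(void)
{
	uint32_t max_packet = 512;	/* wMaxPacketSize of the endpoint */
	uint32_t xfer_len   = 3000;	/* bytes requested by the HCD */
	uint16_t num_packets;

	/* pktcnt = ceil(xfer_len / max_packet) = (3000 + 511) / 512 = 6 */
	num_packets = (xfer_len + max_packet - 1) / max_packet;

	/* IN channels are always programmed with an integral number of
	 * max-packet-sized packets, so xfer_len is rounded up to
	 * 6 * 512 = 3072 before xfersize/pktcnt are written to HCTSIZ. */
	xfer_len = num_packets * max_packet;

	(void)xfer_len;
}
#endif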
1867
1868 /**
1869  * This function continues a data transfer that was started by a previous call
1870 * to <code>dwc_otg_hc_start_transfer</code>. The caller must ensure there is
1871 * sufficient space in the request queue and Tx Data FIFO. This function
1872 * should only be called in Slave mode. In DMA mode, the controller acts
1873 * autonomously to complete transfers programmed to a host channel.
1874 *
1875 * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
1876 * if there is any data remaining to be queued. For an IN transfer, another
1877 * data packet is always requested. For the SETUP phase of a control transfer,
1878 * this function does nothing.
1879 *
1880 * @return 1 if a new request is queued, 0 if no more requests are required
1881 * for this transfer.
1882 */
1883 int dwc_otg_hc_continue_transfer(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
1884 {
1885 DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
1886
1887 if (hc->do_split) {
1888 /* SPLITs always queue just once per channel */
1889 return 0;
1890 }
1891 else if (hc->data_pid_start == DWC_OTG_HC_PID_SETUP) {
1892 /* SETUPs are queued only once since they can't be NAKed. */
1893 return 0;
1894 }
1895 else if (hc->ep_is_in) {
1896 /*
1897 * Always queue another request for other IN transfers. If
1898 * back-to-back INs are issued and NAKs are received for both,
1899 * the driver may still be processing the first NAK when the
1900 * second NAK is received. When the interrupt handler clears
1901 * the NAK interrupt for the first NAK, the second NAK will
1902 * not be seen. So we can't depend on the NAK interrupt
1903 * handler to requeue a NAKed request. Instead, IN requests
1904 * are issued each time this function is called. When the
1905 * transfer completes, the extra requests for the channel will
1906 * be flushed.
1907 */
1908 hcchar_data_t hcchar;
1909 dwc_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num];
1910
1911 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1912 hc_set_even_odd_frame(core_if, hc, &hcchar);
1913 hcchar.b.chen = 1;
1914 hcchar.b.chdis = 0;
1915 DWC_DEBUGPL(DBG_HCDV, " IN xfer: hcchar = 0x%08x\n", hcchar.d32);
1916 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1917 hc->requests++;
1918 return 1;
1919 }
1920 else {
1921 /* OUT transfers. */
1922 if (hc->xfer_count < hc->xfer_len) {
1923 if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
1924 hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
1925 hcchar_data_t hcchar;
1926 dwc_otg_hc_regs_t *hc_regs;
1927 hc_regs = core_if->host_if->hc_regs[hc->hc_num];
1928 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1929 hc_set_even_odd_frame(core_if, hc, &hcchar);
1930 }
1931
1932 /* Load OUT packet into the appropriate Tx FIFO. */
1933 dwc_otg_hc_write_packet(core_if, hc);
1934 hc->requests++;
1935 return 1;
1936 }
1937 else {
1938 return 0;
1939 }
1940 }
1941 }
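/*
 * Illustrative sketch, not part of the driver: how a Slave-mode caller
 * might use the start/continue pair, based only on the return-value
 * contract documented above (1 = another request queued, 0 = no more
 * requests needed). The function name is hypothetical and the request
 * queue / Tx FIFO space checks a real HCD performs are omitted.
 */
#if 0
static void slave_mode_transfer_example(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
{
	dwc_otg_hc_start_transfer(core_if, hc);

	/* In the real HCD this is called again from interrupt context
	 * whenever more requests can be queued for the channel. */
	if (dwc_otg_hc_continue_transfer(core_if, hc) == 0) {
		/* Transfer fully queued; wait for channel interrupts. */
	}
}
#endif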
1942
1943 /**
1944 * Starts a PING transfer. This function should only be called in Slave mode.
1945 * The Do Ping bit is set in the HCTSIZ register, then the channel is enabled.
1946 */
1947 void dwc_otg_hc_do_ping(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
1948 {
1949 hcchar_data_t hcchar;
1950 hctsiz_data_t hctsiz;
1951 dwc_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num];
1952
1953 DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
1954
1955 hctsiz.d32 = 0;
1956 hctsiz.b.dopng = 1;
1957 hctsiz.b.pktcnt = 1;
1958 dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
1959
1960 hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
1961 hcchar.b.chen = 1;
1962 hcchar.b.chdis = 0;
1963 dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
1964 }
1965
1966 /*
1967 * This function writes a packet into the Tx FIFO associated with the Host
1968 * Channel. For a channel associated with a non-periodic EP, the non-periodic
1969 * Tx FIFO is written. For a channel associated with a periodic EP, the
1970 * periodic Tx FIFO is written. This function should only be called in Slave
1971 * mode.
1972 *
1973  * Upon return the xfer_buff and xfer_count fields in hc are incremented by
1974  * the number of bytes written to the Tx FIFO.
1975 */
1976 void dwc_otg_hc_write_packet(dwc_otg_core_if_t *core_if, dwc_hc_t *hc)
1977 {
1978 uint32_t i;
1979 uint32_t remaining_count;
1980 uint32_t byte_count;
1981 uint32_t dword_count;
1982
1983 uint32_t *data_buff = (uint32_t *)(hc->xfer_buff);
1984 uint32_t *data_fifo = core_if->data_fifo[hc->hc_num];
1985
1986 remaining_count = hc->xfer_len - hc->xfer_count;
1987 if (remaining_count > hc->max_packet) {
1988 byte_count = hc->max_packet;
1989 }
1990 else {
1991 byte_count = remaining_count;
1992 }
1993
1994 dword_count = (byte_count + 3) / 4;
1995
1996 if ((((unsigned long)data_buff) & 0x3) == 0) {
1997 /* xfer_buff is DWORD aligned. */
1998 for (i = 0; i < dword_count; i++, data_buff++)
1999 {
2000 dwc_write_reg32(data_fifo, *data_buff);
2001 }
2002 }
2003 else {
2004 /* xfer_buff is not DWORD aligned. */
2005 for (i = 0; i < dword_count; i++, data_buff++)
2006 {
2007 dwc_write_reg32(data_fifo, get_unaligned(data_buff));
2008 }
2009 }
2010
2011 hc->xfer_count += byte_count;
2012 hc->xfer_buff += byte_count;
2013 }
2014
2015 /**
2016 * Gets the current USB frame number. This is the frame number from the last
2017 * SOF packet.
2018 */
2019 uint32_t dwc_otg_get_frame_number(dwc_otg_core_if_t *core_if)
2020 {
2021 dsts_data_t dsts;
2022 dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts);
2023
2024 /* read current frame/microframe number from DSTS register */
2025 return dsts.b.soffn;
2026 }
2027
2028 /**
2029 * This function reads a setup packet from the Rx FIFO into the destination
2030 * buffer. This function is called from the Rx Status Queue Level (RxStsQLvl)
2031 * Interrupt routine when a SETUP packet has been received in Slave mode.
2032 *
2033 * @param core_if Programming view of DWC_otg controller.
2034 * @param dest Destination buffer for packet data.
2035 */
2036 void dwc_otg_read_setup_packet(dwc_otg_core_if_t *core_if, uint32_t *dest)
2037 {
2038 	/* Get the 8 bytes of setup transaction data */
2039
2040 /* Pop 2 DWORDS off the receive data FIFO into memory */
2041 dest[0] = dwc_read_reg32(core_if->data_fifo[0]);
2042 dest[1] = dwc_read_reg32(core_if->data_fifo[0]);
2043 }
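/*
 * Illustrative sketch, not part of the driver: the two DWORDs popped above
 * carry the standard 8-byte USB SETUP packet. The structure below exists
 * only to show the layout (the PCD interprets the buffer with its own
 * types); multi-byte fields are little-endian on the wire.
 */
#if 0
struct setup_packet_layout_example {
	uint8_t  bmRequestType;	/* direction, type and recipient */
	uint8_t  bRequest;	/* request code */
	uint16_t wValue;	/* request-specific value */
	uint16_t wIndex;	/* request-specific index or offset */
	uint16_t wLength;	/* length of the data stage in bytes */
};
#endif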
2044
2045
2046 /**
2047 * This function enables EP0 OUT to receive SETUP packets and configures EP0
2048 * IN for transmitting packets. It is normally called when the
2049 * "Enumeration Done" interrupt occurs.
2050 *
2051 * @param core_if Programming view of DWC_otg controller.
2052 * @param ep The EP0 data.
2053 */
2054 void dwc_otg_ep0_activate(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2055 {
2056 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
2057 dsts_data_t dsts;
2058 depctl_data_t diepctl;
2059 depctl_data_t doepctl;
2060 dctl_data_t dctl = { .d32 = 0 };
2061
2062 /* Read the Device Status and Endpoint 0 Control registers */
2063 dsts.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dsts);
2064 diepctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl);
2065 doepctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl);
2066
2067 /* Set the MPS of the IN EP based on the enumeration speed */
2068 switch (dsts.b.enumspd) {
2069 case DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ:
2070 case DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ:
2071 case DWC_DSTS_ENUMSPD_FS_PHY_48MHZ:
2072 diepctl.b.mps = DWC_DEP0CTL_MPS_64;
2073 break;
2074 case DWC_DSTS_ENUMSPD_LS_PHY_6MHZ:
2075 diepctl.b.mps = DWC_DEP0CTL_MPS_8;
2076 break;
2077 }
2078
2079 dwc_write_reg32(&dev_if->in_ep_regs[0]->diepctl, diepctl.d32);
2080
2081 /* Enable OUT EP for receive */
2082 doepctl.b.epena = 1;
2083 dwc_write_reg32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32);
2084
2085 #ifdef VERBOSE
2086 DWC_DEBUGPL(DBG_PCDV,"doepctl0=%0x\n",
2087 dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl));
2088 DWC_DEBUGPL(DBG_PCDV,"diepctl0=%0x\n",
2089 dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl));
2090 #endif
2091 dctl.b.cgnpinnak = 1;
2092
2093 dwc_modify_reg32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32);
2094 DWC_DEBUGPL(DBG_PCDV,"dctl=%0x\n",
2095 dwc_read_reg32(&dev_if->dev_global_regs->dctl));
2096 }
2097
2098 /**
2099 * This function activates an EP. The Device EP control register for
2100 * the EP is configured as defined in the ep structure. Note: This
2101 * function is not used for EP0.
2102 *
2103 * @param core_if Programming view of DWC_otg controller.
2104 * @param ep The EP to activate.
2105 */
2106 void dwc_otg_ep_activate(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2107 {
2108 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
2109 depctl_data_t depctl;
2110 volatile uint32_t *addr;
2111 daint_data_t daintmsk = { .d32 = 0 };
2112
2113 DWC_DEBUGPL(DBG_PCDV, "%s() EP%d-%s\n", __func__, ep->num,
2114 (ep->is_in?"IN":"OUT"));
2115
2116 /* Read DEPCTLn register */
2117 if (ep->is_in == 1) {
2118 addr = &dev_if->in_ep_regs[ep->num]->diepctl;
2119 daintmsk.ep.in = 1<<ep->num;
2120 }
2121 else {
2122 addr = &dev_if->out_ep_regs[ep->num]->doepctl;
2123 daintmsk.ep.out = 1<<ep->num;
2124 }
2125
2126 /* If the EP is already active don't change the EP Control
2127 * register. */
2128 depctl.d32 = dwc_read_reg32(addr);
2129 if (!depctl.b.usbactep) {
2130 depctl.b.mps = ep->maxpacket;
2131 depctl.b.eptype = ep->type;
2132 depctl.b.txfnum = ep->tx_fifo_num;
2133
2134 if (ep->type == DWC_OTG_EP_TYPE_ISOC) {
2135 depctl.b.setd0pid = 1; // ???
2136 }
2137 else {
2138 depctl.b.setd0pid = 1;
2139 }
2140 depctl.b.usbactep = 1;
2141
2142 dwc_write_reg32(addr, depctl.d32);
2143 DWC_DEBUGPL(DBG_PCDV,"DEPCTL=%08x\n", dwc_read_reg32(addr));
2144 }
2145
2146 /* Enable the Interrupt for this EP */
2147 if(core_if->multiproc_int_enable) {
2148 if (ep->is_in == 1) {
2149 diepmsk_data_t diepmsk = { .d32 = 0};
2150 diepmsk.b.xfercompl = 1;
2151 diepmsk.b.timeout = 1;
2152 diepmsk.b.epdisabled = 1;
2153 diepmsk.b.ahberr = 1;
2154 diepmsk.b.intknepmis = 1;
2155 diepmsk.b.txfifoundrn = 1; //?????
2156
2157
2158 if(core_if->dma_desc_enable) {
2159 diepmsk.b.bna = 1;
2160 }
2161 /*
2162 if(core_if->dma_enable) {
2163 doepmsk.b.nak = 1;
2164 }
2165 */
2166 dwc_write_reg32(&dev_if->dev_global_regs->diepeachintmsk[ep->num], diepmsk.d32);
2167
2168 } else {
2169 doepmsk_data_t doepmsk = { .d32 = 0};
2170 doepmsk.b.xfercompl = 1;
2171 doepmsk.b.ahberr = 1;
2172 doepmsk.b.epdisabled = 1;
2173
2174
2175 if(core_if->dma_desc_enable) {
2176 doepmsk.b.bna = 1;
2177 }
2178 /*
2179 doepmsk.b.babble = 1;
2180 doepmsk.b.nyet = 1;
2181 doepmsk.b.nak = 1;
2182 */
2183 dwc_write_reg32(&dev_if->dev_global_regs->doepeachintmsk[ep->num], doepmsk.d32);
2184 }
2185 dwc_modify_reg32(&dev_if->dev_global_regs->deachintmsk,
2186 0, daintmsk.d32);
2187 } else {
2188 dwc_modify_reg32(&dev_if->dev_global_regs->daintmsk,
2189 0, daintmsk.d32);
2190 }
2191
2192 DWC_DEBUGPL(DBG_PCDV,"DAINTMSK=%0x\n",
2193 dwc_read_reg32(&dev_if->dev_global_regs->daintmsk));
2194
2195 ep->stall_clear_flag = 0;
2196 return;
2197 }
2198
2199 /**
2200 * This function deactivates an EP. This is done by clearing the USB Active
2201 * EP bit in the Device EP control register. Note: This function is not used
2202 * for EP0. EP0 cannot be deactivated.
2203 *
2204 * @param core_if Programming view of DWC_otg controller.
2205 * @param ep The EP to deactivate.
2206 */
2207 void dwc_otg_ep_deactivate(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2208 {
2209 depctl_data_t depctl = { .d32 = 0 };
2210 volatile uint32_t *addr;
2211 daint_data_t daintmsk = { .d32 = 0};
2212
2213 /* Read DEPCTLn register */
2214 if (ep->is_in == 1) {
2215 addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
2216 daintmsk.ep.in = 1<<ep->num;
2217 }
2218 else {
2219 addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
2220 daintmsk.ep.out = 1<<ep->num;
2221 }
2222
2223 depctl.b.usbactep = 0;
2224
2225 if(core_if->dma_desc_enable)
2226 depctl.b.epdis = 1;
2227
2228 dwc_write_reg32(addr, depctl.d32);
2229
2230 /* Disable the Interrupt for this EP */
2231 if(core_if->multiproc_int_enable) {
2232 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->deachintmsk,
2233 daintmsk.d32, 0);
2234
2235 if (ep->is_in == 1) {
2236 dwc_write_reg32(&core_if->dev_if->dev_global_regs->diepeachintmsk[ep->num], 0);
2237 } else {
2238 dwc_write_reg32(&core_if->dev_if->dev_global_regs->doepeachintmsk[ep->num], 0);
2239 }
2240 } else {
2241 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->daintmsk,
2242 daintmsk.d32, 0);
2243 }
2244 }
2245
2246 /**
2247  * This function initializes the DMA descriptor chain for an EP transfer
2248  * in Descriptor DMA mode. The total length is split into descriptors of
2249  * at most ep->maxxfer bytes; the last descriptor has its L and IOC bits
2250  * set, and its byte count is padded to a DWORD multiple for OUT EPs.
2251  *
2252  * @param core_if Programming view of DWC_otg controller.
2253  * @param ep The EP whose descriptor chain is being set up.
2254 */
2255 static void init_dma_desc_chain(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2256 {
2257 dwc_otg_dma_desc_t* dma_desc;
2258 uint32_t offset;
2259 uint32_t xfer_est;
2260 int i;
2261
2262 ep->desc_cnt = ( ep->total_len / ep->maxxfer) +
2263 ((ep->total_len % ep->maxxfer) ? 1 : 0);
2264 if(!ep->desc_cnt)
2265 ep->desc_cnt = 1;
2266
2267 dma_desc = ep->desc_addr;
2268 xfer_est = ep->total_len;
2269 offset = 0;
2270 for( i = 0; i < ep->desc_cnt; ++i) {
2271 /** DMA Descriptor Setup */
2272 if(xfer_est > ep->maxxfer) {
2273 dma_desc->status.b.bs = BS_HOST_BUSY;
2274 dma_desc->status.b.l = 0;
2275 dma_desc->status.b.ioc = 0;
2276 dma_desc->status.b.sp = 0;
2277 dma_desc->status.b.bytes = ep->maxxfer;
2278 dma_desc->buf = ep->dma_addr + offset;
2279 dma_desc->status.b.bs = BS_HOST_READY;
2280
2281 xfer_est -= ep->maxxfer;
2282 offset += ep->maxxfer;
2283 } else {
2284 dma_desc->status.b.bs = BS_HOST_BUSY;
2285 dma_desc->status.b.l = 1;
2286 dma_desc->status.b.ioc = 1;
2287 if(ep->is_in) {
2288 dma_desc->status.b.sp = (xfer_est % ep->maxpacket) ?
2289 1 : ((ep->sent_zlp) ? 1 : 0);
2290 dma_desc->status.b.bytes = xfer_est;
2291 } else {
2292 dma_desc->status.b.bytes = xfer_est + ((4 - (xfer_est & 0x3)) & 0x3) ;
2293 }
2294
2295 dma_desc->buf = ep->dma_addr + offset;
2296 dma_desc->status.b.bs = BS_HOST_READY;
2297 }
2298 dma_desc ++;
2299 }
2300 }
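/*
 * Illustrative numbers, not part of the driver: for ep->total_len = 10006
 * and ep->maxxfer = 4096 the loop above builds desc_cnt = 3 descriptors.
 * The helper name and values below are hypothetical.
 */
#if 0
static void desc_chain_sizing_example(void)
{
	uint32_t total_len = 10006, maxxfer = 4096;

	/* 10006 / 4096 = 2, plus 1 for the 1814-byte remainder */
	uint32_t desc_cnt = total_len / maxxfer + ((total_len % maxxfer) ? 1 : 0);

	/* The last descriptor carries 10006 - 2 * 4096 = 1814 bytes; for an
	 * OUT EP that length is rounded up to a DWORD multiple: 1814 + 2 = 1816. */
	uint32_t last_len = total_len - 2 * maxxfer;
	uint32_t out_last = last_len + ((4 - (last_len & 0x3)) & 0x3);

	(void)desc_cnt;
	(void)out_last;
}
#endif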
2301
2302 /**
2303 * This function does the setup for a data transfer for an EP and
2304 * starts the transfer. For an IN transfer, the packets will be
2305 * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers,
2306  * the packets are unloaded from the Rx FIFO in the ISR.
2307 *
2308 * @param core_if Programming view of DWC_otg controller.
2309 * @param ep The EP to start the transfer on.
2310 */
2311
2312 void dwc_otg_ep_start_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2313 {
2314 depctl_data_t depctl;
2315 deptsiz_data_t deptsiz;
2316 gintmsk_data_t intr_mask = { .d32 = 0};
2317
2318 DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__);
2319
2320 DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d "
2321 "xfer_buff=%p start_xfer_buff=%p\n",
2322 ep->num, (ep->is_in?"IN":"OUT"), ep->xfer_len,
2323 ep->xfer_count, ep->xfer_buff, ep->start_xfer_buff);
2324
2325 /* IN endpoint */
2326 if (ep->is_in == 1) {
2327 dwc_otg_dev_in_ep_regs_t *in_regs =
2328 core_if->dev_if->in_ep_regs[ep->num];
2329
2330 gnptxsts_data_t gtxstatus;
2331
2332 gtxstatus.d32 =
2333 dwc_read_reg32(&core_if->core_global_regs->gnptxsts);
2334
2335 if(core_if->en_multiple_tx_fifo == 0 && gtxstatus.b.nptxqspcavail == 0) {
2336 #ifdef DEBUG
2337 DWC_PRINT("TX Queue Full (0x%0x)\n", gtxstatus.d32);
2338 #endif
2339 return;
2340 }
2341
2342 depctl.d32 = dwc_read_reg32(&(in_regs->diepctl));
2343 deptsiz.d32 = dwc_read_reg32(&(in_regs->dieptsiz));
2344
2345 ep->xfer_len += (ep->maxxfer < (ep->total_len - ep->xfer_len)) ?
2346 ep->maxxfer : (ep->total_len - ep->xfer_len);
2347
2348 /* Zero Length Packet? */
2349 if ((ep->xfer_len - ep->xfer_count) == 0) {
2350 deptsiz.b.xfersize = 0;
2351 deptsiz.b.pktcnt = 1;
2352 }
2353 else {
2354 /* Program the transfer size and packet count
2355 * as follows: xfersize = N * maxpacket +
2356 * short_packet pktcnt = N + (short_packet
2357 * exist ? 1 : 0)
2358 */
2359 deptsiz.b.xfersize = ep->xfer_len - ep->xfer_count;
2360 deptsiz.b.pktcnt =
2361 (ep->xfer_len - ep->xfer_count - 1 + ep->maxpacket) /
2362 ep->maxpacket;
2363 }
2364
2365
2366 /* Write the DMA register */
2367 if (core_if->dma_enable) {
2368 if (core_if->dma_desc_enable == 0) {
2369 dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
2370 dwc_write_reg32 (&(in_regs->diepdma),
2371 (uint32_t)ep->dma_addr);
2372 }
2373 else {
2374 init_dma_desc_chain(core_if, ep);
2375 /** DIEPDMAn Register write */
2376 dwc_write_reg32(&in_regs->diepdma, ep->dma_desc_addr);
2377 }
2378 }
2379 else {
2380 dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
2381 if(ep->type != DWC_OTG_EP_TYPE_ISOC) {
2382 /**
2383 * Enable the Non-Periodic Tx FIFO empty interrupt,
2384 			 * or the Tx FIFO empty interrupt in dedicated Tx FIFO mode;
2385 			 * the data will be written into the FIFO by the ISR.
2386 */
2387 if(core_if->en_multiple_tx_fifo == 0) {
2388 intr_mask.b.nptxfempty = 1;
2389 dwc_modify_reg32(&core_if->core_global_regs->gintmsk,
2390 intr_mask.d32, intr_mask.d32);
2391 }
2392 else {
2393 /* Enable the Tx FIFO Empty Interrupt for this EP */
2394 if(ep->xfer_len > 0) {
2395 uint32_t fifoemptymsk = 0;
2396 fifoemptymsk = 1 << ep->num;
2397 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
2398 0, fifoemptymsk);
2399
2400 }
2401 }
2402 }
2403 }
2404
2405 /* EP enable, IN data in FIFO */
2406 depctl.b.cnak = 1;
2407 depctl.b.epena = 1;
2408 dwc_write_reg32(&in_regs->diepctl, depctl.d32);
2409
2410 depctl.d32 = dwc_read_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl);
2411 depctl.b.nextep = ep->num;
2412 dwc_write_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl, depctl.d32);
2413
2414 }
2415 else {
2416 /* OUT endpoint */
2417 dwc_otg_dev_out_ep_regs_t *out_regs =
2418 core_if->dev_if->out_ep_regs[ep->num];
2419
2420 depctl.d32 = dwc_read_reg32(&(out_regs->doepctl));
2421 deptsiz.d32 = dwc_read_reg32(&(out_regs->doeptsiz));
2422
2423 ep->xfer_len += (ep->maxxfer < (ep->total_len - ep->xfer_len)) ?
2424 ep->maxxfer : (ep->total_len - ep->xfer_len);
2425
2426 /* Program the transfer size and packet count as follows:
2427 *
2428 * pktcnt = N
2429 * xfersize = N * maxpacket
2430 */
2431 if ((ep->xfer_len - ep->xfer_count) == 0) {
2432 /* Zero Length Packet */
2433 deptsiz.b.xfersize = ep->maxpacket;
2434 deptsiz.b.pktcnt = 1;
2435 }
2436 else {
2437 deptsiz.b.pktcnt =
2438 (ep->xfer_len - ep->xfer_count + (ep->maxpacket - 1)) /
2439 ep->maxpacket;
2440 ep->xfer_len = deptsiz.b.pktcnt * ep->maxpacket + ep->xfer_count;
2441 deptsiz.b.xfersize = ep->xfer_len - ep->xfer_count;
2442 }
2443
2444 DWC_DEBUGPL(DBG_PCDV, "ep%d xfersize=%d pktcnt=%d\n",
2445 ep->num,
2446 deptsiz.b.xfersize, deptsiz.b.pktcnt);
2447
2448 if (core_if->dma_enable) {
2449 if (!core_if->dma_desc_enable) {
2450 dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
2451
2452 dwc_write_reg32 (&(out_regs->doepdma),
2453 (uint32_t)ep->dma_addr);
2454 }
2455 else {
2456 init_dma_desc_chain(core_if, ep);
2457
2458 /** DOEPDMAn Register write */
2459 dwc_write_reg32(&out_regs->doepdma, ep->dma_desc_addr);
2460 }
2461 }
2462 else {
2463 dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
2464 }
2465
2466 /* EP enable */
2467 depctl.b.cnak = 1;
2468 depctl.b.epena = 1;
2469
2470 dwc_write_reg32(&out_regs->doepctl, depctl.d32);
2471
2472 DWC_DEBUGPL(DBG_PCD, "DOEPCTL=%08x DOEPTSIZ=%08x\n",
2473 dwc_read_reg32(&out_regs->doepctl),
2474 dwc_read_reg32(&out_regs->doeptsiz));
2475 DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n",
2476 dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk),
2477 dwc_read_reg32(&core_if->core_global_regs->gintmsk));
2478 }
2479 }
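/*
 * Illustrative numbers, not part of the driver: with maxpacket = 512 and
 * 1000 bytes left to send or receive, the IN path above programs
 * xfersize = 1000 and pktcnt = 2 (one full packet plus a short packet),
 * while the OUT path rounds xfersize up to a whole number of packets.
 * The helper name and values below are hypothetical.
 */
#if 0
static void ep_sizing_example(void)
{
	uint32_t maxpacket = 512, remaining = 1000;

	/* IN: pktcnt = (1000 - 1 + 512) / 512 = 2, xfersize = 1000 */
	uint32_t in_pktcnt = (remaining - 1 + maxpacket) / maxpacket;

	/* OUT: pktcnt = 2, xfersize = 2 * 512 = 1024 */
	uint32_t out_pktcnt = (remaining + (maxpacket - 1)) / maxpacket;
	uint32_t out_xfersize = out_pktcnt * maxpacket;

	(void)in_pktcnt;
	(void)out_xfersize;
}
#endif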
2480
2481 /**
2482  * This function sets up a zero-length transfer in Buffer DMA and
2483  * Slave modes for USB requests that have the zero field set.
2484 *
2485 * @param core_if Programming view of DWC_otg controller.
2486 * @param ep The EP to start the transfer on.
2487 *
2488 */
2489 void dwc_otg_ep_start_zl_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2490 {
2491
2492 depctl_data_t depctl;
2493 deptsiz_data_t deptsiz;
2494 gintmsk_data_t intr_mask = { .d32 = 0};
2495
2496 DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__);
2497
2498 /* IN endpoint */
2499 if (ep->is_in == 1) {
2500 dwc_otg_dev_in_ep_regs_t *in_regs =
2501 core_if->dev_if->in_ep_regs[ep->num];
2502
2503 depctl.d32 = dwc_read_reg32(&(in_regs->diepctl));
2504 deptsiz.d32 = dwc_read_reg32(&(in_regs->dieptsiz));
2505
2506 deptsiz.b.xfersize = 0;
2507 deptsiz.b.pktcnt = 1;
2508
2509
2510 /* Write the DMA register */
2511 if (core_if->dma_enable) {
2512 if (core_if->dma_desc_enable == 0) {
2513 dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
2514 dwc_write_reg32 (&(in_regs->diepdma),
2515 (uint32_t)ep->dma_addr);
2516 }
2517 }
2518 else {
2519 dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
2520 /**
2521 * Enable the Non-Periodic Tx FIFO empty interrupt,
2522 		 * or the Tx FIFO empty interrupt in dedicated Tx FIFO mode;
2523 		 * the data will be written into the FIFO by the ISR.
2524 */
2525 if(core_if->en_multiple_tx_fifo == 0) {
2526 intr_mask.b.nptxfempty = 1;
2527 dwc_modify_reg32(&core_if->core_global_regs->gintmsk,
2528 intr_mask.d32, intr_mask.d32);
2529 }
2530 else {
2531 /* Enable the Tx FIFO Empty Interrupt for this EP */
2532 if(ep->xfer_len > 0) {
2533 uint32_t fifoemptymsk = 0;
2534 fifoemptymsk = 1 << ep->num;
2535 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
2536 0, fifoemptymsk);
2537 }
2538 }
2539 }
2540
2541 /* EP enable, IN data in FIFO */
2542 depctl.b.cnak = 1;
2543 depctl.b.epena = 1;
2544 dwc_write_reg32(&in_regs->diepctl, depctl.d32);
2545
2546 depctl.d32 = dwc_read_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl);
2547 depctl.b.nextep = ep->num;
2548 dwc_write_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl, depctl.d32);
2549
2550 }
2551 else {
2552 /* OUT endpoint */
2553 dwc_otg_dev_out_ep_regs_t *out_regs =
2554 core_if->dev_if->out_ep_regs[ep->num];
2555
2556 depctl.d32 = dwc_read_reg32(&(out_regs->doepctl));
2557 deptsiz.d32 = dwc_read_reg32(&(out_regs->doeptsiz));
2558
2559 /* Zero Length Packet */
2560 deptsiz.b.xfersize = ep->maxpacket;
2561 deptsiz.b.pktcnt = 1;
2562
2563 if (core_if->dma_enable) {
2564 if (!core_if->dma_desc_enable) {
2565 dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
2566
2567 dwc_write_reg32 (&(out_regs->doepdma),
2568 (uint32_t)ep->dma_addr);
2569 }
2570 }
2571 else {
2572 dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
2573 }
2574
2575 /* EP enable */
2576 depctl.b.cnak = 1;
2577 depctl.b.epena = 1;
2578
2579 dwc_write_reg32(&out_regs->doepctl, depctl.d32);
2580
2581 }
2582 }
2583
2584 /**
2585 * This function does the setup for a data transfer for EP0 and starts
2586 * the transfer. For an IN transfer, the packets will be loaded into
2587 * the appropriate Tx FIFO in the ISR. For OUT transfers, the packets are
2588 * unloaded from the Rx FIFO in the ISR.
2589 *
2590 * @param core_if Programming view of DWC_otg controller.
2591 * @param ep The EP0 data.
2592 */
2593 void dwc_otg_ep0_start_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2594 {
2595 depctl_data_t depctl;
2596 deptsiz0_data_t deptsiz;
2597 gintmsk_data_t intr_mask = { .d32 = 0};
2598 dwc_otg_dma_desc_t* dma_desc;
2599
2600 DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d "
2601 "xfer_buff=%p start_xfer_buff=%p \n",
2602 ep->num, (ep->is_in?"IN":"OUT"), ep->xfer_len,
2603 ep->xfer_count, ep->xfer_buff, ep->start_xfer_buff);
2604
2605 ep->total_len = ep->xfer_len;
2606
2607 /* IN endpoint */
2608 if (ep->is_in == 1) {
2609 dwc_otg_dev_in_ep_regs_t *in_regs =
2610 core_if->dev_if->in_ep_regs[0];
2611
2612 gnptxsts_data_t gtxstatus;
2613
2614 gtxstatus.d32 =
2615 dwc_read_reg32(&core_if->core_global_regs->gnptxsts);
2616
2617 if(core_if->en_multiple_tx_fifo == 0 && gtxstatus.b.nptxqspcavail == 0) {
2618 #ifdef DEBUG
2619 deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz);
2620 DWC_DEBUGPL(DBG_PCD,"DIEPCTL0=%0x\n",
2621 dwc_read_reg32(&in_regs->diepctl));
2622 DWC_DEBUGPL(DBG_PCD, "DIEPTSIZ0=%0x (sz=%d, pcnt=%d)\n",
2623 deptsiz.d32,
2624 deptsiz.b.xfersize, deptsiz.b.pktcnt);
2625 DWC_PRINT("TX Queue or FIFO Full (0x%0x)\n",
2626 gtxstatus.d32);
2627 #endif
2628 return;
2629 }
2630
2631
2632 depctl.d32 = dwc_read_reg32(&in_regs->diepctl);
2633 deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz);
2634
2635 /* Zero Length Packet? */
2636 if (ep->xfer_len == 0) {
2637 deptsiz.b.xfersize = 0;
2638 deptsiz.b.pktcnt = 1;
2639 }
2640 else {
2641 /* Program the transfer size and packet count
2642 * as follows: xfersize = N * maxpacket +
2643 * short_packet pktcnt = N + (short_packet
2644 * exist ? 1 : 0)
2645 */
2646 if (ep->xfer_len > ep->maxpacket) {
2647 ep->xfer_len = ep->maxpacket;
2648 deptsiz.b.xfersize = ep->maxpacket;
2649 }
2650 else {
2651 deptsiz.b.xfersize = ep->xfer_len;
2652 }
2653 deptsiz.b.pktcnt = 1;
2654
2655 }
2656 DWC_DEBUGPL(DBG_PCDV, "IN len=%d xfersize=%d pktcnt=%d [%08x]\n",
2657 ep->xfer_len,
2658 deptsiz.b.xfersize, deptsiz.b.pktcnt, deptsiz.d32);
2659
2660 /* Write the DMA register */
2661 if (core_if->dma_enable) {
2662 if(core_if->dma_desc_enable == 0) {
2663 dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
2664
2665 dwc_write_reg32 (&(in_regs->diepdma),
2666 (uint32_t)ep->dma_addr);
2667 }
2668 else {
2669 dma_desc = core_if->dev_if->in_desc_addr;
2670
2671 /** DMA Descriptor Setup */
2672 dma_desc->status.b.bs = BS_HOST_BUSY;
2673 dma_desc->status.b.l = 1;
2674 dma_desc->status.b.ioc = 1;
2675 dma_desc->status.b.sp = (ep->xfer_len == ep->maxpacket) ? 0 : 1;
2676 dma_desc->status.b.bytes = ep->xfer_len;
2677 dma_desc->buf = ep->dma_addr;
2678 dma_desc->status.b.bs = BS_HOST_READY;
2679
2680 /** DIEPDMA0 Register write */
2681 dwc_write_reg32(&in_regs->diepdma, core_if->dev_if->dma_in_desc_addr);
2682 }
2683 }
2684 else {
2685 dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
2686 }
2687
2688 /* EP enable, IN data in FIFO */
2689 depctl.b.cnak = 1;
2690 depctl.b.epena = 1;
2691 dwc_write_reg32(&in_regs->diepctl, depctl.d32);
2692
2693 /**
2694 * Enable the Non-Periodic Tx FIFO empty interrupt, the
2695 * data will be written into the fifo by the ISR.
2696 */
2697 if (!core_if->dma_enable) {
2698 if(core_if->en_multiple_tx_fifo == 0) {
2699 intr_mask.b.nptxfempty = 1;
2700 dwc_modify_reg32(&core_if->core_global_regs->gintmsk,
2701 intr_mask.d32, intr_mask.d32);
2702 }
2703 else {
2704 /* Enable the Tx FIFO Empty Interrupt for this EP */
2705 if(ep->xfer_len > 0) {
2706 uint32_t fifoemptymsk = 0;
2707 fifoemptymsk |= 1 << ep->num;
2708 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
2709 0, fifoemptymsk);
2710 }
2711 }
2712 }
2713 }
2714 else {
2715 /* OUT endpoint */
2716 dwc_otg_dev_out_ep_regs_t *out_regs =
2717 core_if->dev_if->out_ep_regs[0];
2718
2719 depctl.d32 = dwc_read_reg32(&out_regs->doepctl);
2720 deptsiz.d32 = dwc_read_reg32(&out_regs->doeptsiz);
2721
2722 /* Program the transfer size and packet count as follows:
2723 * xfersize = N * (maxpacket + 4 - (maxpacket % 4))
2724 * pktcnt = N */
2725 /* Zero Length Packet */
2726 deptsiz.b.xfersize = ep->maxpacket;
2727 deptsiz.b.pktcnt = 1;
2728
2729 DWC_DEBUGPL(DBG_PCDV, "len=%d xfersize=%d pktcnt=%d\n",
2730 ep->xfer_len,
2731 deptsiz.b.xfersize, deptsiz.b.pktcnt);
2732
2733 if (core_if->dma_enable) {
2734 if(!core_if->dma_desc_enable) {
2735 dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
2736
2737 dwc_write_reg32 (&(out_regs->doepdma),
2738 (uint32_t)ep->dma_addr);
2739 }
2740 else {
2741 dma_desc = core_if->dev_if->out_desc_addr;
2742
2743 /** DMA Descriptor Setup */
2744 dma_desc->status.b.bs = BS_HOST_BUSY;
2745 dma_desc->status.b.l = 1;
2746 dma_desc->status.b.ioc = 1;
2747 dma_desc->status.b.bytes = ep->maxpacket;
2748 dma_desc->buf = ep->dma_addr;
2749 dma_desc->status.b.bs = BS_HOST_READY;
2750
2751 /** DOEPDMA0 Register write */
2752 dwc_write_reg32(&out_regs->doepdma, core_if->dev_if->dma_out_desc_addr);
2753 }
2754 }
2755 else {
2756 dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
2757 }
2758
2759 /* EP enable */
2760 depctl.b.cnak = 1;
2761 depctl.b.epena = 1;
2762 dwc_write_reg32 (&(out_regs->doepctl), depctl.d32);
2763 }
2764 }
2765
2766 /**
2767 * This function continues control IN transfers started by
2768 * dwc_otg_ep0_start_transfer, when the transfer does not fit in a
2769  * single packet. NOTE: The DIEPTSIZ0/DOEPTSIZ0 registers have only a
2770  * small packet count field.
2771 *
2772 * @param core_if Programming view of DWC_otg controller.
2773 * @param ep The EP0 data.
2774 */
2775 void dwc_otg_ep0_continue_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
2776 {
2777 depctl_data_t depctl;
2778 deptsiz0_data_t deptsiz;
2779 gintmsk_data_t intr_mask = { .d32 = 0};
2780 dwc_otg_dma_desc_t* dma_desc;
2781
2782 if (ep->is_in == 1) {
2783 dwc_otg_dev_in_ep_regs_t *in_regs =
2784 core_if->dev_if->in_ep_regs[0];
2785 gnptxsts_data_t tx_status = { .d32 = 0 };
2786
2787 tx_status.d32 = dwc_read_reg32(&core_if->core_global_regs->gnptxsts);
2788 		/** @todo Should there be a check for room in the Tx
2789 		 * Status Queue? If not, remove the code above this comment. */
2790
2791 depctl.d32 = dwc_read_reg32(&in_regs->diepctl);
2792 deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz);
2793
2794 /* Program the transfer size and packet count
2795 * as follows: xfersize = N * maxpacket +
2796 * short_packet pktcnt = N + (short_packet
2797 * exist ? 1 : 0)
2798 */
2799
2800
2801 if(core_if->dma_desc_enable == 0) {
2802 deptsiz.b.xfersize = (ep->total_len - ep->xfer_count) > ep->maxpacket ? ep->maxpacket :
2803 (ep->total_len - ep->xfer_count);
2804 deptsiz.b.pktcnt = 1;
2805 if(core_if->dma_enable == 0) {
2806 ep->xfer_len += deptsiz.b.xfersize;
2807 } else {
2808 ep->xfer_len = deptsiz.b.xfersize;
2809 }
2810 dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
2811 }
2812 else {
2813 ep->xfer_len = (ep->total_len - ep->xfer_count) > ep->maxpacket ? ep->maxpacket :
2814 (ep->total_len - ep->xfer_count);
2815
2816 dma_desc = core_if->dev_if->in_desc_addr;
2817
2818 /** DMA Descriptor Setup */
2819 dma_desc->status.b.bs = BS_HOST_BUSY;
2820 dma_desc->status.b.l = 1;
2821 dma_desc->status.b.ioc = 1;
2822 dma_desc->status.b.sp = (ep->xfer_len == ep->maxpacket) ? 0 : 1;
2823 dma_desc->status.b.bytes = ep->xfer_len;
2824 dma_desc->buf = ep->dma_addr;
2825 dma_desc->status.b.bs = BS_HOST_READY;
2826
2827 /** DIEPDMA0 Register write */
2828 dwc_write_reg32(&in_regs->diepdma, core_if->dev_if->dma_in_desc_addr);
2829 }
2830
2831
2832 DWC_DEBUGPL(DBG_PCDV, "IN len=%d xfersize=%d pktcnt=%d [%08x]\n",
2833 ep->xfer_len,
2834 deptsiz.b.xfersize, deptsiz.b.pktcnt, deptsiz.d32);
2835
2836 /* Write the DMA register */
2837 if (core_if->hwcfg2.b.architecture == DWC_INT_DMA_ARCH) {
2838 if(core_if->dma_desc_enable == 0)
2839 dwc_write_reg32 (&(in_regs->diepdma), (uint32_t)ep->dma_addr);
2840 }
2841
2842 /* EP enable, IN data in FIFO */
2843 depctl.b.cnak = 1;
2844 depctl.b.epena = 1;
2845 dwc_write_reg32(&in_regs->diepctl, depctl.d32);
2846
2847 /**
2848 * Enable the Non-Periodic Tx FIFO empty interrupt, the
2849 * data will be written into the fifo by the ISR.
2850 */
2851 if (!core_if->dma_enable) {
2852 if(core_if->en_multiple_tx_fifo == 0) {
2853 /* First clear it from GINTSTS */
2854 intr_mask.b.nptxfempty = 1;
2855 dwc_modify_reg32(&core_if->core_global_regs->gintmsk,
2856 intr_mask.d32, intr_mask.d32);
2857
2858 }
2859 else {
2860 /* Enable the Tx FIFO Empty Interrupt for this EP */
2861 if(ep->xfer_len > 0) {
2862 uint32_t fifoemptymsk = 0;
2863 fifoemptymsk |= 1 << ep->num;
2864 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
2865 0, fifoemptymsk);
2866 }
2867 }
2868 }
2869 }
2870 else {
2871 dwc_otg_dev_out_ep_regs_t *out_regs =
2872 core_if->dev_if->out_ep_regs[0];
2873
2874
2875 depctl.d32 = dwc_read_reg32(&out_regs->doepctl);
2876 deptsiz.d32 = dwc_read_reg32(&out_regs->doeptsiz);
2877
2878 /* Program the transfer size and packet count
2879 * as follows: xfersize = N * maxpacket +
2880 * short_packet pktcnt = N + (short_packet
2881 * exist ? 1 : 0)
2882 */
2883 deptsiz.b.xfersize = ep->maxpacket;
2884 deptsiz.b.pktcnt = 1;
2885
2886
2887 if(core_if->dma_desc_enable == 0) {
2888 dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
2889 }
2890 else {
2891 dma_desc = core_if->dev_if->out_desc_addr;
2892
2893 /** DMA Descriptor Setup */
2894 dma_desc->status.b.bs = BS_HOST_BUSY;
2895 dma_desc->status.b.l = 1;
2896 dma_desc->status.b.ioc = 1;
2897 dma_desc->status.b.bytes = ep->maxpacket;
2898 dma_desc->buf = ep->dma_addr;
2899 dma_desc->status.b.bs = BS_HOST_READY;
2900
2901 /** DOEPDMA0 Register write */
2902 dwc_write_reg32(&out_regs->doepdma, core_if->dev_if->dma_out_desc_addr);
2903 }
2904
2905
2906 		DWC_DEBUGPL(DBG_PCDV, "OUT len=%d xfersize=%d pktcnt=%d [%08x]\n",
2907 ep->xfer_len,
2908 deptsiz.b.xfersize, deptsiz.b.pktcnt, deptsiz.d32);
2909
2910 /* Write the DMA register */
2911 if (core_if->hwcfg2.b.architecture == DWC_INT_DMA_ARCH) {
2912 if(core_if->dma_desc_enable == 0)
2913 dwc_write_reg32 (&(out_regs->doepdma), (uint32_t)ep->dma_addr);
2914 }
2915
2916 		/* EP enable */
2917 depctl.b.cnak = 1;
2918 depctl.b.epena = 1;
2919 dwc_write_reg32(&out_regs->doepctl, depctl.d32);
2920
2921 }
2922 }
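/*
 * Illustrative numbers, not part of the driver: for a 100-byte control IN
 * response with maxpacket = 64, dwc_otg_ep0_start_transfer() programs the
 * first stage as xfersize = 64, pktcnt = 1; once that packet completes,
 * dwc_otg_ep0_continue_transfer() programs the remaining 36 bytes as a
 * second single-packet stage.
 */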
2923
2924 #ifdef DEBUG
2925 void dump_msg(const u8 *buf, unsigned int length)
2926 {
2927 unsigned int start, num, i;
2928 char line[52], *p;
2929
2930 if (length >= 512)
2931 return;
2932 start = 0;
2933 while (length > 0) {
2934 num = min(length, 16u);
2935 p = line;
2936 for (i = 0; i < num; ++i)
2937 {
2938 if (i == 8)
2939 *p++ = ' ';
2940 sprintf(p, " %02x", buf[i]);
2941 p += 3;
2942 }
2943 *p = 0;
2944 DWC_PRINT("%6x: %s\n", start, line);
2945 buf += num;
2946 start += num;
2947 length -= num;
2948 }
2949 }
2950 #else
2951 static inline void dump_msg(const u8 *buf, unsigned int length)
2952 {
2953 }
2954 #endif
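/*
 * Illustrative output, based on the format strings above: a 16-byte buffer
 * holding 0x01..0x10 is printed by dump_msg() as a single line of the form
 *
 *      0:  01 02 03 04 05 06 07 08  09 0a 0b 0c 0d 0e 0f 10
 *
 * i.e. a right-aligned hex offset, a colon, and space-separated hex bytes
 * with an extra gap after the eighth byte.
 */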
2955
2956 /**
2957 * This function writes a packet into the Tx FIFO associated with the
2958 * EP. For non-periodic EPs the non-periodic Tx FIFO is written. For
2959 * periodic EPs the periodic Tx FIFO associated with the EP is written
2960 * with all packets for the next micro-frame.
2961 *
2962 * @param core_if Programming view of DWC_otg controller.
2963 * @param ep The EP to write packet for.
2964 * @param dma Indicates if DMA is being used.
2965 */
2966 void dwc_otg_ep_write_packet(dwc_otg_core_if_t *core_if, dwc_ep_t *ep, int dma)
2967 {
2968 /**
2969 * The buffer is padded to DWORD on a per packet basis in
2970 * slave/dma mode if the MPS is not DWORD aligned. The last
2971 * packet, if short, is also padded to a multiple of DWORD.
2972 *
2973 * ep->xfer_buff always starts DWORD aligned in memory and is a
2974 * multiple of DWORD in length
2975 *
2976 * ep->xfer_len can be any number of bytes
2977 *
2978 * ep->xfer_count is a multiple of ep->maxpacket until the last
2979 * packet
2980 *
2981 * FIFO access is DWORD */
2982
2983 uint32_t i;
2984 uint32_t byte_count;
2985 uint32_t dword_count;
2986 uint32_t *fifo;
2987 uint32_t *data_buff = (uint32_t *)ep->xfer_buff;
2988
2989 DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p)\n", __func__, core_if, ep);
2990 if (ep->xfer_count >= ep->xfer_len) {
2991 DWC_WARN("%s() No data for EP%d!!!\n", __func__, ep->num);
2992 return;
2993 }
2994
2995 /* Find the byte length of the packet either short packet or MPS */
2996 if ((ep->xfer_len - ep->xfer_count) < ep->maxpacket) {
2997 byte_count = ep->xfer_len - ep->xfer_count;
2998 }
2999 else {
3000 byte_count = ep->maxpacket;
3001 }
3002
3003 	/* Find the DWORD length, padded by extra bytes as necessary if MPS
3004 * is not a multiple of DWORD */
3005 dword_count = (byte_count + 3) / 4;
3006
3007 #ifdef VERBOSE
3008 dump_msg(ep->xfer_buff, byte_count);
3009 #endif
3010
3011 /**@todo NGS Where are the Periodic Tx FIFO addresses
3012 	 * initialized? What should this be? */
3013
3014 fifo = core_if->data_fifo[ep->num];
3015
3016
3017 DWC_DEBUGPL((DBG_PCDV|DBG_CILV), "fifo=%p buff=%p *p=%08x bc=%d\n", fifo, data_buff, *data_buff, byte_count);
3018
3019 if (!dma) {
3020 for (i=0; i<dword_count; i++, data_buff++) {
3021 dwc_write_reg32(fifo, *data_buff);
3022 }
3023 }
3024
3025 ep->xfer_count += byte_count;
3026 ep->xfer_buff += byte_count;
3027 ep->dma_addr += byte_count;
3028 }
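/*
 * Illustrative numbers, not part of the driver: a 13-byte short packet
 * gives dword_count = (13 + 3) / 4 = 4, so four 32-bit words (16 bytes,
 * the last word carrying 3 bytes of padding) are pushed into the FIFO,
 * while xfer_count, xfer_buff and dma_addr advance by only the 13
 * payload bytes.
 */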
3029
3030 /**
3031 * Set the EP STALL.
3032 *
3033 * @param core_if Programming view of DWC_otg controller.
3034 * @param ep The EP to set the stall on.
3035 */
3036 void dwc_otg_ep_set_stall(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
3037 {
3038 depctl_data_t depctl;
3039 volatile uint32_t *depctl_addr;
3040
3041 DWC_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, ep->num,
3042 (ep->is_in?"IN":"OUT"));
3043
3044 DWC_PRINT("%s ep%d-%s\n", __func__, ep->num,
3045 (ep->is_in?"in":"out"));
3046
3047 if (ep->is_in == 1) {
3048 depctl_addr = &(core_if->dev_if->in_ep_regs[ep->num]->diepctl);
3049 depctl.d32 = dwc_read_reg32(depctl_addr);
3050
3051 /* set the disable and stall bits */
3052 if (depctl.b.epena) {
3053 depctl.b.epdis = 1;
3054 }
3055 depctl.b.stall = 1;
3056 dwc_write_reg32(depctl_addr, depctl.d32);
3057 }
3058 else {
3059 depctl_addr = &(core_if->dev_if->out_ep_regs[ep->num]->doepctl);
3060 depctl.d32 = dwc_read_reg32(depctl_addr);
3061
3062 /* set the stall bit */
3063 depctl.b.stall = 1;
3064 dwc_write_reg32(depctl_addr, depctl.d32);
3065 }
3066
3067 DWC_DEBUGPL(DBG_PCD,"DEPCTL=%0x\n",dwc_read_reg32(depctl_addr));
3068
3069 return;
3070 }
3071
3072 /**
3073 * Clear the EP STALL.
3074 *
3075 * @param core_if Programming view of DWC_otg controller.
3076 * @param ep The EP to clear stall from.
3077 */
3078 void dwc_otg_ep_clear_stall(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
3079 {
3080 depctl_data_t depctl;
3081 volatile uint32_t *depctl_addr;
3082
3083 DWC_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, ep->num,
3084 (ep->is_in?"IN":"OUT"));
3085
3086 if (ep->is_in == 1) {
3087 depctl_addr = &(core_if->dev_if->in_ep_regs[ep->num]->diepctl);
3088 }
3089 else {
3090 depctl_addr = &(core_if->dev_if->out_ep_regs[ep->num]->doepctl);
3091 }
3092
3093 depctl.d32 = dwc_read_reg32(depctl_addr);
3094
3095 /* clear the stall bits */
3096 depctl.b.stall = 0;
3097
3098 /*
3099 * USB Spec 9.4.5: For endpoints using data toggle, regardless
3100 * of whether an endpoint has the Halt feature set, a
3101 * ClearFeature(ENDPOINT_HALT) request always results in the
3102 * data toggle being reinitialized to DATA0.
3103 */
3104 if (ep->type == DWC_OTG_EP_TYPE_INTR ||
3105 ep->type == DWC_OTG_EP_TYPE_BULK) {
3106 depctl.b.setd0pid = 1; /* DATA0 */
3107 }
3108
3109 dwc_write_reg32(depctl_addr, depctl.d32);
3110 DWC_DEBUGPL(DBG_PCD,"DEPCTL=%0x\n",dwc_read_reg32(depctl_addr));
3111 return;
3112 }
3113
3114 /**
3115 * This function reads a packet from the Rx FIFO into the destination
3116 * buffer. To read SETUP data use dwc_otg_read_setup_packet.
3117 *
3118 * @param core_if Programming view of DWC_otg controller.
3119 * @param dest Destination buffer for the packet.
3120 * @param bytes Number of bytes to copy to the destination.
3121 */
3122 void dwc_otg_read_packet(dwc_otg_core_if_t *core_if,
3123 uint8_t *dest,
3124 uint16_t bytes)
3125 {
3126 int i;
3127 int word_count = (bytes + 3) / 4;
3128
3129 volatile uint32_t *fifo = core_if->data_fifo[0];
3130 uint32_t *data_buff = (uint32_t *)dest;
3131
3132 /**
3133 * @todo Account for the case where _dest is not dword aligned. This
3134 * requires reading data from the FIFO into a uint32_t temp buffer,
3135 * then moving it into the data buffer.
3136 */
3137
3138 DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p,%d)\n", __func__,
3139 core_if, dest, bytes);
3140
3141 for (i=0; i<word_count; i++, data_buff++)
3142 {
3143 *data_buff = dwc_read_reg32(fifo);
3144 }
3145
3146 return;
3147 }
3148
3149
3150
3151 /**
3152  * This function reads the device registers and prints them
3153 *
3154 * @param core_if Programming view of DWC_otg controller.
3155 */
3156 void dwc_otg_dump_dev_registers(dwc_otg_core_if_t *core_if)
3157 {
3158 int i;
3159 volatile uint32_t *addr;
3160
3161 DWC_PRINT("Device Global Registers\n");
3162 addr=&core_if->dev_if->dev_global_regs->dcfg;
3163 DWC_PRINT("DCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3164 addr=&core_if->dev_if->dev_global_regs->dctl;
3165 DWC_PRINT("DCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3166 addr=&core_if->dev_if->dev_global_regs->dsts;
3167 DWC_PRINT("DSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3168 addr=&core_if->dev_if->dev_global_regs->diepmsk;
3169 DWC_PRINT("DIEPMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3170 addr=&core_if->dev_if->dev_global_regs->doepmsk;
3171 DWC_PRINT("DOEPMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3172 addr=&core_if->dev_if->dev_global_regs->daint;
3173 DWC_PRINT("DAINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3174 addr=&core_if->dev_if->dev_global_regs->daintmsk;
3175 DWC_PRINT("DAINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3176 addr=&core_if->dev_if->dev_global_regs->dtknqr1;
3177 DWC_PRINT("DTKNQR1 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3178 if (core_if->hwcfg2.b.dev_token_q_depth > 6) {
3179 addr=&core_if->dev_if->dev_global_regs->dtknqr2;
3180 DWC_PRINT("DTKNQR2 @0x%08X : 0x%08X\n",
3181 (uint32_t)addr,dwc_read_reg32(addr));
3182 }
3183
3184 addr=&core_if->dev_if->dev_global_regs->dvbusdis;
3185 	DWC_PRINT("DVBUSDIS	 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3186
3187 addr=&core_if->dev_if->dev_global_regs->dvbuspulse;
3188 DWC_PRINT("DVBUSPULSE @0x%08X : 0x%08X\n",
3189 (uint32_t)addr,dwc_read_reg32(addr));
3190
3191 if (core_if->hwcfg2.b.dev_token_q_depth > 14) {
3192 addr=&core_if->dev_if->dev_global_regs->dtknqr3_dthrctl;
3193 DWC_PRINT("DTKNQR3_DTHRCTL @0x%08X : 0x%08X\n",
3194 (uint32_t)addr, dwc_read_reg32(addr));
3195 }
3196 /*
3197 if (core_if->hwcfg2.b.dev_token_q_depth > 22) {
3198 addr=&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk;
3199 DWC_PRINT("DTKNQR4 @0x%08X : 0x%08X\n",
3200 (uint32_t)addr, dwc_read_reg32(addr));
3201 }
3202 */
3203 addr=&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk;
3204 DWC_PRINT("FIFOEMPMSK @0x%08X : 0x%08X\n", (uint32_t)addr, dwc_read_reg32(addr));
3205
3206 addr=&core_if->dev_if->dev_global_regs->deachint;
3207 DWC_PRINT("DEACHINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3208 addr=&core_if->dev_if->dev_global_regs->deachintmsk;
3209 DWC_PRINT("DEACHINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3210
3211 for (i=0; i<= core_if->dev_if->num_in_eps; i++) {
3212 addr=&core_if->dev_if->dev_global_regs->diepeachintmsk[i];
3213 DWC_PRINT("DIEPEACHINTMSK[%d] @0x%08X : 0x%08X\n", i, (uint32_t)addr, dwc_read_reg32(addr));
3214 }
3215
3216
3217 for (i=0; i<= core_if->dev_if->num_out_eps; i++) {
3218 addr=&core_if->dev_if->dev_global_regs->doepeachintmsk[i];
3219 DWC_PRINT("DOEPEACHINTMSK[%d] @0x%08X : 0x%08X\n", i, (uint32_t)addr, dwc_read_reg32(addr));
3220 }
3221
3222 for (i=0; i<= core_if->dev_if->num_in_eps; i++) {
3223 DWC_PRINT("Device IN EP %d Registers\n", i);
3224 addr=&core_if->dev_if->in_ep_regs[i]->diepctl;
3225 DWC_PRINT("DIEPCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3226 addr=&core_if->dev_if->in_ep_regs[i]->diepint;
3227 DWC_PRINT("DIEPINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3228 addr=&core_if->dev_if->in_ep_regs[i]->dieptsiz;
3229 		DWC_PRINT("DIEPTSIZ	 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3230 addr=&core_if->dev_if->in_ep_regs[i]->diepdma;
3231 DWC_PRINT("DIEPDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3232 addr=&core_if->dev_if->in_ep_regs[i]->dtxfsts;
3233 DWC_PRINT("DTXFSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3234 addr=&core_if->dev_if->in_ep_regs[i]->diepdmab;
3235 DWC_PRINT("DIEPDMAB @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3236 }
3237
3238
3239 for (i=0; i<= core_if->dev_if->num_out_eps; i++) {
3240 DWC_PRINT("Device OUT EP %d Registers\n", i);
3241 addr=&core_if->dev_if->out_ep_regs[i]->doepctl;
3242 DWC_PRINT("DOEPCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3243 addr=&core_if->dev_if->out_ep_regs[i]->doepfn;
3244 DWC_PRINT("DOEPFN @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3245 addr=&core_if->dev_if->out_ep_regs[i]->doepint;
3246 DWC_PRINT("DOEPINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3247 addr=&core_if->dev_if->out_ep_regs[i]->doeptsiz;
3248 		DWC_PRINT("DOEPTSIZ	 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3249 addr=&core_if->dev_if->out_ep_regs[i]->doepdma;
3250 DWC_PRINT("DOEPDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3251 addr=&core_if->dev_if->out_ep_regs[i]->doepdmab;
3252 DWC_PRINT("DOEPDMAB @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3253
3254 }
3255
3256
3257
3258 return;
3259 }
3260
3261 /**
3262  * This function reads the SPRAM and prints its contents
3263 *
3264 * @param core_if Programming view of DWC_otg controller.
3265 */
3266 void dwc_otg_dump_spram(dwc_otg_core_if_t *core_if)
3267 {
3268 volatile uint8_t *addr, *start_addr, *end_addr;
3269
3270 DWC_PRINT("SPRAM Data:\n");
3271 start_addr = (void*)core_if->core_global_regs;
3272 DWC_PRINT("Base Address: 0x%8X\n", (uint32_t)start_addr);
3273 start_addr += 0x00028000;
3274 end_addr=(void*)core_if->core_global_regs;
3275 end_addr += 0x000280e0;
3276
3277 for(addr = start_addr; addr < end_addr; addr+=16)
3278 {
3279 DWC_PRINT("0x%8X:\t%2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X\n", (uint32_t)addr,
3280 addr[0],
3281 addr[1],
3282 addr[2],
3283 addr[3],
3284 addr[4],
3285 addr[5],
3286 addr[6],
3287 addr[7],
3288 addr[8],
3289 addr[9],
3290 addr[10],
3291 addr[11],
3292 addr[12],
3293 addr[13],
3294 addr[14],
3295 addr[15]
3296 );
3297 }
3298
3299 return;
3300 }
3301 /**
3302 * This function reads the host registers and prints them
3303 *
3304 * @param core_if Programming view of DWC_otg controller.
3305 */
3306 void dwc_otg_dump_host_registers(dwc_otg_core_if_t *core_if)
3307 {
3308 int i;
3309 volatile uint32_t *addr;
3310
3311 DWC_PRINT("Host Global Registers\n");
3312 addr=&core_if->host_if->host_global_regs->hcfg;
3313 DWC_PRINT("HCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3314 addr=&core_if->host_if->host_global_regs->hfir;
3315 DWC_PRINT("HFIR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3316 addr=&core_if->host_if->host_global_regs->hfnum;
3317 DWC_PRINT("HFNUM @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3318 addr=&core_if->host_if->host_global_regs->hptxsts;
3319 DWC_PRINT("HPTXSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3320 addr=&core_if->host_if->host_global_regs->haint;
3321 DWC_PRINT("HAINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3322 addr=&core_if->host_if->host_global_regs->haintmsk;
3323 DWC_PRINT("HAINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3324 addr=core_if->host_if->hprt0;
3325 DWC_PRINT("HPRT0 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3326
3327 for (i=0; i<core_if->core_params->host_channels; i++)
3328 {
3329 DWC_PRINT("Host Channel %d Specific Registers\n", i);
3330 addr=&core_if->host_if->hc_regs[i]->hcchar;
3331 DWC_PRINT("HCCHAR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3332 addr=&core_if->host_if->hc_regs[i]->hcsplt;
3333 DWC_PRINT("HCSPLT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3334 addr=&core_if->host_if->hc_regs[i]->hcint;
3335 DWC_PRINT("HCINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3336 addr=&core_if->host_if->hc_regs[i]->hcintmsk;
3337 DWC_PRINT("HCINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3338 addr=&core_if->host_if->hc_regs[i]->hctsiz;
3339 DWC_PRINT("HCTSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3340 addr=&core_if->host_if->hc_regs[i]->hcdma;
3341 DWC_PRINT("HCDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3342 }
3343 return;
3344 }
3345
3346 /**
3347 * This function reads the core global registers and prints them
3348 *
3349 * @param core_if Programming view of DWC_otg controller.
3350 */
3351 void dwc_otg_dump_global_registers(dwc_otg_core_if_t *core_if)
3352 {
3353 int i;
3354 volatile uint32_t *addr;
3355
3356 DWC_PRINT("Core Global Registers\n");
3357 addr=&core_if->core_global_regs->gotgctl;
3358 DWC_PRINT("GOTGCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3359 addr=&core_if->core_global_regs->gotgint;
3360 DWC_PRINT("GOTGINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3361 addr=&core_if->core_global_regs->gahbcfg;
3362 DWC_PRINT("GAHBCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3363 addr=&core_if->core_global_regs->gusbcfg;
3364 DWC_PRINT("GUSBCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3365 addr=&core_if->core_global_regs->grstctl;
3366 DWC_PRINT("GRSTCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3367 addr=&core_if->core_global_regs->gintsts;
3368 DWC_PRINT("GINTSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3369 addr=&core_if->core_global_regs->gintmsk;
3370 DWC_PRINT("GINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3371 addr=&core_if->core_global_regs->grxstsr;
3372 DWC_PRINT("GRXSTSR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3373 //addr=&core_if->core_global_regs->grxstsp;
3374 //DWC_PRINT("GRXSTSP @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3375 addr=&core_if->core_global_regs->grxfsiz;
3376 DWC_PRINT("GRXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3377 addr=&core_if->core_global_regs->gnptxfsiz;
3378 DWC_PRINT("GNPTXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3379 addr=&core_if->core_global_regs->gnptxsts;
3380 DWC_PRINT("GNPTXSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3381 addr=&core_if->core_global_regs->gi2cctl;
3382 DWC_PRINT("GI2CCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3383 addr=&core_if->core_global_regs->gpvndctl;
3384 DWC_PRINT("GPVNDCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3385 addr=&core_if->core_global_regs->ggpio;
3386 DWC_PRINT("GGPIO @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3387 addr=&core_if->core_global_regs->guid;
3388 DWC_PRINT("GUID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3389 addr=&core_if->core_global_regs->gsnpsid;
3390 DWC_PRINT("GSNPSID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3391 addr=&core_if->core_global_regs->ghwcfg1;
3392 DWC_PRINT("GHWCFG1 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3393 addr=&core_if->core_global_regs->ghwcfg2;
3394 DWC_PRINT("GHWCFG2 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3395 addr=&core_if->core_global_regs->ghwcfg3;
3396 DWC_PRINT("GHWCFG3 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3397 addr=&core_if->core_global_regs->ghwcfg4;
3398 DWC_PRINT("GHWCFG4 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3399 addr=&core_if->core_global_regs->hptxfsiz;
3400 DWC_PRINT("HPTXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
3401
3402 for (i=0; i<core_if->hwcfg4.b.num_dev_perio_in_ep; i++)
3403 {
3404 addr=&core_if->core_global_regs->dptxfsiz_dieptxf[i];
3405 DWC_PRINT("DPTXFSIZ[%d] @0x%08X : 0x%08X\n",i,(uint32_t)addr,dwc_read_reg32(addr));
3406 }
3407 }
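/*
 * Illustrative sketch, not part of the driver: the dump helpers are normally
 * called from a debug path, e.g. before and after a core reset, to snapshot
 * register state. The wrapper name below is hypothetical; it assumes the
 * dwc_otg_is_host_mode() helper from dwc_otg_cil.h.
 */
#if 0
static void dwc_otg_debug_dump(dwc_otg_core_if_t *core_if)
{
	dwc_otg_dump_global_registers(core_if);		/* core-wide state */
	if (dwc_otg_is_host_mode(core_if)) {
		dwc_otg_dump_host_registers(core_if);	/* host port/channel state */
	}
}
#endif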
3408
3409 /**
3410 * Flush a Tx FIFO.
3411 *
3412 * @param core_if Programming view of DWC_otg controller.
3413 * @param num Tx FIFO to flush.
3414 */
3415 void dwc_otg_flush_tx_fifo(dwc_otg_core_if_t *core_if,
3416 const int num)
3417 {
3418 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
3419 volatile grstctl_t greset = { .d32 = 0};
3420 int count = 0;
3421
3422 DWC_DEBUGPL((DBG_CIL|DBG_PCDV), "Flush Tx FIFO %d\n", num);
3423
3424 greset.b.txfflsh = 1;
3425 greset.b.txfnum = num;
3426 dwc_write_reg32(&global_regs->grstctl, greset.d32);
3427
3428 do {
3429 greset.d32 = dwc_read_reg32(&global_regs->grstctl);
3430 if (++count > 10000) {
3431 			DWC_WARN("%s() HANG! GRSTCTL=0x%08x GNPTXSTS=0x%08x\n",
3432 __func__, greset.d32,
3433 dwc_read_reg32(&global_regs->gnptxsts));
3434 break;
3435 }
3436 }
3437 while (greset.b.txfflsh == 1);
3438
3439 	/* Wait for 3 PHY Clocks */
3440 UDELAY(1);
3441 }
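/*
 * Usage sketch (illustrative only): the 'num' argument maps to GRSTCTL.TxFNum,
 * where 0 selects Tx FIFO 0 and 0x10 requests a flush of all Tx FIFOs. The
 * hypothetical wrapper below assumes the core is quiescent when it runs.
 */
#if 0
static void dwc_otg_flush_all_tx_fifos(dwc_otg_core_if_t *core_if)
{
	/* num = 0 would flush only Tx FIFO 0; 0x10 flushes every Tx FIFO. */
	dwc_otg_flush_tx_fifo(core_if, 0x10);
}
#endif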
3442
3443 /**
3444 * Flush Rx FIFO.
3445 *
3446 * @param core_if Programming view of DWC_otg controller.
3447 */
3448 void dwc_otg_flush_rx_fifo(dwc_otg_core_if_t *core_if)
3449 {
3450 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
3451 volatile grstctl_t greset = { .d32 = 0};
3452 int count = 0;
3453
3454 DWC_DEBUGPL((DBG_CIL|DBG_PCDV), "%s\n", __func__);
3455 	/* Flush the entire RxFIFO and wait for the core to clear the flush bit. */
3458 greset.b.rxfflsh = 1;
3459 dwc_write_reg32(&global_regs->grstctl, greset.d32);
3460
3461 do {
3462 greset.d32 = dwc_read_reg32(&global_regs->grstctl);
3463 if (++count > 10000) {
3464 			DWC_WARN("%s() HANG! GRSTCTL=0x%08x\n", __func__,
3465 greset.d32);
3466 break;
3467 }
3468 }
3469 while (greset.b.rxfflsh == 1);
3470
3471 	/* Wait for 3 PHY Clocks */
3472 UDELAY(1);
3473 }
3474
3475 /**
3476  * Perform a soft reset of the core. Be careful with this because it
3477 * resets all the internal state machines of the core.
3478 */
3479 void dwc_otg_core_reset(dwc_otg_core_if_t *core_if)
3480 {
3481 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
3482 volatile grstctl_t greset = { .d32 = 0};
3483 int count = 0;
3484
3485 DWC_DEBUGPL(DBG_CILV, "%s\n", __func__);
3486 /* Wait for AHB master IDLE state. */
3487 do {
3488 UDELAY(10);
3489 greset.d32 = dwc_read_reg32(&global_regs->grstctl);
3490 if (++count > 100000) {
3491 			DWC_WARN("%s() HANG! AHB Idle GRSTCTL=0x%08x\n", __func__,
3492 greset.d32);
3493 return;
3494 }
3495 }
3496 while (greset.b.ahbidle == 0);
3497
3498 /* Core Soft Reset */
3499 count = 0;
3500 greset.b.csftrst = 1;
3501 dwc_write_reg32(&global_regs->grstctl, greset.d32);
3502 do {
3503 greset.d32 = dwc_read_reg32(&global_regs->grstctl);
3504 if (++count > 10000) {
3505 			DWC_WARN("%s() HANG! Soft Reset GRSTCTL=0x%08x\n", __func__,
3506 greset.d32);
3507 break;
3508 }
3509 }
3510 while (greset.b.csftrst == 1);
3511
3512 	/* Wait for at least 3 PHY clocks; 100 ms also gives the reset time to settle */
3513 MDELAY(100);
3514 }
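/*
 * Illustrative re-initialization sketch (an assumption, not the driver's own
 * sequence): a soft reset invalidates FIFO contents, so a typical path resets
 * the core first and then flushes both FIFO directions.
 */
#if 0
static void dwc_otg_reset_and_flush(dwc_otg_core_if_t *core_if)
{
	dwc_otg_core_reset(core_if);
	dwc_otg_flush_tx_fifo(core_if, 0x10);	/* 0x10 = all Tx FIFOs */
	dwc_otg_flush_rx_fifo(core_if);
}
#endif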
3515
3516
3517
3518 /**
3519 * Register HCD callbacks. The callbacks are used to start and stop
3520 * the HCD for interrupt processing.
3521 *
3522 * @param core_if Programming view of DWC_otg controller.
3523 * @param cb the HCD callback structure.
3524 * @param p pointer to be passed to callback function (usb_hcd*).
3525 */
3526 void dwc_otg_cil_register_hcd_callbacks(dwc_otg_core_if_t *core_if,
3527 dwc_otg_cil_callbacks_t *cb,
3528 void *p)
3529 {
3530 core_if->hcd_cb = cb;
3531 cb->p = p;
3532 }
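/*
 * Registration sketch (illustrative only): an HCD fills in a
 * dwc_otg_cil_callbacks_t and passes an opaque context pointer that the CIL
 * hands back on every callback. The .start/.stop/.disconnect field names and
 * the hcd_*_cb handlers are assumptions used purely for illustration.
 */
#if 0
static int hcd_start_cb(void *p);	/* hypothetical */
static int hcd_stop_cb(void *p);	/* hypothetical */
static int hcd_disconnect_cb(void *p);	/* hypothetical */

static dwc_otg_cil_callbacks_t hcd_cil_callbacks = {
	.start = hcd_start_cb,
	.stop = hcd_stop_cb,
	.disconnect = hcd_disconnect_cb,
	.p = 0,				/* set by the registration call */
};

static void example_register_hcd(dwc_otg_core_if_t *core_if, void *usb_hcd_ptr)
{
	dwc_otg_cil_register_hcd_callbacks(core_if, &hcd_cil_callbacks, usb_hcd_ptr);
}
#endif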
3533
3534 /**
3535 * Register PCD callbacks. The callbacks are used to start and stop
3536 * the PCD for interrupt processing.
3537 *
3538 * @param core_if Programming view of DWC_otg controller.
3539 * @param cb the PCD callback structure.
3540 * @param p pointer to be passed to callback function (pcd*).
3541 */
3542 void dwc_otg_cil_register_pcd_callbacks(dwc_otg_core_if_t *core_if,
3543 dwc_otg_cil_callbacks_t *cb,
3544 void *p)
3545 {
3546 core_if->pcd_cb = cb;
3547 cb->p = p;
3548 }
3549
3550 #ifdef DWC_EN_ISOC
3551
3552 /**
3553  * This function writes one (micro)frame's worth of isochronous data into the Tx FIFO
3554 *
3555 * @param core_if Programming view of DWC_otg controller.
3556 * @param ep The EP to start the transfer on.
3557 *
3558 */
3559 void write_isoc_frame_data(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
3560 {
3561 dwc_otg_dev_in_ep_regs_t *ep_regs;
3562 dtxfsts_data_t txstatus = {.d32 = 0};
3563 uint32_t len = 0;
3564 uint32_t dwords;
3565
3566 ep->xfer_len = ep->data_per_frame;
3567 ep->xfer_count = 0;
3568
3569 ep_regs = core_if->dev_if->in_ep_regs[ep->num];
3570
3571 len = ep->xfer_len - ep->xfer_count;
3572
3573 if (len > ep->maxpacket) {
3574 len = ep->maxpacket;
3575 }
3576
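	/* Round the byte count up to whole 32-bit FIFO words, e.g. len = 10 -> 3 words. */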
3577 dwords = (len + 3)/4;
3578
3579 	/* While there is space in the queue and space in the FIFO, and
3580 	 * more data to transfer, write packets to the Tx FIFO. */
3581 txstatus.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dtxfsts);
3582 DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n",ep->num,txstatus.d32);
3583
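	/* txfspcavail is reported in 32-bit words, hence the comparison against 'dwords'. */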
3584 while (txstatus.b.txfspcavail > dwords &&
3585 ep->xfer_count < ep->xfer_len &&
3586 ep->xfer_len != 0) {
3587 /* Write the FIFO */
3588 dwc_otg_ep_write_packet(core_if, ep, 0);
3589
3590 len = ep->xfer_len - ep->xfer_count;
3591 if (len > ep->maxpacket) {
3592 len = ep->maxpacket;
3593 }
3594
3595 dwords = (len + 3)/4;
3596 txstatus.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dtxfsts);
3597 DWC_DEBUGPL(DBG_PCDV,"dtxfsts[%d]=0x%08x\n", ep->num, txstatus.d32);
3598 }
3599 }
3600
3601
3602 /**
3603  * This function starts an isochronous transfer of one (micro)frame's data on the given EP
3604 *
3605 * @param core_if Programming view of DWC_otg controller.
3606 * @param ep The EP to start the transfer on.
3607 *
3608 */
3609 void dwc_otg_iso_ep_start_frm_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
3610 {
3611 deptsiz_data_t deptsiz = { .d32 = 0 };
3612 depctl_data_t depctl = { .d32 = 0 };
3613 dsts_data_t dsts = { .d32 = 0 };
3614 volatile uint32_t *addr;
3615
3616 if(ep->is_in) {
3617 addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
3618 } else {
3619 addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
3620 }
3621
3622 ep->xfer_len = ep->data_per_frame;
3623 ep->xfer_count = 0;
3624 ep->xfer_buff = ep->cur_pkt_addr;
3625 ep->dma_addr = ep->cur_pkt_dma_addr;
3626
3627 if(ep->is_in) {
3628 		/* Program the transfer size and packet count as follows:
3629 		 *   xfersize = N * maxpacket + short_packet
3630 		 *   pktcnt   = N + (short_packet exists ? 1 : 0)
3631 		 */
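		/* Worked example with assumed values: xfer_len = 1000, maxpacket = 188
		 * gives pktcnt = (1000 - 1 + 188) / 188 = 6 (five full packets plus a
		 * 60-byte short packet), while xfersize stays at 1000. */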
3633 deptsiz.b.xfersize = ep->xfer_len;
3634 deptsiz.b.pktcnt =
3635 (ep->xfer_len - 1 + ep->maxpacket) /
3636 ep->maxpacket;
3637 deptsiz.b.mc = deptsiz.b.pktcnt;
3638 dwc_write_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz, deptsiz.d32);
3639
3640 /* Write the DMA register */
3641 if (core_if->dma_enable) {
3642 dwc_write_reg32 (&(core_if->dev_if->in_ep_regs[ep->num]->diepdma), (uint32_t)ep->dma_addr);
3643 }
3644 } else {
3645 deptsiz.b.pktcnt =
3646 (ep->xfer_len + (ep->maxpacket - 1)) /
3647 ep->maxpacket;
3648 deptsiz.b.xfersize = deptsiz.b.pktcnt * ep->maxpacket;
3649
3650 dwc_write_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz, deptsiz.d32);
3651
3652 if (core_if->dma_enable) {
3653 dwc_write_reg32 (&(core_if->dev_if->out_ep_regs[ep->num]->doepdma),
3654 (uint32_t)ep->dma_addr);
3655 }
3656 }
3657
3658
3659 	/* Enable the endpoint and clear NAK */
3660
3661 depctl.d32 = 0;
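	/* For isochronous endpoints the SetD0PID/SetD1PID bits double as the
	 * even/odd (micro)frame selectors, so the bit is chosen from the parity
	 * of the frame the transfer is scheduled into. */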
3662 if(ep->bInterval == 1) {
3663 dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts);
3664 ep->next_frame = dsts.b.soffn + ep->bInterval;
3665
3666 if(ep->next_frame & 0x1) {
3667 depctl.b.setd1pid = 1;
3668 } else {
3669 depctl.b.setd0pid = 1;
3670 }
3671 } else {
3672 ep->next_frame += ep->bInterval;
3673
3674 if(ep->next_frame & 0x1) {
3675 depctl.b.setd1pid = 1;
3676 } else {
3677 depctl.b.setd0pid = 1;
3678 }
3679 }
3680 depctl.b.epena = 1;
3681 depctl.b.cnak = 1;
3682
3683 dwc_modify_reg32(addr, 0, depctl.d32);
3684 depctl.d32 = dwc_read_reg32(addr);
3685
3686 if(ep->is_in && core_if->dma_enable == 0) {
3687 write_isoc_frame_data(core_if, ep);
3688 }
3689
3690 }
3691
3692 #endif //DWC_EN_ISOC