#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/stat.h>
#include <linux/tty.h>
#include <linux/selection.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/uaccess.h>

#include <asm/danube/danube.h>
#include <asm/danube/danube_irq.h>
#include <asm/danube/danube_dma.h>
/*
 * 25 descriptors for each DMA channel: one 4096-byte page holds
 * 4096 / 8 = 512 descriptors, and 512 / 20 channels = 25.6,
 * rounded down to 25.
 */
#define DANUBE_DMA_DESCRIPTOR_OFFSET 25

#define MAX_DMA_DEVICE_NUM   6   /* max ports connecting to dma */
#define MAX_DMA_CHANNEL_NUM  20  /* max dma channels */
#define DMA_INT_BUDGET       100 /* budget for interrupt handling */
#define DMA_POLL_COUNTER     4   /* fixme: set the correct counter value here */
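/*
 * Compile-time check of the arithmetic above. This is an added sketch: the
 * 8-byte descriptor size and the single GFP_DMA page are assumptions taken
 * from the descriptor allocation in danube_dma_init() below.
 */
#if (DANUBE_DMA_DESCRIPTOR_OFFSET * 8 * MAX_DMA_CHANNEL_NUM) > 4096
#error "per-channel descriptor rings do not fit in one 4 KB page"
#endif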
extern void mask_and_ack_danube_irq (unsigned int irq_nr);
extern void enable_danube_irq (unsigned int irq_nr);
extern void disable_danube_irq (unsigned int irq_nr);
u64 *g_desc_list; /* one page of descriptor memory shared by all channels */
_dma_device_info dma_devs[MAX_DMA_DEVICE_NUM];
_dma_channel_info dma_chan[MAX_DMA_CHANNEL_NUM];

char global_device_name[MAX_DMA_DEVICE_NUM][20] =
	{ {"PPE"}, {"DEU"}, {"SPI"}, {"SDIO"}, {"MCTRL0"}, {"MCTRL1"} };
_dma_chan_map default_dma_map[MAX_DMA_CHANNEL_NUM] = {
	{"PPE",    DANUBE_DMA_RX, 0, DANUBE_DMA_CH0_INT,  0},
	{"PPE",    DANUBE_DMA_TX, 0, DANUBE_DMA_CH1_INT,  0},
	{"PPE",    DANUBE_DMA_RX, 1, DANUBE_DMA_CH2_INT,  1},
	{"PPE",    DANUBE_DMA_TX, 1, DANUBE_DMA_CH3_INT,  1},
	{"PPE",    DANUBE_DMA_RX, 2, DANUBE_DMA_CH4_INT,  2},
	{"PPE",    DANUBE_DMA_TX, 2, DANUBE_DMA_CH5_INT,  2},
	{"PPE",    DANUBE_DMA_RX, 3, DANUBE_DMA_CH6_INT,  3},
	{"PPE",    DANUBE_DMA_TX, 3, DANUBE_DMA_CH7_INT,  3},
	{"DEU",    DANUBE_DMA_RX, 0, DANUBE_DMA_CH8_INT,  0},
	{"DEU",    DANUBE_DMA_TX, 0, DANUBE_DMA_CH9_INT,  0},
	{"DEU",    DANUBE_DMA_RX, 1, DANUBE_DMA_CH10_INT, 1},
	{"DEU",    DANUBE_DMA_TX, 1, DANUBE_DMA_CH11_INT, 1},
	{"SPI",    DANUBE_DMA_RX, 0, DANUBE_DMA_CH12_INT, 0},
	{"SPI",    DANUBE_DMA_TX, 0, DANUBE_DMA_CH13_INT, 0},
	{"SDIO",   DANUBE_DMA_RX, 0, DANUBE_DMA_CH14_INT, 0},
	{"SDIO",   DANUBE_DMA_TX, 0, DANUBE_DMA_CH15_INT, 0},
	{"MCTRL0", DANUBE_DMA_RX, 0, DANUBE_DMA_CH16_INT, 0},
	{"MCTRL0", DANUBE_DMA_TX, 0, DANUBE_DMA_CH17_INT, 0},
	{"MCTRL1", DANUBE_DMA_RX, 1, DANUBE_DMA_CH18_INT, 1},
	{"MCTRL1", DANUBE_DMA_TX, 1, DANUBE_DMA_CH19_INT, 1}
};

_dma_chan_map *chan_map = default_dma_map;
volatile u32 g_danube_dma_int_status = 0;
volatile int g_danube_dma_in_process = 0; /* 0 = not in process, 1 = in process */
void do_dma_tasklet (unsigned long);
DECLARE_TASKLET (dma_tasklet, do_dma_tasklet, 0);
u8*
common_buffer_alloc (int len, int *byte_offset, void **opt)
{
	u8 *buffer = (u8 *) kmalloc (len * sizeof (u8), GFP_KERNEL);

	*byte_offset = 0;

	return buffer;
}
void
common_buffer_free (u8 *dataptr, void *opt)
{
	if (dataptr)
		kfree(dataptr);
}
void
enable_ch_irq (_dma_channel_info *pCh)
{
	int chan_no = (int)(pCh - dma_chan);
	unsigned long flag;

	local_irq_save(flag);
	writel(chan_no, DANUBE_DMA_CS);
	writel(0x4a, DANUBE_DMA_CIE);
	writel(readl(DANUBE_DMA_IRNEN) | (1 << chan_no), DANUBE_DMA_IRNEN);
	local_irq_restore(flag);
	enable_danube_irq(pCh->irq);
}
void
disable_ch_irq (_dma_channel_info *pCh)
{
	int chan_no = (int) (pCh - dma_chan);
	unsigned long flag;

	local_irq_save(flag);
	g_danube_dma_int_status &= ~(1 << chan_no);
	writel(chan_no, DANUBE_DMA_CS);
	writel(0, DANUBE_DMA_CIE);
	writel(readl(DANUBE_DMA_IRNEN) & ~(1 << chan_no), DANUBE_DMA_IRNEN);
	local_irq_restore(flag);
	mask_and_ack_danube_irq(pCh->irq);
}
void
open_chan (_dma_channel_info *pCh)
{
	int chan_no = (int)(pCh - dma_chan);
	unsigned long flag;

	local_irq_save(flag);
	writel(chan_no, DANUBE_DMA_CS);
	writel(readl(DANUBE_DMA_CCTRL) | 1, DANUBE_DMA_CCTRL);
	if (pCh->dir == DANUBE_DMA_RX)
		enable_ch_irq(pCh);
	local_irq_restore(flag);
}
void
close_chan (_dma_channel_info *pCh)
{
	int chan_no = (int) (pCh - dma_chan);
	unsigned long flag;

	local_irq_save(flag);
	writel(chan_no, DANUBE_DMA_CS);
	writel(readl(DANUBE_DMA_CCTRL) & ~1, DANUBE_DMA_CCTRL);
	disable_ch_irq(pCh);
	local_irq_restore(flag);
}
void
reset_chan (_dma_channel_info *pCh)
{
	int chan_no = (int) (pCh - dma_chan);

	writel(chan_no, DANUBE_DMA_CS);
	writel(readl(DANUBE_DMA_CCTRL) | 2, DANUBE_DMA_CCTRL);
}
void
rx_chan_intr_handler (int chan_no)
{
	_dma_device_info *pDev = (_dma_device_info *)dma_chan[chan_no].dma_dev;
	_dma_channel_info *pCh = &dma_chan[chan_no];
	struct rx_desc *rx_desc_p;
	int tmp;
	unsigned long flag;

	/* handle command complete interrupt */
	rx_desc_p = (struct rx_desc *)pCh->desc_base + pCh->curr_desc;
	if (rx_desc_p->status.field.OWN == CPU_OWN
	    && rx_desc_p->status.field.C
	    && rx_desc_p->status.field.data_length < 1536) {
		/* everything is correct, so inform the upper layer */
		pDev->current_rx_chan = pCh->rel_chan_no;
		if (pDev->intr_handler)
			pDev->intr_handler(pDev, RCV_INT);
		pCh->weight--;
	} else {
		local_irq_save(flag);
		tmp = readl(DANUBE_DMA_CS);
		writel(chan_no, DANUBE_DMA_CS);
		writel(readl(DANUBE_DMA_CIS) | 0x7e, DANUBE_DMA_CIS);
		writel(tmp, DANUBE_DMA_CS);
		g_danube_dma_int_status &= ~(1 << chan_no);
		local_irq_restore(flag);
		enable_danube_irq(dma_chan[chan_no].irq);
	}
}
void
tx_chan_intr_handler (int chan_no)
{
	_dma_device_info *pDev = (_dma_device_info *)dma_chan[chan_no].dma_dev;
	_dma_channel_info *pCh = &dma_chan[chan_no];
	int tmp;
	unsigned long flag;

	local_irq_save(flag);
	tmp = readl(DANUBE_DMA_CS);
	writel(chan_no, DANUBE_DMA_CS);
	writel(readl(DANUBE_DMA_CIS) | 0x7e, DANUBE_DMA_CIS);
	writel(tmp, DANUBE_DMA_CS);
	g_danube_dma_int_status &= ~(1 << chan_no);
	local_irq_restore(flag);
	pDev->current_tx_chan = pCh->rel_chan_no;
	if (pDev->intr_handler)
		pDev->intr_handler(pDev, TRANSMIT_CPT_INT);
}
void
do_dma_tasklet (unsigned long unused)
{
	int i;
	int chan_no = 0;
	int budget = DMA_INT_BUDGET;
	int weight = 0;
	unsigned long flag;

	while (g_danube_dma_int_status)
	{
		if (budget-- < 0)
		{
			/* budget exhausted: reschedule ourselves and yield the CPU */
			tasklet_schedule(&dma_tasklet);
			return;
		}
		chan_no = -1;
		weight = 0;
		/* pick the pending channel with the largest remaining weight */
		for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++)
		{
			if ((g_danube_dma_int_status & (1 << i)) && dma_chan[i].weight > 0)
			{
				if (dma_chan[i].weight > weight)
				{
					chan_no = i;
					weight = dma_chan[chan_no].weight;
				}
			}
		}
		if (chan_no >= 0)
		{
			if (chan_map[chan_no].dir == DANUBE_DMA_RX)
				rx_chan_intr_handler(chan_no);
			else
				tx_chan_intr_handler(chan_no);
		}
		else
		{
			/* all pending channels exhausted their weight: recharge them */
			for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++)
				dma_chan[i].weight = dma_chan[i].default_weight;
		}
	}

	local_irq_save(flag);
	g_danube_dma_in_process = 0;
	if (g_danube_dma_int_status)
	{
		/* an interrupt slipped in after the loop above: run again */
		g_danube_dma_in_process = 1;
		tasklet_schedule(&dma_tasklet);
	}
	local_irq_restore(flag);
}
irqreturn_t
dma_interrupt (int irq, void *dev_id)
{
	_dma_channel_info *pCh;
	int chan_no = 0;
	u32 tmp;

	pCh = (_dma_channel_info *)dev_id;
	chan_no = (int)(pCh - dma_chan);
	if (chan_no < 0 || chan_no > 19)
		BUG();

	tmp = readl(DANUBE_DMA_IRNEN);
	writel(0, DANUBE_DMA_IRNEN);
	g_danube_dma_int_status |= 1 << chan_no;
	writel(tmp, DANUBE_DMA_IRNEN);
	mask_and_ack_danube_irq(irq);

	if (!g_danube_dma_in_process)
	{
		g_danube_dma_in_process = 1;
		tasklet_schedule(&dma_tasklet);
	}

	return IRQ_HANDLED;
}
struct dma_device_info*
dma_device_reserve (char *dev_name)
{
	int i;

	for (i = 0; i < MAX_DMA_DEVICE_NUM; i++)
	{
		if (strcmp(dev_name, dma_devs[i].device_name) == 0)
		{
			if (dma_devs[i].reserved)
				return NULL;
			dma_devs[i].reserved = 1;
			return &dma_devs[i];
		}
	}

	return NULL;
}
void
dma_device_release (_dma_device_info *dev)
{
	dev->reserved = 0;
}
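/*
 * Minimal client sketch (illustration only, kept compiled out): "my_isr",
 * "my_open" and the packet size are hypothetical, not part of this driver.
 * A peripheral driver reserves its port, installs a handler, switches the
 * channels it needs to DANUBE_DMA_CH_ON, registers, and opens the channels.
 */
#if 0
static void my_isr (struct dma_device_info *dma_dev, int status)
{
	/* RCV_INT, TRANSMIT_CPT_INT or TX_BUF_FULL_INT arrives here */
}

static int my_open (void)
{
	struct dma_device_info *dma_dev = dma_device_reserve("PPE");

	if (!dma_dev)
		return -EBUSY;
	dma_dev->intr_handler = my_isr;
	dma_dev->rx_chan[0]->packet_size = 1536; /* hypothetical value */
	dma_dev->rx_chan[0]->control = DANUBE_DMA_CH_ON;
	dma_dev->tx_chan[0]->control = DANUBE_DMA_CH_ON;
	dma_device_register(dma_dev);
	dma_dev->rx_chan[0]->open(dma_dev->rx_chan[0]);

	return 0;
}
#endif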
void
dma_device_register (_dma_device_info *dev)
{
	int i, j;
	int chan_no = 0;
	u8 *buffer;
	int byte_offset;
	unsigned long flag;
	_dma_device_info *pDev;
	_dma_channel_info *pCh;
	struct rx_desc *rx_desc_p;
	struct tx_desc *tx_desc_p;

	for (i = 0; i < dev->max_tx_chan_num; i++)
	{
		pCh = dev->tx_chan[i];
		if (pCh->control == DANUBE_DMA_CH_ON)
		{
			chan_no = (int)(pCh - dma_chan);
			for (j = 0; j < pCh->desc_len; j++)
			{
				tx_desc_p = (struct tx_desc *)pCh->desc_base + j;
				memset(tx_desc_p, 0, sizeof(struct tx_desc));
			}
			local_irq_save(flag);
			writel(chan_no, DANUBE_DMA_CS);
			/* check if the descriptor length has changed */
			if (readl(DANUBE_DMA_CDLEN) != pCh->desc_len)
				writel(pCh->desc_len, DANUBE_DMA_CDLEN);

			writel(readl(DANUBE_DMA_CCTRL) & ~1, DANUBE_DMA_CCTRL);
			writel(readl(DANUBE_DMA_CCTRL) | 2, DANUBE_DMA_CCTRL);
			while (readl(DANUBE_DMA_CCTRL) & 2) {};
			writel(readl(DANUBE_DMA_IRNEN) | (1 << chan_no), DANUBE_DMA_IRNEN);
			writel(0x30100, DANUBE_DMA_CCTRL); /* reset the channel here, enable it later */
			local_irq_restore(flag);
		}
	}

	for (i = 0; i < dev->max_rx_chan_num; i++)
	{
		pCh = dev->rx_chan[i];
		if (pCh->control == DANUBE_DMA_CH_ON)
		{
			chan_no = (int)(pCh - dma_chan);

			for (j = 0; j < pCh->desc_len; j++)
			{
				rx_desc_p = (struct rx_desc *)pCh->desc_base + j;
				pDev = (_dma_device_info *)(pCh->dma_dev);
				buffer = pDev->buffer_alloc(pCh->packet_size, &byte_offset, (void*)&(pCh->opt[j]));
				if (!buffer)
					break;

				dma_cache_inv((unsigned long) buffer, pCh->packet_size);

				rx_desc_p->Data_Pointer = (u32)CPHYSADDR((u32)buffer);
				rx_desc_p->status.word = 0;
				rx_desc_p->status.field.byte_offset = byte_offset;
				rx_desc_p->status.field.OWN = DMA_OWN;
				rx_desc_p->status.field.data_length = pCh->packet_size;
			}

			local_irq_save(flag);
			writel(chan_no, DANUBE_DMA_CS);
			/* check if the descriptor length has changed */
			if (readl(DANUBE_DMA_CDLEN) != pCh->desc_len)
				writel(pCh->desc_len, DANUBE_DMA_CDLEN);
			writel(readl(DANUBE_DMA_CCTRL) & ~1, DANUBE_DMA_CCTRL);
			writel(readl(DANUBE_DMA_CCTRL) | 2, DANUBE_DMA_CCTRL);
			while (readl(DANUBE_DMA_CCTRL) & 2) {};
			writel(0x0a, DANUBE_DMA_CIE); /* fixme: should all the interrupts be enabled here? */
			writel(readl(DANUBE_DMA_IRNEN) | (1 << chan_no), DANUBE_DMA_IRNEN);
			writel(0x30000, DANUBE_DMA_CCTRL);
			local_irq_restore(flag);
			enable_danube_irq(dma_chan[chan_no].irq);
		}
	}
}
void
dma_device_unregister (_dma_device_info *dev)
{
	int i, j;
	int chan_no;
	_dma_channel_info *pCh;
	struct rx_desc *rx_desc_p;
	struct tx_desc *tx_desc_p;
	unsigned long flag;

	for (i = 0; i < dev->max_tx_chan_num; i++)
	{
		pCh = dev->tx_chan[i];
		if (pCh->control == DANUBE_DMA_CH_ON)
		{
			chan_no = (int)(dev->tx_chan[i] - dma_chan);
			local_irq_save (flag);
			writel(chan_no, DANUBE_DMA_CS);
			pCh->control = DANUBE_DMA_CH_OFF;
			writel(0, DANUBE_DMA_CIE); /* fixme: should all the interrupts be disabled here? */
			writel(readl(DANUBE_DMA_IRNEN) & ~(1 << chan_no), DANUBE_DMA_IRNEN); /* disable interrupts */
			writel(readl(DANUBE_DMA_CCTRL) & ~1, DANUBE_DMA_CCTRL);
			while (readl(DANUBE_DMA_CCTRL) & 1) {};
			local_irq_restore (flag);

			for (j = 0; j < pCh->desc_len; j++)
			{
				tx_desc_p = (struct tx_desc *)pCh->desc_base + j;
				if ((tx_desc_p->status.field.OWN == CPU_OWN && tx_desc_p->status.field.C)
				    || (tx_desc_p->status.field.OWN == DMA_OWN && tx_desc_p->status.field.data_length > 0))
				{
					dev->buffer_free ((u8 *) __va (tx_desc_p->Data_Pointer), (void*)pCh->opt[j]);
				}
				tx_desc_p->status.field.OWN = CPU_OWN;
				memset (tx_desc_p, 0, sizeof (struct tx_desc));
			}
			/* TODO: should free buffers that were not transferred by dma */
		}
	}

	for (i = 0; i < dev->max_rx_chan_num; i++)
	{
		pCh = dev->rx_chan[i];
		chan_no = (int)(dev->rx_chan[i] - dma_chan);
		disable_danube_irq(pCh->irq);

		local_irq_save(flag);
		g_danube_dma_int_status &= ~(1 << chan_no);
		pCh->control = DANUBE_DMA_CH_OFF;
		writel(chan_no, DANUBE_DMA_CS);
		writel(0, DANUBE_DMA_CIE); /* fixme: should all the interrupts be disabled here? */
		writel(readl(DANUBE_DMA_IRNEN) & ~(1 << chan_no), DANUBE_DMA_IRNEN); /* disable interrupts */
		writel(readl(DANUBE_DMA_CCTRL) & ~1, DANUBE_DMA_CCTRL);
		while (readl(DANUBE_DMA_CCTRL) & 1) {};
		local_irq_restore (flag);

		for (j = 0; j < pCh->desc_len; j++)
		{
			rx_desc_p = (struct rx_desc *) pCh->desc_base + j;
			if ((rx_desc_p->status.field.OWN == CPU_OWN
			     && rx_desc_p->status.field.C)
			    || (rx_desc_p->status.field.OWN == DMA_OWN
				&& rx_desc_p->status.field.data_length > 0))
			{
				dev->buffer_free ((u8 *) __va (rx_desc_p->Data_Pointer),
						  (void *) pCh->opt[j]);
			}
		}
	}
}
int
dma_device_read (struct dma_device_info *dma_dev, u8 **dataptr, void **opt)
{
	u8 *buf;
	int len;
	int byte_offset = 0;
	void *p = NULL;
	_dma_channel_info *pCh = dma_dev->rx_chan[dma_dev->current_rx_chan];
	struct rx_desc *rx_desc_p;

	/* get the rx data first */
	rx_desc_p = (struct rx_desc *) pCh->desc_base + pCh->curr_desc;
	if (!(rx_desc_p->status.field.OWN == CPU_OWN && rx_desc_p->status.field.C))
		return 0;

	buf = (u8 *) __va (rx_desc_p->Data_Pointer);
	*(u32 *)dataptr = (u32)buf;
	len = rx_desc_p->status.field.data_length;
	if (opt)
		*(int*)opt = (int)pCh->opt[pCh->curr_desc];

	/* replace it with a newly allocated buffer */
	buf = dma_dev->buffer_alloc(pCh->packet_size, &byte_offset, &p);
	if (buf)
	{
		dma_cache_inv ((unsigned long) buf, pCh->packet_size);
		pCh->opt[pCh->curr_desc] = p;

		rx_desc_p->Data_Pointer = (u32) CPHYSADDR ((u32) buf);
		rx_desc_p->status.word = (DMA_OWN << 31) | ((byte_offset) << 23) | pCh->packet_size;
	}
	else
	{
		*(u32 *) dataptr = 0;
		if (opt)
			*(int *) opt = 0;
		len = 0;
	}

	/* advance the curr_desc pointer, wrapping at the end of the ring */
	pCh->curr_desc++;
	if (pCh->curr_desc == pCh->desc_len)
		pCh->curr_desc = 0;

	return len;
}
int
dma_device_write (struct dma_device_info *dma_dev, u8 *dataptr, int len, void *opt)
{
	unsigned long flag;
	u32 tmp, byte_offset;
	_dma_channel_info *pCh;
	int chan_no;
	struct tx_desc *tx_desc_p;

	local_irq_save (flag);

	pCh = dma_dev->tx_chan[dma_dev->current_tx_chan];
	chan_no = (int)(pCh - (_dma_channel_info *) dma_chan);

	/* reclaim descriptors the DMA engine has finished with */
	tx_desc_p = (struct tx_desc *)pCh->desc_base + pCh->prev_desc;
	while (tx_desc_p->status.field.OWN == CPU_OWN && tx_desc_p->status.field.C)
	{
		dma_dev->buffer_free((u8 *) __va (tx_desc_p->Data_Pointer), pCh->opt[pCh->prev_desc]);
		memset(tx_desc_p, 0, sizeof (struct tx_desc));
		pCh->prev_desc = (pCh->prev_desc + 1) % (pCh->desc_len);
		tx_desc_p = (struct tx_desc *)pCh->desc_base + pCh->prev_desc;
	}
	tx_desc_p = (struct tx_desc *)pCh->desc_base + pCh->curr_desc;
	/* check whether this descriptor is available */
	if (tx_desc_p->status.field.OWN == DMA_OWN || tx_desc_p->status.field.C)
	{
		/* if not, tell the upper layer device */
		dma_dev->intr_handler (dma_dev, TX_BUF_FULL_INT);
		local_irq_restore(flag);
		printk (KERN_INFO "%s %d: failed to write!\n", __func__, __LINE__);

		return 0;
	}
	pCh->opt[pCh->curr_desc] = opt;
	/*
	 * byte offset: adjusts the start address of the data buffer; it must
	 * be a multiple of the burst length.
	 */
	byte_offset = ((u32) CPHYSADDR ((u32) dataptr)) % ((dma_dev->tx_burst_len) * 4);
	dma_cache_wback ((unsigned long) dataptr, len);

	tx_desc_p->Data_Pointer = (u32) CPHYSADDR ((u32) dataptr) - byte_offset;
	tx_desc_p->status.word = (DMA_OWN << 31) | DMA_DESC_SOP_SET | DMA_DESC_EOP_SET | ((byte_offset) << 23) | len;

	pCh->curr_desc++;
	if (pCh->curr_desc == pCh->desc_len)
		pCh->curr_desc = 0;

	/* check whether the next descriptor is available */
	tx_desc_p = (struct tx_desc *) pCh->desc_base + pCh->curr_desc;
	if (tx_desc_p->status.field.OWN == DMA_OWN)
	{
		/* if not, tell the upper layer device */
		dma_dev->intr_handler (dma_dev, TX_BUF_FULL_INT);
	}

	writel(chan_no, DANUBE_DMA_CS);
	tmp = readl(DANUBE_DMA_CCTRL);
	/* open the channel if it is not already running */
	if (!(tmp & 1))
		pCh->open(pCh);

	local_irq_restore (flag);

	return len;
}
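/*
 * Worked example of the byte_offset logic in dma_device_write() above: with
 * the default tx_burst_len of 4, the alignment unit is 4 * 4 = 16 bytes. A
 * buffer at physical address 0x01000009 gives byte_offset = 9, so
 * Data_Pointer is set to the aligned 0x01000000 and the offset field in the
 * status word tells the engine to skip the first 9 bytes.
 */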
int
map_dma_chan (_dma_chan_map *map)
{
	int i, j;
	int result;

	for (i = 0; i < MAX_DMA_DEVICE_NUM; i++)
		strcpy(dma_devs[i].device_name, global_device_name[i]);

	for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++)
	{
		dma_chan[i].irq = map[i].irq;
		result = request_irq(dma_chan[i].irq, dma_interrupt, SA_INTERRUPT, "dma-core", (void*)&dma_chan[i]);
		if (result)
		{
			printk("error, cannot get dma_irq!\n");
			free_irq(dma_chan[i].irq, (void *) &dma_interrupt);

			return -EFAULT;
		}
	}

	for (i = 0; i < MAX_DMA_DEVICE_NUM; i++)
	{
		dma_devs[i].num_tx_chan = 0; /* no tx channels assigned yet */
		dma_devs[i].num_rx_chan = 0; /* no rx channels assigned yet */
		dma_devs[i].max_rx_chan_num = 0;
		dma_devs[i].max_tx_chan_num = 0;
		dma_devs[i].buffer_alloc = &common_buffer_alloc;
		dma_devs[i].buffer_free = &common_buffer_free;
		dma_devs[i].intr_handler = NULL;
		dma_devs[i].tx_burst_len = 4;
		dma_devs[i].rx_burst_len = 4;
		if (i == 0)
		{
			writel(0, DANUBE_DMA_PS);
			writel(readl(DANUBE_DMA_PCTRL) | ((0xf << 8) | (1 << 6)), DANUBE_DMA_PCTRL); /* enable dma drop */
		}
		if (i == 1)
		{
			writel(1, DANUBE_DMA_PS);
			writel(0x14, DANUBE_DMA_PCTRL); /* deu port setting */
		}

		for (j = 0; j < MAX_DMA_CHANNEL_NUM; j++)
		{
			dma_chan[j].byte_offset = 0;
			dma_chan[j].open = &open_chan;
			dma_chan[j].close = &close_chan;
			dma_chan[j].reset = &reset_chan;
			dma_chan[j].enable_irq = &enable_ch_irq;
			dma_chan[j].disable_irq = &disable_ch_irq;
			dma_chan[j].rel_chan_no = map[j].rel_chan_no;
			dma_chan[j].control = DANUBE_DMA_CH_OFF;
			dma_chan[j].default_weight = DANUBE_DMA_CH_DEFAULT_WEIGHT;
			dma_chan[j].weight = dma_chan[j].default_weight;
			dma_chan[j].curr_desc = 0;
			dma_chan[j].prev_desc = 0;
		}

		for (j = 0; j < MAX_DMA_CHANNEL_NUM; j++)
		{
			if (strcmp(dma_devs[i].device_name, map[j].dev_name) == 0)
			{
				if (map[j].dir == DANUBE_DMA_RX)
				{
					dma_chan[j].dir = DANUBE_DMA_RX;
					dma_devs[i].max_rx_chan_num++;
					dma_devs[i].rx_chan[dma_devs[i].max_rx_chan_num - 1] = &dma_chan[j];
					dma_devs[i].rx_chan[dma_devs[i].max_rx_chan_num - 1]->pri = map[j].pri;
					dma_chan[j].dma_dev = (void*)&dma_devs[i];
				}
				else if (map[j].dir == DANUBE_DMA_TX)
				{
					dma_chan[j].dir = DANUBE_DMA_TX;
					dma_devs[i].max_tx_chan_num++;
					dma_devs[i].tx_chan[dma_devs[i].max_tx_chan_num - 1] = &dma_chan[j];
					dma_devs[i].tx_chan[dma_devs[i].max_tx_chan_num - 1]->pri = map[j].pri;
					dma_chan[j].dma_dev = (void*)&dma_devs[i];
				}
				else
					printk ("WRONG DMA MAP!\n");
			}
		}
	}

	return 0;
}
void
dma_chip_init (void)
{
	int i;

	/* enable DMA from PMU */
	writel(readl(DANUBE_PMU_PWDCR) & ~DANUBE_PMU_PWDCR_DMA, DANUBE_PMU_PWDCR);

	/* reset DMA */
	writel(readl(DANUBE_DMA_CTRL) | 1, DANUBE_DMA_CTRL);

	/* disable all interrupts */
	writel(0, DANUBE_DMA_IRNEN);

	for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++)
	{
		writel(i, DANUBE_DMA_CS);
		writel(0x2, DANUBE_DMA_CCTRL);        /* reset the channel */
		writel(0x80000040, DANUBE_DMA_CPOLL); /* set the poll register */
		writel(readl(DANUBE_DMA_CCTRL) & ~0x1, DANUBE_DMA_CCTRL); /* make sure the channel is off */
	}
}
int __init
danube_dma_init (void)
{
	int i;

	dma_chip_init();
	if (map_dma_chan(default_dma_map))
		BUG();

	g_desc_list = (u64*)KSEG1ADDR(__get_free_page(GFP_DMA));
	if (g_desc_list == NULL)
	{
		printk("no memory for descriptor\n");
		return -ENOMEM;
	}
	memset(g_desc_list, 0, PAGE_SIZE);

	for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++)
	{
		dma_chan[i].desc_base = (u32)g_desc_list + i * DANUBE_DMA_DESCRIPTOR_OFFSET * 8;
		dma_chan[i].curr_desc = 0;
		dma_chan[i].desc_len = DANUBE_DMA_DESCRIPTOR_OFFSET;

		writel(i, DANUBE_DMA_CS);
		writel((u32)CPHYSADDR(dma_chan[i].desc_base), DANUBE_DMA_CDBA);
		writel(dma_chan[i].desc_len, DANUBE_DMA_CDLEN);
	}

	return 0;
}
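/*
 * Descriptor layout example from the arithmetic above: channel i's ring
 * starts i * 25 * 8 = i * 200 bytes into the shared page, so channel 19
 * occupies bytes 3800..3999 of the 4096-byte page, leaving the last 96
 * bytes unused.
 */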
arch_initcall(danube_dma_init);
void
danube_dma_cleanup (void)
{
	int i;

	/* release the shared descriptor page and the channel IRQs */
	free_page(KSEG0ADDR((unsigned long) g_desc_list));
	for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++)
		free_irq(dma_chan[i].irq, (void*)&dma_interrupt);
}
EXPORT_SYMBOL (dma_device_reserve);
EXPORT_SYMBOL (dma_device_release);
EXPORT_SYMBOL (dma_device_register);
EXPORT_SYMBOL (dma_device_unregister);
EXPORT_SYMBOL (dma_device_read);
EXPORT_SYMBOL (dma_device_write);
MODULE_LICENSE ("GPL");