/* target/linux/ifxmips/files-2.6.30/arch/mips/ifxmips/dma-core.c */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/selection.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/io.h>

#include <ifxmips.h>
#include <ifxmips_irq.h>
#include <ifxmips_dma.h>
#include <ifxmips_pmu.h>

/* 25 descriptors for each DMA channel: 4096 / 8 / 20 = 25.x */
#define IFXMIPS_DMA_DESCRIPTOR_OFFSET 25

#define MAX_DMA_DEVICE_NUM	6	/* max number of ports connected to the DMA */
#define MAX_DMA_CHANNEL_NUM	20	/* max number of DMA channels */
#define DMA_INT_BUDGET		100	/* budget for interrupt handling */
#define DMA_POLL_COUNTER	4	/* FIXME: set the correct counter value here! */

extern void ifxmips_mask_and_ack_irq(unsigned int irq_nr);
extern void ifxmips_enable_irq(unsigned int irq_nr);
extern void ifxmips_disable_irq(unsigned int irq_nr);

u64 *g_desc_list;
struct dma_device_info dma_devs[MAX_DMA_DEVICE_NUM];
struct dma_channel_info dma_chan[MAX_DMA_CHANNEL_NUM];

static const char *global_device_name[MAX_DMA_DEVICE_NUM] =
	{ "PPE", "DEU", "SPI", "SDIO", "MCTRL0", "MCTRL1" };

struct dma_chan_map default_dma_map[MAX_DMA_CHANNEL_NUM] = {
	{"PPE", IFXMIPS_DMA_RX, 0, IFXMIPS_DMA_CH0_INT, 0},
	{"PPE", IFXMIPS_DMA_TX, 0, IFXMIPS_DMA_CH1_INT, 0},
	{"PPE", IFXMIPS_DMA_RX, 1, IFXMIPS_DMA_CH2_INT, 1},
	{"PPE", IFXMIPS_DMA_TX, 1, IFXMIPS_DMA_CH3_INT, 1},
	{"PPE", IFXMIPS_DMA_RX, 2, IFXMIPS_DMA_CH4_INT, 2},
	{"PPE", IFXMIPS_DMA_TX, 2, IFXMIPS_DMA_CH5_INT, 2},
	{"PPE", IFXMIPS_DMA_RX, 3, IFXMIPS_DMA_CH6_INT, 3},
	{"PPE", IFXMIPS_DMA_TX, 3, IFXMIPS_DMA_CH7_INT, 3},
	{"DEU", IFXMIPS_DMA_RX, 0, IFXMIPS_DMA_CH8_INT, 0},
	{"DEU", IFXMIPS_DMA_TX, 0, IFXMIPS_DMA_CH9_INT, 0},
	{"DEU", IFXMIPS_DMA_RX, 1, IFXMIPS_DMA_CH10_INT, 1},
	{"DEU", IFXMIPS_DMA_TX, 1, IFXMIPS_DMA_CH11_INT, 1},
	{"SPI", IFXMIPS_DMA_RX, 0, IFXMIPS_DMA_CH12_INT, 0},
	{"SPI", IFXMIPS_DMA_TX, 0, IFXMIPS_DMA_CH13_INT, 0},
	{"SDIO", IFXMIPS_DMA_RX, 0, IFXMIPS_DMA_CH14_INT, 0},
	{"SDIO", IFXMIPS_DMA_TX, 0, IFXMIPS_DMA_CH15_INT, 0},
	{"MCTRL0", IFXMIPS_DMA_RX, 0, IFXMIPS_DMA_CH16_INT, 0},
	{"MCTRL0", IFXMIPS_DMA_TX, 0, IFXMIPS_DMA_CH17_INT, 0},
	{"MCTRL1", IFXMIPS_DMA_RX, 1, IFXMIPS_DMA_CH18_INT, 1},
	{"MCTRL1", IFXMIPS_DMA_TX, 1, IFXMIPS_DMA_CH19_INT, 1}
};

struct dma_chan_map *chan_map = default_dma_map;
volatile u32 g_ifxmips_dma_int_status;
volatile int g_ifxmips_dma_in_process;	/* 0 = not in process, 1 = in process */

void do_dma_tasklet(unsigned long);
DECLARE_TASKLET(dma_tasklet, do_dma_tasklet, 0);

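/*
 * Default buffer allocator/free pair, used when a client device does
 * not install its own callbacks. Note that the opt handle is left
 * untouched here; client drivers typically (an assumption, not
 * verified against all callers) use it to stash per-buffer context
 * such as an skb pointer.
 */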
u8 *common_buffer_alloc(int len, int *byte_offset, void **opt)
{
	u8 *buffer = kmalloc(len, GFP_KERNEL);

	*byte_offset = 0;

	return buffer;
}

void common_buffer_free(u8 *dataptr, void *opt)
{
	kfree(dataptr);
}

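/*
 * Channel interrupt control. The controller is windowed: writing a
 * channel number to IFXMIPS_DMA_CS selects which channel the CIE and
 * CCTRL registers refer to, so the select+access pair runs with local
 * interrupts off. The 0x4a written to CIE presumably unmasks the
 * completion conditions this driver cares about.
 */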
void enable_ch_irq(struct dma_channel_info *pCh)
{
	int chan_no = (int)(pCh - dma_chan);
	unsigned long flag;

	local_irq_save(flag);
	ifxmips_w32(chan_no, IFXMIPS_DMA_CS);
	ifxmips_w32(0x4a, IFXMIPS_DMA_CIE);
	ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_IRNEN) | (1 << chan_no), IFXMIPS_DMA_IRNEN);
	local_irq_restore(flag);
	ifxmips_enable_irq(pCh->irq);
}

void disable_ch_irq(struct dma_channel_info *pCh)
{
	unsigned long flag;
	int chan_no = (int)(pCh - dma_chan);

	local_irq_save(flag);
	g_ifxmips_dma_int_status &= ~(1 << chan_no);
	ifxmips_w32(chan_no, IFXMIPS_DMA_CS);
	ifxmips_w32(0, IFXMIPS_DMA_CIE);
	ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_IRNEN) & ~(1 << chan_no), IFXMIPS_DMA_IRNEN);
	local_irq_restore(flag);
	ifxmips_mask_and_ack_irq(pCh->irq);
}

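/*
 * Start, stop and reset a channel. Judging from the code below, bit 0
 * of CCTRL is the channel enable bit and bit 1 triggers a channel
 * reset. RX channels also get their interrupt unmasked on open, since
 * reception is interrupt driven while TX completion is reaped lazily
 * in dma_device_write().
 */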
void open_chan(struct dma_channel_info *pCh)
{
	unsigned long flag;
	int chan_no = (int)(pCh - dma_chan);

	local_irq_save(flag);
	ifxmips_w32(chan_no, IFXMIPS_DMA_CS);
	ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_CCTRL) | 1, IFXMIPS_DMA_CCTRL);
	if (pCh->dir == IFXMIPS_DMA_RX)
		enable_ch_irq(pCh);
	local_irq_restore(flag);
}

void close_chan(struct dma_channel_info *pCh)
{
	unsigned long flag;
	int chan_no = (int)(pCh - dma_chan);

	local_irq_save(flag);
	ifxmips_w32(chan_no, IFXMIPS_DMA_CS);
	ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_CCTRL) & ~1, IFXMIPS_DMA_CCTRL);
	disable_ch_irq(pCh);
	local_irq_restore(flag);
}

void reset_chan(struct dma_channel_info *pCh)
{
	int chan_no = (int)(pCh - dma_chan);

	ifxmips_w32(chan_no, IFXMIPS_DMA_CS);
	ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_CCTRL) | 2, IFXMIPS_DMA_CCTRL);
}

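/*
 * Per-channel RX handler, called from the tasklet. If the current
 * descriptor has been handed back to the CPU and completed with a
 * sane length, the client's interrupt handler is invoked to pick up
 * the data; otherwise the channel's interrupt status is cleared and
 * its IRQ re-enabled, dropping the spurious event.
 */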
void rx_chan_intr_handler(int chan_no)
{
	struct dma_device_info *pDev = (struct dma_device_info *)dma_chan[chan_no].dma_dev;
	struct dma_channel_info *pCh = &dma_chan[chan_no];
	struct rx_desc *rx_desc_p;
	int tmp;
	unsigned long flag;

	/* handle the command-complete interrupt */
	rx_desc_p = (struct rx_desc *)pCh->desc_base + pCh->curr_desc;
	if (rx_desc_p->status.field.OWN == CPU_OWN
			&& rx_desc_p->status.field.C
			&& rx_desc_p->status.field.data_length < 1536) {
		/* everything is correct, so inform the upper layer */
		pDev->current_rx_chan = pCh->rel_chan_no;
		if (pDev->intr_handler)
			pDev->intr_handler(pDev, RCV_INT);
		pCh->weight--;
	} else {
		local_irq_save(flag);
		tmp = ifxmips_r32(IFXMIPS_DMA_CS);
		ifxmips_w32(chan_no, IFXMIPS_DMA_CS);
		ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_CIS) | 0x7e, IFXMIPS_DMA_CIS);
		ifxmips_w32(tmp, IFXMIPS_DMA_CS);
		g_ifxmips_dma_int_status &= ~(1 << chan_no);
		local_irq_restore(flag);
		ifxmips_enable_irq(dma_chan[chan_no].irq);
	}
}

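/*
 * Per-channel TX handler, also called from the tasklet: acknowledge
 * the channel's interrupt status and notify the client that a
 * transmit has completed so it can queue more data.
 */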
inline void tx_chan_intr_handler(int chan_no)
{
	struct dma_device_info *pDev = (struct dma_device_info *)dma_chan[chan_no].dma_dev;
	struct dma_channel_info *pCh = &dma_chan[chan_no];
	int tmp;
	unsigned long flag;

	local_irq_save(flag);
	tmp = ifxmips_r32(IFXMIPS_DMA_CS);
	ifxmips_w32(chan_no, IFXMIPS_DMA_CS);
	ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_CIS) | 0x7e, IFXMIPS_DMA_CIS);
	ifxmips_w32(tmp, IFXMIPS_DMA_CS);
	g_ifxmips_dma_int_status &= ~(1 << chan_no);
	local_irq_restore(flag);
	pDev->current_tx_chan = pCh->rel_chan_no;
	if (pDev->intr_handler)
		pDev->intr_handler(pDev, TRANSMIT_CPT_INT);
}

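/*
 * Bottom half: service pending channels by weighted round-robin.
 * Among all channels flagged in g_ifxmips_dma_int_status, the one
 * with the highest remaining weight is handled next; when no flagged
 * channel has weight left, all weights are reloaded from their
 * defaults. The budget bounds the work done per run; once exhausted,
 * the tasklet reschedules itself instead of hogging softirq context.
 */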
void do_dma_tasklet(unsigned long unused)
{
	int i;
	int chan_no = 0;
	int budget = DMA_INT_BUDGET;
	int weight = 0;
	unsigned long flag;

	while (g_ifxmips_dma_int_status) {
		if (budget-- < 0) {
			tasklet_schedule(&dma_tasklet);
			return;
		}
		chan_no = -1;
		weight = 0;
		for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++) {
			if ((g_ifxmips_dma_int_status & (1 << i)) && dma_chan[i].weight > 0) {
				if (dma_chan[i].weight > weight) {
					chan_no = i;
					weight = dma_chan[chan_no].weight;
				}
			}
		}

		if (chan_no >= 0) {
			if (chan_map[chan_no].dir == IFXMIPS_DMA_RX)
				rx_chan_intr_handler(chan_no);
			else
				tx_chan_intr_handler(chan_no);
		} else {
			for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++)
				dma_chan[i].weight = dma_chan[i].default_weight;
		}
	}

	local_irq_save(flag);
	g_ifxmips_dma_in_process = 0;
	if (g_ifxmips_dma_int_status) {
		g_ifxmips_dma_in_process = 1;
		tasklet_schedule(&dma_tasklet);
	}
	local_irq_restore(flag);
}

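/*
 * Top half: work out which channel fired from the dev_id cookie, mark
 * it pending, then mask and acknowledge the line and defer the real
 * work to the tasklet. IRNEN is temporarily cleared around the status
 * update, presumably to avoid racing the controller while the pending
 * mask is modified.
 */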
irqreturn_t dma_interrupt(int irq, void *dev_id)
{
	struct dma_channel_info *pCh;
	int chan_no = 0;
	int tmp;

	pCh = (struct dma_channel_info *)dev_id;
	chan_no = (int)(pCh - dma_chan);
	if (chan_no < 0 || chan_no >= MAX_DMA_CHANNEL_NUM)
		BUG();

	tmp = ifxmips_r32(IFXMIPS_DMA_IRNEN);
	ifxmips_w32(0, IFXMIPS_DMA_IRNEN);
	g_ifxmips_dma_int_status |= 1 << chan_no;
	ifxmips_w32(tmp, IFXMIPS_DMA_IRNEN);
	ifxmips_mask_and_ack_irq(irq);

	if (!g_ifxmips_dma_in_process) {
		g_ifxmips_dma_in_process = 1;
		tasklet_schedule(&dma_tasklet);
	}

	return IRQ_HANDLED;
}

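/*
 * Look up a DMA client port by name ("PPE", "DEU", ...) and claim it
 * for exclusive use. Returns NULL if the name is unknown or the
 * device has already been reserved.
 */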
struct dma_device_info *dma_device_reserve(char *dev_name)
{
	int i;

	for (i = 0; i < MAX_DMA_DEVICE_NUM; i++) {
		if (strcmp(dev_name, dma_devs[i].device_name) == 0) {
			if (dma_devs[i].reserved)
				return NULL;
			dma_devs[i].reserved = 1;
			return &dma_devs[i];
		}
	}

	/* no device of that name exists */
	return NULL;
}
EXPORT_SYMBOL(dma_device_reserve);

void dma_device_release(struct dma_device_info *dev)
{
	dev->reserved = 0;
}
EXPORT_SYMBOL(dma_device_release);

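/*
 * Bring up all channels the client has switched on. TX descriptor
 * rings are simply zeroed; RX rings are populated with freshly
 * allocated buffers whose physical addresses are handed to the
 * controller with DMA ownership set, so reception can start as soon
 * as the channel is enabled.
 */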
void dma_device_register(struct dma_device_info *dev)
{
	int i, j;
	int chan_no = 0;
	u8 *buffer;
	int byte_offset;
	unsigned long flag;
	struct dma_device_info *pDev;
	struct dma_channel_info *pCh;
	struct rx_desc *rx_desc_p;
	struct tx_desc *tx_desc_p;

	for (i = 0; i < dev->max_tx_chan_num; i++) {
		pCh = dev->tx_chan[i];
		if (pCh->control == IFXMIPS_DMA_CH_ON) {
			chan_no = (int)(pCh - dma_chan);
			for (j = 0; j < pCh->desc_len; j++) {
				tx_desc_p = (struct tx_desc *)pCh->desc_base + j;
				memset(tx_desc_p, 0, sizeof(struct tx_desc));
			}
			local_irq_save(flag);
			ifxmips_w32(chan_no, IFXMIPS_DMA_CS);
			/* check if the descriptor length has changed */
			if (ifxmips_r32(IFXMIPS_DMA_CDLEN) != pCh->desc_len)
				ifxmips_w32(pCh->desc_len, IFXMIPS_DMA_CDLEN);

			ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_CCTRL) & ~1, IFXMIPS_DMA_CCTRL);
			ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_CCTRL) | 2, IFXMIPS_DMA_CCTRL);
			while (ifxmips_r32(IFXMIPS_DMA_CCTRL) & 2)
				;
			ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_IRNEN) | (1 << chan_no), IFXMIPS_DMA_IRNEN);
			ifxmips_w32(0x30100, IFXMIPS_DMA_CCTRL); /* reset the channel; it is enabled later */
			local_irq_restore(flag);
		}
	}

	for (i = 0; i < dev->max_rx_chan_num; i++) {
		pCh = dev->rx_chan[i];
		if (pCh->control == IFXMIPS_DMA_CH_ON) {
			chan_no = (int)(pCh - dma_chan);

			for (j = 0; j < pCh->desc_len; j++) {
				rx_desc_p = (struct rx_desc *)pCh->desc_base + j;
				pDev = (struct dma_device_info *)(pCh->dma_dev);
				buffer = pDev->buffer_alloc(pCh->packet_size, &byte_offset, (void *)&(pCh->opt[j]));
				if (!buffer)
					break;

				dma_cache_inv((unsigned long)buffer, pCh->packet_size);

				rx_desc_p->Data_Pointer = (u32)CPHYSADDR((u32)buffer);
				rx_desc_p->status.word = 0;
				rx_desc_p->status.field.byte_offset = byte_offset;
				rx_desc_p->status.field.OWN = DMA_OWN;
				rx_desc_p->status.field.data_length = pCh->packet_size;
			}

			local_irq_save(flag);
			ifxmips_w32(chan_no, IFXMIPS_DMA_CS);
			/* check if the descriptor length has changed */
			if (ifxmips_r32(IFXMIPS_DMA_CDLEN) != pCh->desc_len)
				ifxmips_w32(pCh->desc_len, IFXMIPS_DMA_CDLEN);
			ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_CCTRL) & ~1, IFXMIPS_DMA_CCTRL);
			ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_CCTRL) | 2, IFXMIPS_DMA_CCTRL);
			while (ifxmips_r32(IFXMIPS_DMA_CCTRL) & 2)
				;
			ifxmips_w32(0x0a, IFXMIPS_DMA_CIE); /* FIXME: should all the interrupts be enabled here? */
			ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_IRNEN) | (1 << chan_no), IFXMIPS_DMA_IRNEN);
			ifxmips_w32(0x30000, IFXMIPS_DMA_CCTRL);
			local_irq_restore(flag);
			ifxmips_enable_irq(dma_chan[chan_no].irq);
		}
	}
}
EXPORT_SYMBOL(dma_device_register);

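/*
 * The reverse of dma_device_register(): stop every channel that
 * belongs to the client, clear its interrupt state and return any
 * buffers still attached to descriptors through the client's
 * buffer_free() callback.
 */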
void dma_device_unregister(struct dma_device_info *dev)
{
	int i, j;
	int chan_no;
	struct dma_channel_info *pCh;
	struct rx_desc *rx_desc_p;
	struct tx_desc *tx_desc_p;
	unsigned long flag;

	for (i = 0; i < dev->max_tx_chan_num; i++) {
		pCh = dev->tx_chan[i];
		if (pCh->control == IFXMIPS_DMA_CH_ON) {
			chan_no = (int)(dev->tx_chan[i] - dma_chan);
			local_irq_save(flag);
			ifxmips_w32(chan_no, IFXMIPS_DMA_CS);
			pCh->curr_desc = 0;
			pCh->prev_desc = 0;
			pCh->control = IFXMIPS_DMA_CH_OFF;
			ifxmips_w32(0, IFXMIPS_DMA_CIE); /* FIXME: should all the interrupts be disabled here? */
			ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_IRNEN) & ~(1 << chan_no), IFXMIPS_DMA_IRNEN); /* disable interrupts */
			ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_CCTRL) & ~1, IFXMIPS_DMA_CCTRL);
			while (ifxmips_r32(IFXMIPS_DMA_CCTRL) & 1)
				;
			local_irq_restore(flag);

			for (j = 0; j < pCh->desc_len; j++) {
				tx_desc_p = (struct tx_desc *)pCh->desc_base + j;
				if ((tx_desc_p->status.field.OWN == CPU_OWN && tx_desc_p->status.field.C)
				    || (tx_desc_p->status.field.OWN == DMA_OWN && tx_desc_p->status.field.data_length > 0)) {
					dev->buffer_free((u8 *)__va(tx_desc_p->Data_Pointer), (void *)pCh->opt[j]);
				}
				memset(tx_desc_p, 0, sizeof(struct tx_desc));
			}
			/* TODO: free buffers that have not been transferred by the DMA */
		}
	}

	for (i = 0; i < dev->max_rx_chan_num; i++) {
		pCh = dev->rx_chan[i];
		chan_no = (int)(dev->rx_chan[i] - dma_chan);
		ifxmips_disable_irq(pCh->irq);

		local_irq_save(flag);
		g_ifxmips_dma_int_status &= ~(1 << chan_no);
		pCh->curr_desc = 0;
		pCh->prev_desc = 0;
		pCh->control = IFXMIPS_DMA_CH_OFF;

		ifxmips_w32(chan_no, IFXMIPS_DMA_CS);
		ifxmips_w32(0, IFXMIPS_DMA_CIE); /* FIXME: should all the interrupts be disabled here? */
		ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_IRNEN) & ~(1 << chan_no), IFXMIPS_DMA_IRNEN); /* disable interrupts */
		ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_CCTRL) & ~1, IFXMIPS_DMA_CCTRL);
		while (ifxmips_r32(IFXMIPS_DMA_CCTRL) & 1)
			;

		local_irq_restore(flag);
		for (j = 0; j < pCh->desc_len; j++) {
			rx_desc_p = (struct rx_desc *)pCh->desc_base + j;
			if ((rx_desc_p->status.field.OWN == CPU_OWN
			     && rx_desc_p->status.field.C)
			    || (rx_desc_p->status.field.OWN == DMA_OWN
				&& rx_desc_p->status.field.data_length > 0)) {
				dev->buffer_free((u8 *)__va(rx_desc_p->Data_Pointer),
						 (void *)pCh->opt[j]);
			}
		}
	}
}
EXPORT_SYMBOL(dma_device_unregister);

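/*
 * Hand the payload of the current RX descriptor to the caller and
 * refill the slot with a newly allocated buffer. Returns the number
 * of bytes received, or 0 if the descriptor is still owned by the
 * DMA or not yet complete. Ownership of the old buffer passes to the
 * caller, who is expected to free it.
 */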
int dma_device_read(struct dma_device_info *dma_dev, u8 **dataptr, void **opt)
{
	u8 *buf;
	int len;
	int byte_offset = 0;
	void *p = NULL;
	struct dma_channel_info *pCh = dma_dev->rx_chan[dma_dev->current_rx_chan];
	struct rx_desc *rx_desc_p;

	/* get the rx data first */
	rx_desc_p = (struct rx_desc *)pCh->desc_base + pCh->curr_desc;
	if (!(rx_desc_p->status.field.OWN == CPU_OWN && rx_desc_p->status.field.C))
		return 0;

	buf = (u8 *)__va(rx_desc_p->Data_Pointer);
	*dataptr = buf;
	len = rx_desc_p->status.field.data_length;

	if (opt)
		*opt = pCh->opt[pCh->curr_desc];

	/* replace it with a newly allocated buffer */
	buf = dma_dev->buffer_alloc(pCh->packet_size, &byte_offset, &p);

	if (buf) {
		dma_cache_inv((unsigned long)buf, pCh->packet_size);
		pCh->opt[pCh->curr_desc] = p;
		wmb();

		rx_desc_p->Data_Pointer = (u32)CPHYSADDR((u32)buf);
		rx_desc_p->status.word = (DMA_OWN << 31) | (byte_offset << 23) | pCh->packet_size;
		wmb();
	} else {
		*dataptr = NULL;
		if (opt)
			*opt = NULL;
		len = 0;
	}

	/* advance the curr_desc pointer */
	pCh->curr_desc++;
	if (pCh->curr_desc == pCh->desc_len)
		pCh->curr_desc = 0;

	return len;
}
EXPORT_SYMBOL(dma_device_read);

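/*
 * Queue one buffer for transmission. Completed descriptors are
 * reclaimed first (their buffers returned via buffer_free()), then
 * the current descriptor is filled and handed to the DMA with SOP/EOP
 * set, the data having been written back out of the cache. The start
 * address is aligned down to the burst length, with the remainder
 * carried in the byte_offset field. Returns len on success and 0 when
 * no descriptor is free, in which case TX_BUF_FULL_INT is signalled.
 */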
int dma_device_write(struct dma_device_info *dma_dev, u8 *dataptr, int len, void *opt)
{
	unsigned long flag;
	u32 tmp, byte_offset;
	struct dma_channel_info *pCh;
	int chan_no;
	struct tx_desc *tx_desc_p;

	local_irq_save(flag);

	pCh = dma_dev->tx_chan[dma_dev->current_tx_chan];
	chan_no = (int)(pCh - (struct dma_channel_info *)dma_chan);

	tx_desc_p = (struct tx_desc *)pCh->desc_base + pCh->prev_desc;
	while (tx_desc_p->status.field.OWN == CPU_OWN && tx_desc_p->status.field.C) {
		dma_dev->buffer_free((u8 *)__va(tx_desc_p->Data_Pointer), pCh->opt[pCh->prev_desc]);
		memset(tx_desc_p, 0, sizeof(struct tx_desc));
		pCh->prev_desc = (pCh->prev_desc + 1) % (pCh->desc_len);
		tx_desc_p = (struct tx_desc *)pCh->desc_base + pCh->prev_desc;
	}
	tx_desc_p = (struct tx_desc *)pCh->desc_base + pCh->curr_desc;
	/* check whether this descriptor is available */
	if (tx_desc_p->status.field.OWN == DMA_OWN || tx_desc_p->status.field.C) {
		/* if not, tell the upper layer device */
		dma_dev->intr_handler(dma_dev, TX_BUF_FULL_INT);
		local_irq_restore(flag);
		printk(KERN_INFO "%s %d: failed to write!\n", __func__, __LINE__);

		return 0;
	}
	pCh->opt[pCh->curr_desc] = opt;
	/* the byte offset adjusts the start address of the data buffer; it must be a multiple of the burst length */
	byte_offset = ((u32)CPHYSADDR((u32)dataptr)) % ((dma_dev->tx_burst_len) * 4);
	dma_cache_wback((unsigned long)dataptr, len);
	wmb();
	tx_desc_p->Data_Pointer = (u32)CPHYSADDR((u32)dataptr) - byte_offset;
	wmb();
	tx_desc_p->status.word = (DMA_OWN << 31) | DMA_DESC_SOP_SET | DMA_DESC_EOP_SET | (byte_offset << 23) | len;
	wmb();

	pCh->curr_desc++;
	if (pCh->curr_desc == pCh->desc_len)
		pCh->curr_desc = 0;

	/* check whether the next descriptor is available */
	tx_desc_p = (struct tx_desc *)pCh->desc_base + pCh->curr_desc;
	if (tx_desc_p->status.field.OWN == DMA_OWN) {
		/* if not, tell the upper layer device */
		dma_dev->intr_handler(dma_dev, TX_BUF_FULL_INT);
	}

	ifxmips_w32(chan_no, IFXMIPS_DMA_CS);
	tmp = ifxmips_r32(IFXMIPS_DMA_CCTRL);

	if (!(tmp & 1))
		pCh->open(pCh);

	local_irq_restore(flag);

	return len;
}
EXPORT_SYMBOL(dma_device_write);

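/*
 * Build the device and channel tables from a channel map: request one
 * IRQ per channel, install the default callbacks and weights, apply
 * the per-port controller settings, and link each channel to its
 * owning device according to the map's name and direction fields.
 */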
int map_dma_chan(struct dma_chan_map *map)
{
	int i, j;
	int result;

	for (i = 0; i < MAX_DMA_DEVICE_NUM; i++)
		strcpy(dma_devs[i].device_name, global_device_name[i]);

	for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++) {
		dma_chan[i].irq = map[i].irq;
		result = request_irq(dma_chan[i].irq, dma_interrupt, IRQF_DISABLED, map[i].dev_name, (void *)&dma_chan[i]);
		if (result) {
			printk(KERN_WARNING "error, cannot get dma_irq!\n");
			/* release the IRQs that were already requested */
			while (--i >= 0)
				free_irq(dma_chan[i].irq, (void *)&dma_chan[i]);

			return -EFAULT;
		}
	}

	for (i = 0; i < MAX_DMA_DEVICE_NUM; i++) {
		dma_devs[i].num_tx_chan = 0;	/* no tx channels assigned yet */
		dma_devs[i].num_rx_chan = 0;	/* no rx channels assigned yet */
		dma_devs[i].max_rx_chan_num = 0;
		dma_devs[i].max_tx_chan_num = 0;
		dma_devs[i].buffer_alloc = &common_buffer_alloc;
		dma_devs[i].buffer_free = &common_buffer_free;
		dma_devs[i].intr_handler = NULL;
		dma_devs[i].tx_burst_len = 4;
		dma_devs[i].rx_burst_len = 4;
		if (i == 0) {
			ifxmips_w32(0, IFXMIPS_DMA_PS);
			ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_PCTRL) | ((0xf << 8) | (1 << 6)), IFXMIPS_DMA_PCTRL); /* enable dma drop */
		}

		if (i == 1) {
			ifxmips_w32(1, IFXMIPS_DMA_PS);
			ifxmips_w32(0x14, IFXMIPS_DMA_PCTRL); /* deu port setting */
		}

		for (j = 0; j < MAX_DMA_CHANNEL_NUM; j++) {
			dma_chan[j].byte_offset = 0;
			dma_chan[j].open = &open_chan;
			dma_chan[j].close = &close_chan;
			dma_chan[j].reset = &reset_chan;
			dma_chan[j].enable_irq = &enable_ch_irq;
			dma_chan[j].disable_irq = &disable_ch_irq;
			dma_chan[j].rel_chan_no = map[j].rel_chan_no;
			dma_chan[j].control = IFXMIPS_DMA_CH_OFF;
			dma_chan[j].default_weight = IFXMIPS_DMA_CH_DEFAULT_WEIGHT;
			dma_chan[j].weight = dma_chan[j].default_weight;
			dma_chan[j].curr_desc = 0;
			dma_chan[j].prev_desc = 0;
		}

		for (j = 0; j < MAX_DMA_CHANNEL_NUM; j++) {
			if (strcmp(dma_devs[i].device_name, map[j].dev_name) == 0) {
				if (map[j].dir == IFXMIPS_DMA_RX) {
					dma_chan[j].dir = IFXMIPS_DMA_RX;
					dma_devs[i].max_rx_chan_num++;
					dma_devs[i].rx_chan[dma_devs[i].max_rx_chan_num - 1] = &dma_chan[j];
					dma_devs[i].rx_chan[dma_devs[i].max_rx_chan_num - 1]->pri = map[j].pri;
					dma_chan[j].dma_dev = (void *)&dma_devs[i];
				} else if (map[j].dir == IFXMIPS_DMA_TX) {
					dma_chan[j].dir = IFXMIPS_DMA_TX;
					dma_devs[i].max_tx_chan_num++;
					dma_devs[i].tx_chan[dma_devs[i].max_tx_chan_num - 1] = &dma_chan[j];
					dma_devs[i].tx_chan[dma_devs[i].max_tx_chan_num - 1]->pri = map[j].pri;
					dma_chan[j].dma_dev = (void *)&dma_devs[i];
				} else {
					printk(KERN_WARNING "WRONG DMA MAP!\n");
				}
			}
		}
	}

	return 0;
}

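/*
 * One-time controller setup: power the DMA core up through the PMU,
 * soft-reset it, mask all interrupts and put every channel into a
 * known disabled state with its polling counter configured.
 */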
void dma_chip_init(void)
{
	int i;

	/* enable DMA from PMU */
	ifxmips_pmu_enable(IFXMIPS_PMU_PWDCR_DMA);

	/* reset DMA */
	ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_CTRL) | 1, IFXMIPS_DMA_CTRL);

	/* disable all interrupts */
	ifxmips_w32(0, IFXMIPS_DMA_IRNEN);

	for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++) {
		ifxmips_w32(i, IFXMIPS_DMA_CS);
		ifxmips_w32(0x2, IFXMIPS_DMA_CCTRL);
		ifxmips_w32(0x80000040, IFXMIPS_DMA_CPOLL);
		ifxmips_w32(ifxmips_r32(IFXMIPS_DMA_CCTRL) & ~0x1, IFXMIPS_DMA_CCTRL);
	}
}

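/*
 * Driver entry point: initialise the controller, hook up the channel
 * map and carve all descriptor rings out of a single uncached page
 * (20 channels * 25 descriptors * 8 bytes = 4000 bytes <= PAGE_SIZE).
 */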
int ifxmips_dma_init(void)
{
	int i;
	unsigned long page;

	dma_chip_init();
	if (map_dma_chan(default_dma_map))
		BUG();

	/* test the page pointer itself: KSEG1ADDR(0) is non-zero, so
	 * checking the converted address could never catch a failure */
	page = __get_free_page(GFP_DMA);
	if (!page) {
		printk(KERN_WARNING "no memory for descriptor\n");
		return -ENOMEM;
	}
	/* access the descriptors through KSEG1 so they are not cached */
	g_desc_list = (u64 *)KSEG1ADDR(page);

	memset(g_desc_list, 0, PAGE_SIZE);

	for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++) {
		dma_chan[i].desc_base = (u32)g_desc_list + i * IFXMIPS_DMA_DESCRIPTOR_OFFSET * 8;
		dma_chan[i].curr_desc = 0;
		dma_chan[i].desc_len = IFXMIPS_DMA_DESCRIPTOR_OFFSET;

		ifxmips_w32(i, IFXMIPS_DMA_CS);
		ifxmips_w32((u32)CPHYSADDR(dma_chan[i].desc_base), IFXMIPS_DMA_CDBA);
		ifxmips_w32(dma_chan[i].desc_len, IFXMIPS_DMA_CDLEN);
	}

	return 0;
}

arch_initcall(ifxmips_dma_init);

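/*
 * Tear-down path: return the descriptor page (converted back to its
 * cached KSEG0 address) and release all channel IRQs.
 */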
void dma_cleanup(void)
{
	int i;

	free_page(KSEG0ADDR((unsigned long)g_desc_list));
	for (i = 0; i < MAX_DMA_CHANNEL_NUM; i++)
		free_irq(dma_chan[i].irq, (void *)&dma_chan[i]);
}

MODULE_LICENSE("GPL");