2 ** Copyright (C) 2005 Wu Qi Ming <Qi-Ming.Wu@infineon.com>
4 ** This program is free software; you can redistribute it and/or modify
5 ** it under the terms of the GNU General Public License as published by
6 ** the Free Software Foundation; either version 2 of the License, or
7 ** (at your option) any later version.
9 ** This program is distributed in the hope that it will be useful,
10 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
11 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 ** GNU General Public License for more details.
14 ** You should have received a copy of the GNU General Public License
15 ** along with this program; if not, write to the Free Software
16 ** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 * Author: Wu Qi Ming[Qi-Ming.Wu@infineon.com]
22 * Created: 26-September-2005
25 #include <linux/module.h>
26 #include <linux/init.h>
27 #include <linux/sched.h>
28 #include <linux/kernel.h>
29 #include <linux/slab.h>
30 #include <linux/string.h>
31 #include <linux/timer.h>
33 #include <linux/errno.h>
34 #include <linux/proc_fs.h>
35 #include <linux/stat.h>
37 #include <linux/tty.h>
38 #include <linux/selection.h>
39 #include <linux/kmod.h>
40 #include <linux/vmalloc.h>
41 #include <linux/interrupt.h>
42 #include <linux/delay.h>
43 #include <linux/errno.h>
44 #include <linux/uaccess.h>
46 #include <linux/semaphore.h>
52 #include <lantiq_soc.h>
/*
 * Memory-mapped register blocks of the SVIP SoC used by this driver:
 * SYS1 (system/clock control), the DMA core itself, and MBS
 * (presumably a hardware mailbox/semaphore unit -- see mbs_grab()
 * below; confirm against the SoC manual).  The pointers are immutable
 * (*const) and are dereferenced via the sys1_w32/dma_w32/mbs_r32
 * accessor macros used throughout this file.
 */
56 static struct svip_reg_sys1
*const sys1
= (struct svip_reg_sys1
*)LTQ_SYS1_BASE
;
57 static struct svip_reg_dma
*const dma
= (struct svip_reg_dma
*)LTQ_DMA_BASE
;
58 static struct svip_reg_mbs
*const mbs
= (struct svip_reg_mbs
*)LTQ_MBS_BASE
;
60 #define DRV_NAME "ltq_dma"
61 extern void ltq_mask_and_ack_irq(struct irq_data
*data
);
62 extern void ltq_enable_irq(struct irq_data
*data
);
/*
 * Mask and acknowledge a hardware IRQ via the Lantiq interrupt
 * controller.  Wraps ltq_mask_and_ack_irq(), which takes a struct
 * irq_data pointer rather than a plain IRQ number.
 * NOTE(review): several original lines are elided in this chunk
 * (the declarations of 'i' and 'data', and the rest of the if-branch
 * body), so the exact effect of the special case cannot be read here.
 */
64 static inline void mask_and_ack_irq(unsigned int irq_nr
)
/* Special-cases IRQ 137 for the first two calls and logs
 * "eth delay hack" -- looks like a bring-up workaround for the
 * Ethernet IRQ; TODO confirm intent before touching. */
69 if ((i
< 2) && (irq_nr
== 137)) {
70 printk("eth delay hack\n");
73 ltq_mask_and_ack_irq(&data
);
/*
 * Unmask (enable) a hardware IRQ via the Lantiq interrupt controller;
 * counterpart of mask_and_ack_irq() above.
 * NOTE(review): the declaration and setup of 'data' (the struct
 * irq_data built from irq_nr) is elided in this chunk.
 */
76 static inline void svip_enable_irq(unsigned int irq_nr
)
80 ltq_enable_irq(&data
);
/* Error-message helper: printk at KERN_ERR with the calling function's
 * name prefixed to the message. */
83 #define DMA_EMSG(fmt, args...) \
84 printk(KERN_ERR "%s: " fmt, __func__, ## args)
/*
 * Acquire the hardware semaphore: busy-wait until the MBS register
 * mbsr0 reads zero.  Presumably the read itself performs the acquire
 * (typical for hardware semaphore units) -- TODO confirm against the
 * SVIP manual.  Paired with mbs_release() below.
 */
86 static inline void mbs_grab(void)
88 while (mbs_r32(mbsr0
) != 0);
/* Release the hardware semaphore taken by mbs_grab().
 * NOTE(review): the function body is elided in this chunk. */
91 static inline void mbs_release(void)
97 /* max ports connecting to dma */
98 #define LTQ_MAX_DMA_DEVICE_NUM ARRAY_SIZE(dma_devices)
99 /* max dma channels */
100 #define LTQ_MAX_DMA_CHANNEL_NUM ARRAY_SIZE(dma_chan)
102 /* bytes per descriptor */
103 #define DMA_DESCR_SIZE 8
105 #define DMA_DESCR_CH_SIZE (DMA_DESCR_NUM * DMA_DESCR_SIZE)
106 #define DMA_DESCR_TOTAL_SIZE (LTQ_MAX_DMA_CHANNEL_NUM * DMA_DESCR_CH_SIZE)
107 #define DMA_DESCR_MEM_PAGES ((DMA_DESCR_TOTAL_SIZE / PAGE_SIZE) + \
108 (((DMA_DESCR_TOTAL_SIZE % PAGE_SIZE) > 0)))
110 /* budget for interrupt handling */
111 #define DMA_INT_BUDGET 100
112 /* set the correct counter value here! */
113 #define DMA_POLL_COUNTER 32
115 struct proc_dir_entry
*g_dma_dir
;
117 /* device_name | max_rx_chan_num | max_tx_chan_num | drop_enable */
118 struct dma_device_info dma_devices
[] = {
123 { "MCTRL", 1, 1, 0 },
130 /* *dma_dev | dir | pri | irq | rel_chan_no */
131 struct dma_channel_info dma_chan
[] = {
132 { &dma_devices
[0], DIR_RX
, 0, INT_NUM_IM4_IRL0
+ 0, 0 },
133 { &dma_devices
[0], DIR_TX
, 0, INT_NUM_IM4_IRL0
+ 1, 0 },
134 { &dma_devices
[0], DIR_RX
, 1, INT_NUM_IM4_IRL0
+ 2, 1 },
135 { &dma_devices
[0], DIR_TX
, 1, INT_NUM_IM4_IRL0
+ 3, 1 },
136 { &dma_devices
[0], DIR_RX
, 2, INT_NUM_IM4_IRL0
+ 4, 2 },
137 { &dma_devices
[0], DIR_TX
, 2, INT_NUM_IM4_IRL0
+ 5, 2 },
138 { &dma_devices
[0], DIR_RX
, 3, INT_NUM_IM4_IRL0
+ 6, 3 },
139 { &dma_devices
[0], DIR_TX
, 3, INT_NUM_IM4_IRL0
+ 7, 3 },
140 { &dma_devices
[1], DIR_RX
, 0, INT_NUM_IM4_IRL0
+ 8, 0 },
141 { &dma_devices
[1], DIR_TX
, 0, INT_NUM_IM4_IRL0
+ 9, 0 },
142 { &dma_devices
[2], DIR_RX
, 0, INT_NUM_IM4_IRL0
+ 10, 0 },
143 { &dma_devices
[2], DIR_TX
, 0, INT_NUM_IM4_IRL0
+ 11, 0 },
144 { &dma_devices
[3], DIR_RX
, 0, INT_NUM_IM4_IRL0
+ 12, 0 },
145 { &dma_devices
[3], DIR_TX
, 0, INT_NUM_IM4_IRL0
+ 13, 0 },
146 { &dma_devices
[4], DIR_RX
, 0, INT_NUM_IM4_IRL0
+ 14, 0 },
147 { &dma_devices
[4], DIR_TX
, 0, INT_NUM_IM4_IRL0
+ 15, 0 },
148 { &dma_devices
[5], DIR_RX
, 0, INT_NUM_IM4_IRL0
+ 16, 0 },
149 { &dma_devices
[5], DIR_TX
, 0, INT_NUM_IM4_IRL0
+ 17, 0 },
150 { &dma_devices
[6], DIR_RX
, 1, INT_NUM_IM3_IRL0
+ 18, 0 },
151 { &dma_devices
[6], DIR_TX
, 1, INT_NUM_IM3_IRL0
+ 19, 0 },
152 { &dma_devices
[7], DIR_RX
, 2, INT_NUM_IM4_IRL0
+ 20, 0 },
153 { &dma_devices
[7], DIR_TX
, 2, INT_NUM_IM4_IRL0
+ 21, 0 },
154 { &dma_devices
[8], DIR_RX
, 3, INT_NUM_IM4_IRL0
+ 22, 0 },
155 { &dma_devices
[8], DIR_TX
, 3, INT_NUM_IM4_IRL0
+ 23, 0 }
158 u64
*g_desc_list
[DMA_DESCR_MEM_PAGES
];
160 volatile u32 g_dma_int_status
= 0;
162 /* 0 - not in process, 1 - in process */
163 volatile int g_dma_in_process
;
165 int ltq_dma_init(void);
166 void do_dma_tasklet(unsigned long);
167 DECLARE_TASKLET(dma_tasklet
, do_dma_tasklet
, 0);
168 irqreturn_t
dma_interrupt(int irq
, void *dev_id
);
170 u8
*common_buffer_alloc(int len
, int *byte_offset
, void **opt
)
172 u8
*buffer
= kmalloc(len
* sizeof(u8
), GFP_KERNEL
);
177 void common_buffer_free(u8
*dataptr
, void *opt
)
/*
 * Enable interrupt generation for one DMA channel.  The channel index
 * is recovered from the pointer's offset into the global dma_chan[]
 * table.  RX channels additionally enable the descriptor-under-run
 * interrupt; finally the channel's line is unmasked at the interrupt
 * controller.
 * NOTE(review): the declarations of 'val'/'flag' and one register
 * write (presumably val -> cie) are elided in this chunk.
 */
182 void enable_ch_irq(struct dma_channel_info
*ch
)
184 int chan_no
= (int)(ch
- dma_chan
);
/* RX: descriptor-complete + under-run; TX: descriptor-complete only */
188 if (ch
->dir
== DIR_RX
)
189 val
= DMA_CIE_DESCPT
| DMA_CIE_DUR
;
191 val
= DMA_CIE_DESCPT
;
/* the cs (channel-select) register is shared global state, so the
 * select + write sequence must not be interleaved with the IRQ path */
193 local_irq_save(flag
);
195 dma_w32(chan_no
, cs
);
/* set this channel's bit in the global interrupt-enable register */
197 dma_w32_mask(0, 1 << chan_no
, irnen
);
199 local_irq_restore(flag
);
201 svip_enable_irq(ch
->irq
);
/*
 * Disable interrupt generation for one DMA channel: clear its pending
 * bit in the software status word, mask it in the DMA core's global
 * interrupt-enable register, then mask/ack the line at the interrupt
 * controller.
 * NOTE(review): the declaration of 'flag' and one channel-register
 * write are elided in this chunk.
 */
204 void disable_ch_irq(struct dma_channel_info
*ch
)
207 int chan_no
= (int)(ch
- dma_chan
);
209 local_irq_save(flag
);
/* drop any pending soft-status so the tasklet won't service it */
210 g_dma_int_status
&= ~(1 << chan_no
);
212 dma_w32(chan_no
, cs
);
/* clear this channel's bit in the global interrupt-enable register */
215 dma_w32_mask(1 << chan_no
, 0, irnen
);
216 local_irq_restore(flag
);
218 mask_and_ack_irq(ch
->irq
);
221 int open_chan(struct dma_channel_info
*ch
)
225 int chan_no
= (int)(ch
- dma_chan
);
228 struct rx_desc
*rx_desc_p
;
229 struct tx_desc
*tx_desc_p
;
231 if (ch
->control
== LTQ_DMA_CH_ON
)
234 if (ch
->dir
== DIR_RX
) {
235 for (j
= 0; j
< ch
->desc_len
; j
++) {
236 rx_desc_p
= (struct rx_desc
*)ch
->desc_base
+j
;
237 buffer
= ch
->dma_dev
->buffer_alloc(ch
->packet_size
,
239 (void *)&ch
->opt
[j
]);
243 rx_desc_p
->data_pointer
= (u32
)CPHYSADDR((u32
)buffer
);
244 rx_desc_p
->status
.word
= 0;
245 rx_desc_p
->status
.field
.byte_offset
= byte_offset
;
246 rx_desc_p
->status
.field
.data_length
= ch
->packet_size
;
247 rx_desc_p
->status
.field
.own
= DMA_OWN
;
250 for (j
= 0; j
< ch
->desc_len
; j
++) {
251 tx_desc_p
= (struct tx_desc
*)ch
->desc_base
+ j
;
252 tx_desc_p
->data_pointer
= 0;
253 tx_desc_p
->status
.word
= 0;
258 local_irq_save(flag
);
260 dma_w32(chan_no
, cs
);
261 dma_w32(ch
->desc_len
, cdlen
);
263 dma_w32(DMA_CCTRL_TXWGT_VAL(ch
->tx_weight
)
264 | DMA_CCTRL_CLASS_VAL(ch
->pri
)
265 | (ch
->dir
== DIR_RX
? DMA_CCTRL_ON_OFF
: 0), cctrl
);
267 ch
->control
= LTQ_DMA_CH_ON
;
268 local_irq_restore(flag
);
270 if (request_irq(ch
->irq
, dma_interrupt
,
271 IRQF_DISABLED
, "dma-core", (void *)ch
) != 0) {
272 printk(KERN_ERR
"error, cannot get dma_irq!\n");
280 int close_chan(struct dma_channel_info
*ch
)
284 int chan_no
= (int)(ch
- dma_chan
);
285 struct rx_desc
*desc_p
;
287 if (ch
->control
== LTQ_DMA_CH_OFF
)
290 local_irq_save(flag
);
292 dma_w32(chan_no
, cs
);
293 dma_w32_mask(DMA_CCTRL_ON_OFF
, 0, cctrl
);
296 free_irq(ch
->irq
, (void *)ch
);
297 ch
->control
= LTQ_DMA_CH_OFF
;
298 local_irq_restore(flag
);
300 /* free descriptors in use */
301 for (j
= 0; j
< ch
->desc_len
; j
++) {
302 desc_p
= (struct rx_desc
*)ch
->desc_base
+j
;
303 if ((desc_p
->status
.field
.own
== CPU_OWN
&&
304 desc_p
->status
.field
.c
) ||
305 (desc_p
->status
.field
.own
== DMA_OWN
)) {
306 if (desc_p
->data_pointer
) {
307 ch
->dma_dev
->buffer_free((u8
*)__va(desc_p
->data_pointer
),
309 desc_p
->data_pointer
= (u32
)NULL
;
/*
 * Soft-reset one DMA channel: set DMA_CCTRL_RST in the channel control
 * register, then poll cctrl until the hardware clears the bit again.
 * NOTE(review): declarations of 'val'/'flag', the do-loop opening and
 * the return are elided in this chunk.
 */
317 int reset_chan(struct dma_channel_info
*ch
)
321 int chan_no
= (int)(ch
- dma_chan
);
325 local_irq_save(flag
);
327 dma_w32(chan_no
, cs
);
328 dma_w32_mask(0, DMA_CCTRL_RST
, cctrl
);
330 local_irq_restore(flag
);
/* poll with interrupts disabled around each cs-select + cctrl read so
 * the shared channel-select register is used consistently */
333 local_irq_save(flag
);
335 dma_w32(chan_no
, cs
);
336 val
= dma_r32(cctrl
);
338 local_irq_restore(flag
);
339 } while (val
& DMA_CCTRL_RST
);
/*
 * Service a receive-channel interrupt (called from the tasklet).
 * Acknowledges the descriptor-complete status, checks the current RX
 * descriptor and, when a packet completed, notifies the owning device
 * through its intr_handler callback (RCV_INT).  Otherwise the pending
 * bit is dropped and the channel IRQ re-enabled.
 * NOTE(review): the declaration of 'flag' and parts of the if/else
 * structure are elided in this chunk.
 */
344 static inline void rx_chan_intr_handler(int chan_no
)
346 struct dma_device_info
*dma_dev
= (struct dma_device_info
*)
347 dma_chan
[chan_no
].dma_dev
;
348 struct dma_channel_info
*ch
= &dma_chan
[chan_no
];
349 struct rx_desc
*rx_desc_p
;
353 local_irq_save(flag
);
355 dma_w32(chan_no
, cs
);
/* ack the descriptor-complete interrupt in the channel status reg */
357 dma_w32(DMA_CIS_DESCPT
, cis
);
360 /* handle command complete interrupt */
361 rx_desc_p
= (struct rx_desc
*)ch
->desc_base
+ ch
->curr_desc
;
/* completed means: CPU-owned again AND complete bit set */
362 if ((rx_desc_p
->status
.word
& (DMA_DESC_OWN_DMA
| DMA_DESC_CPT_SET
)) ==
364 local_irq_restore(flag
);
365 /* Every thing is correct, then we inform the upper layer */
366 dma_dev
->current_rx_chan
= ch
->rel_chan_no
;
367 if (dma_dev
->intr_handler
)
368 dma_dev
->intr_handler(dma_dev
, RCV_INT
);
/* no completed descriptor: clear soft status and re-arm the IRQ */
371 g_dma_int_status
&= ~(1 << chan_no
);
372 local_irq_restore(flag
);
373 svip_enable_irq(dma_chan
[chan_no
].irq
);
/*
 * Service a transmit-channel interrupt (called from the tasklet).
 * Acknowledges the descriptor-complete status; when the previously
 * submitted descriptor has completed, frees its buffer via the
 * device's buffer_free callback, clears the descriptor, notifies the
 * upper layer (TRANSMIT_CPT_INT) and advances prev_desc.  Otherwise
 * the pending bit is dropped and the channel IRQ re-enabled.
 * NOTE(review): the declaration of 'flag' and parts of the if/else
 * structure are elided in this chunk.
 */
377 static inline void tx_chan_intr_handler(int chan_no
)
379 struct dma_device_info
*dma_dev
= (struct dma_device_info
*)
380 dma_chan
[chan_no
].dma_dev
;
381 struct dma_channel_info
*ch
= &dma_chan
[chan_no
];
382 struct tx_desc
*tx_desc_p
;
385 local_irq_save(flag
);
387 dma_w32(chan_no
, cs
);
/* ack the descriptor-complete interrupt in the channel status reg */
388 dma_w32(DMA_CIS_DESCPT
, cis
);
391 tx_desc_p
= (struct tx_desc
*)ch
->desc_base
+ch
->prev_desc
;
/* completed means: CPU-owned again AND complete bit set */
392 if ((tx_desc_p
->status
.word
& (DMA_DESC_OWN_DMA
| DMA_DESC_CPT_SET
)) ==
394 local_irq_restore(flag
);
/* return the transmitted buffer to the device's allocator, together
 * with the opaque 'opt' token stored at submit time */
396 dma_dev
->buffer_free((u8
*)__va(tx_desc_p
->data_pointer
),
397 ch
->opt
[ch
->prev_desc
]);
398 memset(tx_desc_p
, 0, sizeof(struct tx_desc
));
399 dma_dev
->current_tx_chan
= ch
->rel_chan_no
;
400 if (dma_dev
->intr_handler
)
401 dma_dev
->intr_handler(dma_dev
, TRANSMIT_CPT_INT
);
/* advance to the next descriptor in the ring (wraps at desc_len) */
404 ch
->prev_desc
= (ch
->prev_desc
+ 1) % (ch
->desc_len
);
/* nothing completed: clear soft status and re-arm the IRQ */
406 g_dma_int_status
&= ~(1 << chan_no
);
407 local_irq_restore(flag
);
408 svip_enable_irq(dma_chan
[chan_no
].irq
);
/*
 * Tasklet body: drain pending channel interrupts recorded in
 * g_dma_int_status.  Channels are picked by a weighted round-robin
 * ("WFQ") scan; when all pending channels have exhausted their weight,
 * every channel's weight is reset to its default.  A budget bounds the
 * work per tasklet run; leftover work re-schedules the tasklet.
 * NOTE(review): declarations of 'i'/'weight'/'chan_no'/'flag', the
 * budget decrement and several brace lines are elided in this chunk.
 */
412 void do_dma_tasklet(unsigned long unused
)
416 int budget
= DMA_INT_BUDGET
;
420 while (g_dma_int_status
) {
/* budget exhausted: defer the remaining work to a later run */
422 tasklet_schedule(&dma_tasklet
);
427 /* WFQ algorithm to select the channel */
428 for (i
= 0; i
< LTQ_MAX_DMA_CHANNEL_NUM
; i
++) {
429 if (g_dma_int_status
& (1 << i
) &&
430 dma_chan
[i
].weight
> 0) {
/* remember the pending channel with the largest remaining weight */
431 if (dma_chan
[i
].weight
> weight
) {
433 weight
= dma_chan
[chan_no
].weight
;
438 if (dma_chan
[chan_no
].dir
== DIR_RX
)
439 rx_chan_intr_handler(chan_no
);
441 tx_chan_intr_handler(chan_no
);
443 /* reset all the channels */
444 for (i
= 0; i
< LTQ_MAX_DMA_CHANNEL_NUM
; i
++)
445 dma_chan
[i
].weight
= dma_chan
[i
].default_weight
;
/* hand-over with dma_interrupt(): clear the in-process flag, but if
 * new work arrived meanwhile, claim it and re-schedule ourselves */
449 local_irq_save(flag
);
450 g_dma_in_process
= 0;
451 if (g_dma_int_status
) {
452 g_dma_in_process
= 1;
453 tasklet_schedule(&dma_tasklet
);
455 local_irq_restore(flag
);
/*
 * Top-half DMA interrupt handler.  dev_id is the dma_channel_info
 * passed to request_irq() in open_chan(); the channel index is its
 * offset in dma_chan[].  The handler only records the pending channel
 * in g_dma_int_status, acks/masks the IRQ, and kicks the tasklet --
 * all real work happens in do_dma_tasklet().
 * NOTE(review): the declaration of 'chan_no' and the error-path lines
 * after the range-check printk are elided in this chunk.
 */
458 irqreturn_t
dma_interrupt(int irq
, void *dev_id
)
460 struct dma_channel_info
*ch
;
463 ch
= (struct dma_channel_info
*)dev_id
;
464 chan_no
= (int)(ch
- dma_chan
);
/* defensive: a bogus dev_id would index outside dma_chan[] */
466 if ((unsigned)chan_no
>= LTQ_MAX_DMA_CHANNEL_NUM
) {
467 printk(KERN_ERR
"error: dma_interrupt irq=%d chan_no=%d\n",
471 g_dma_int_status
|= 1 << chan_no
;
/* ack the channel in the DMA core's interrupt-capture register */
472 dma_w32(1 << chan_no
, irncr
);
/* mask the line until the tasklet has serviced the channel */
473 mask_and_ack_irq(irq
);
/* schedule the tasklet only once; it re-schedules itself if needed */
475 if (!g_dma_in_process
) {
476 g_dma_in_process
= 1;
477 tasklet_schedule(&dma_tasklet
);
480 return IRQ_RETVAL(1);
/*
 * Reserve a DMA device (port) by name for exclusive use by a driver.
 * Scans dma_devices[] for a matching device_name and marks it
 * reserved.  Returns a pointer to the device on success.
 * NOTE(review): the declaration of 'i' and the return statements for
 * the already-reserved and not-found paths (presumably NULL) are
 * elided in this chunk.
 */
483 struct dma_device_info
*dma_device_reserve(char *dev_name
)
488 for (i
= 0; i
< LTQ_MAX_DMA_DEVICE_NUM
; i
++) {
489 if (strcmp(dev_name
, dma_devices
[i
].device_name
) == 0) {
/* already taken by someone else */
490 if (dma_devices
[i
].reserved
)
492 dma_devices
[i
].reserved
= 1;
/* loop ran to the end without a name match */
497 if (i
== LTQ_MAX_DMA_DEVICE_NUM
)
500 return &dma_devices
[i
];
502 EXPORT_SYMBOL(dma_device_reserve
);
/*
 * Release a DMA device previously obtained via dma_device_reserve():
 * simply clears the reservation flag so it can be reserved again.
 * NOTE(review): braces and the return statement are elided in this
 * chunk.
 */
504 int dma_device_release(struct dma_device_info
*dma_dev
)
506 dma_dev
->reserved
= 0;
510 EXPORT_SYMBOL(dma_device_release
);
512 int dma_device_register(struct dma_device_info
*dma_dev
)
514 int port_no
= (int)(dma_dev
- dma_devices
);
518 switch (dma_dev
->tx_burst_len
) {
530 switch (dma_dev
->rx_burst_len
) {
541 local_irq_save(flag
);
543 dma_w32(port_no
, ps
);
544 dma_w32(DMA_PCTRL_TXWGT_VAL(dma_dev
->tx_weight
)
545 | DMA_PCTRL_TXENDI_VAL(dma_dev
->tx_endianness_mode
)
546 | DMA_PCTRL_RXENDI_VAL(dma_dev
->rx_endianness_mode
)
547 | DMA_PCTRL_PDEN_VAL(dma_dev
->drop_enable
)
548 | DMA_PCTRL_TXBL_VAL(txbl
)
549 | DMA_PCTRL_RXBL_VAL(rxbl
), pctrl
);
551 local_irq_restore(flag
);
555 EXPORT_SYMBOL(dma_device_register
);
/*
 * Unregister a DMA device: flush its port (DMA_PCTRL_GPC -- per the
 * original comment this only has an effect for the memcopy port) and
 * soft-reset all of the device's TX and RX channels.
 * NOTE(review): declarations of 'i'/'flag' and the return statement
 * are elided in this chunk.
 */
557 int dma_device_unregister(struct dma_device_info
*dma_dev
)
560 int port_no
= (int)(dma_dev
- dma_devices
);
563 /* flush memcopy module; has no effect for other ports */
564 local_irq_save(flag
);
/* select the port, then set the general-purpose-control flush bit */
566 dma_w32(port_no
, ps
);
567 dma_w32_mask(0, DMA_PCTRL_GPC
, pctrl
);
569 local_irq_restore(flag
);
571 for (i
= 0; i
< dma_dev
->max_tx_chan_num
; i
++)
572 reset_chan(dma_dev
->tx_chan
[i
]);
574 for (i
= 0; i
< dma_dev
->max_rx_chan_num
; i
++)
575 reset_chan(dma_dev
->rx_chan
[i
]);
579 EXPORT_SYMBOL(dma_device_unregister
);
582 * Read Packet from DMA Rx channel.
583 * The function gets the data from the current rx descriptor assigned
584 * to the passed DMA device and passes it back to the caller.
585 * The function is called in the context of DMA interrupt.
586 * In detail the following actions are done:
587 * - get current receive descriptor
588 * - allocate memory via allocation callback function
589 * - pass data from descriptor to allocated memory
590 * - update channel weight
591 * - release descriptor
592 * - update current descriptor position
594 * \param *dma_dev - pointer to DMA device structure
595 * \param **dataptr - pointer to received data
597 * \return packet length - length of received data
600 int dma_device_read(struct dma_device_info
*dma_dev
, u8
**dataptr
, void **opt
)
607 struct dma_channel_info
*ch
=
608 dma_dev
->rx_chan
[dma_dev
->current_rx_chan
];
610 struct rx_desc
*rx_desc_p
;
612 /* get the rx data first */
613 rx_desc_p
= (struct rx_desc
*)ch
->desc_base
+ch
->curr_desc
;
614 buf
= (u8
*)__va(rx_desc_p
->data_pointer
);
615 *(u32
*)dataptr
= (u32
)buf
;
616 len
= rx_desc_p
->status
.field
.data_length
;
617 #ifndef CONFIG_MIPS_UNCACHED
618 dma_cache_inv((unsigned long)buf
, len
);
621 *(int *)opt
= (int)ch
->opt
[ch
->curr_desc
];
623 /* replace with a new allocated buffer */
624 buf
= dma_dev
->buffer_alloc(ch
->packet_size
, &byte_offset
, &p
);
626 ch
->opt
[ch
->curr_desc
] = p
;
629 rx_desc_p
->data_pointer
= (u32
)CPHYSADDR((u32
)buf
);
630 rx_desc_p
->status
.word
= (DMA_OWN
<< 31) \
631 |(byte_offset
<< 23) \
642 /* increase the curr_desc pointer */
644 if (ch
->curr_desc
== ch
->desc_len
)
646 /* return the length of the received packet */
649 EXPORT_SYMBOL(dma_device_read
);
652 * Write Packet through DMA Tx channel to peripheral.
654 * \param *dma_dev - pointer to DMA device structure
655 * \param *dataptr - pointer to data to be sent
656 * \param len - amount of data bytes to be sent
658 * \return len - length of transmitted data
661 int dma_device_write(struct dma_device_info
*dma_dev
, u8
*dataptr
, int len
,
666 struct dma_channel_info
*ch
;
668 struct tx_desc
*tx_desc_p
;
669 local_irq_save(flag
);
671 ch
= dma_dev
->tx_chan
[dma_dev
->current_tx_chan
];
672 chan_no
= (int)(ch
- dma_chan
);
674 if (ch
->control
== LTQ_DMA_CH_OFF
) {
675 local_irq_restore(flag
);
676 printk(KERN_ERR
"%s: dma channel %d not enabled!\n",
681 tx_desc_p
= (struct tx_desc
*)ch
->desc_base
+ch
->curr_desc
;
682 /* Check whether this descriptor is available */
683 if (tx_desc_p
->status
.word
& (DMA_DESC_OWN_DMA
| DMA_DESC_CPT_SET
)) {
684 /* if not , the tell the upper layer device */
685 dma_dev
->intr_handler(dma_dev
, TX_BUF_FULL_INT
);
686 local_irq_restore(flag
);
689 ch
->opt
[ch
->curr_desc
] = opt
;
690 /* byte offset----to adjust the starting address of the data buffer,
691 * should be multiple of the burst length.*/
692 byte_offset
= ((u32
)CPHYSADDR((u32
)dataptr
)) %
693 (dma_dev
->tx_burst_len
* 4);
694 #ifndef CONFIG_MIPS_UNCACHED
695 dma_cache_wback((unsigned long)dataptr
, len
);
698 tx_desc_p
->data_pointer
= (u32
)CPHYSADDR((u32
)dataptr
) - byte_offset
;
700 tx_desc_p
->status
.word
= (DMA_OWN
<< 31)
703 | (byte_offset
<< 23)
707 if (ch
->xfer_cnt
== 0) {
709 dma_w32(chan_no
, cs
);
710 dma_w32_mask(0, DMA_CCTRL_ON_OFF
, cctrl
);
716 if (ch
->curr_desc
== ch
->desc_len
)
719 local_irq_restore(flag
);
722 EXPORT_SYMBOL(dma_device_write
);
725 * Display descriptor list via proc file
727 * \param chan_no - logical channel number
730 int desc_list_proc_read(char *buf
, char **start
, off_t offset
,
731 int count
, int *eof
, void *data
)
738 if ((chan_no
== 0) && (offset
> count
)) {
750 p
= (u32
*)dma_chan
[chan_no
].desc_base
;
752 if (dma_chan
[chan_no
].dir
== DIR_RX
)
753 len
+= sprintf(buf
+ len
,
754 "channel %d %s Rx descriptor list:\n",
755 chan_no
, dma_chan
[chan_no
].dma_dev
->device_name
);
757 len
+= sprintf(buf
+ len
,
758 "channel %d %s Tx descriptor list:\n",
759 chan_no
, dma_chan
[chan_no
].dma_dev
->device_name
);
760 len
+= sprintf(buf
+ len
,
761 " no address data pointer command bits "
762 "(Own, Complete, SoP, EoP, Offset) \n");
763 len
+= sprintf(buf
+ len
,
764 "----------------------------------------------"
765 "-----------------------------------\n");
766 for (i
= 0; i
< dma_chan
[chan_no
].desc_len
; i
++) {
767 len
+= sprintf(buf
+ len
, "%3d ", i
);
768 len
+= sprintf(buf
+ len
, "0x%08x ", (u32
)(p
+ (i
* 2)));
769 len
+= sprintf(buf
+ len
, "%08x ", *(p
+ (i
* 2 + 1)));
770 len
+= sprintf(buf
+ len
, "%08x ", *(p
+ (i
* 2)));
772 if (*(p
+ (i
* 2)) & 0x80000000)
773 len
+= sprintf(buf
+ len
, "D ");
775 len
+= sprintf(buf
+ len
, "C ");
776 if (*(p
+ (i
* 2)) & 0x40000000)
777 len
+= sprintf(buf
+ len
, "C ");
779 len
+= sprintf(buf
+ len
, "c ");
780 if (*(p
+ (i
* 2)) & 0x20000000)
781 len
+= sprintf(buf
+ len
, "S ");
783 len
+= sprintf(buf
+ len
, "s ");
784 if (*(p
+ (i
* 2)) & 0x10000000)
785 len
+= sprintf(buf
+ len
, "E ");
787 len
+= sprintf(buf
+ len
, "e ");
789 /* byte offset is different for rx and tx descriptors*/
790 if (dma_chan
[chan_no
].dir
== DIR_RX
) {
791 len
+= sprintf(buf
+ len
, "%01x ",
792 (*(p
+ (i
* 2)) & 0x01800000) >> 23);
794 len
+= sprintf(buf
+ len
, "%02x ",
795 (*(p
+ (i
* 2)) & 0x0F800000) >> 23);
798 if (dma_chan
[chan_no
].curr_desc
== i
)
799 len
+= sprintf(buf
+ len
, "<- CURR");
801 if (dma_chan
[chan_no
].prev_desc
== i
)
802 len
+= sprintf(buf
+ len
, "<- PREV");
804 len
+= sprintf(buf
+ len
, "\n");
808 len
+= sprintf(buf
+ len
, "\n");
810 if (chan_no
> LTQ_MAX_DMA_CHANNEL_NUM
- 1)
818 * Displays the weight of all DMA channels via proc file
828 * \return len - amount of bytes written to file
830 int channel_weight_proc_read(char *buf
, char **start
, off_t offset
,
831 int count
, int *eof
, void *data
)
835 len
+= sprintf(buf
+ len
, "Qos dma channel weight list\n");
836 len
+= sprintf(buf
+ len
, "channel_num default_weight "
837 "current_weight device Tx/Rx\n");
838 len
+= sprintf(buf
+ len
, "---------------------------"
839 "---------------------------------\n");
840 for (i
= 0; i
< LTQ_MAX_DMA_CHANNEL_NUM
; i
++) {
841 struct dma_channel_info
*ch
= &dma_chan
[i
];
843 if (ch
->dir
== DIR_RX
) {
844 len
+= sprintf(buf
+ len
,
847 i
, ch
->default_weight
, ch
->weight
,
848 ch
->dma_dev
->device_name
);
850 len
+= sprintf(buf
+ len
,
853 i
, ch
->default_weight
, ch
->weight
,
854 ch
->dma_dev
->device_name
);
862 * Provides DMA Register Content to proc file
863 * This function reads the content of general DMA Registers, DMA Channel
864 * Registers and DMA Port Registers and performs a structures output to the
873 * \return len - amount of bytes written to file
875 int dma_register_proc_read(char *buf
, char **start
, off_t offset
,
876 int count
, int *eof
, void *data
)
882 static int blockcount
;
883 static int channel_no
;
885 if ((blockcount
== 0) && (offset
> count
)) {
890 switch (blockcount
) {
892 len
+= sprintf(buf
+ len
, "\nGeneral DMA Registers\n");
893 len
+= sprintf(buf
+ len
, "-------------------------"
894 "----------------\n");
895 len
+= sprintf(buf
+ len
, "CLC= %08x\n", dma_r32(clc
));
896 len
+= sprintf(buf
+ len
, "ID= %08x\n", dma_r32(id
));
897 len
+= sprintf(buf
+ len
, "DMA_CPOLL= %08x\n", dma_r32(cpoll
));
898 len
+= sprintf(buf
+ len
, "DMA_CS= %08x\n", dma_r32(cs
));
899 len
+= sprintf(buf
+ len
, "DMA_PS= %08x\n", dma_r32(ps
));
900 len
+= sprintf(buf
+ len
, "DMA_IRNEN= %08x\n", dma_r32(irnen
));
901 len
+= sprintf(buf
+ len
, "DMA_IRNCR= %08x\n", dma_r32(irncr
));
902 len
+= sprintf(buf
+ len
, "DMA_IRNICR= %08x\n",
904 len
+= sprintf(buf
+ len
, "\nDMA Channel Registers\n");
909 /* If we had an overflow start at beginning of buffer
910 * otherwise use offset */
911 if (channel_no
!= 0) {
918 local_irq_save(flags
);
919 for (i
= channel_no
; i
< LTQ_MAX_DMA_CHANNEL_NUM
; i
++) {
920 struct dma_channel_info
*ch
= &dma_chan
[i
];
922 if (len
+ 300 > limit
) {
923 local_irq_restore(flags
);
928 len
+= sprintf(buf
+ len
, "----------------------"
929 "-------------------\n");
930 if (ch
->dir
== DIR_RX
) {
931 len
+= sprintf(buf
+ len
,
932 "Channel %d - Device %s Rx\n",
933 i
, ch
->dma_dev
->device_name
);
935 len
+= sprintf(buf
+ len
,
936 "Channel %d - Device %s Tx\n",
937 i
, ch
->dma_dev
->device_name
);
940 len
+= sprintf(buf
+ len
, "DMA_CCTRL= %08x\n",
942 len
+= sprintf(buf
+ len
, "DMA_CDBA= %08x\n",
944 len
+= sprintf(buf
+ len
, "DMA_CIE= %08x\n",
946 len
+= sprintf(buf
+ len
, "DMA_CIS= %08x\n",
948 len
+= sprintf(buf
+ len
, "DMA_CDLEN= %08x\n",
951 local_irq_restore(flags
);
959 * display port dependent registers
961 len
+= sprintf(buf
+ len
, "\nDMA Port Registers\n");
962 len
+= sprintf(buf
+ len
,
963 "-----------------------------------------\n");
964 local_irq_save(flags
);
965 for (i
= 0; i
< LTQ_MAX_DMA_DEVICE_NUM
; i
++) {
967 len
+= sprintf(buf
+ len
,
968 "Port %d DMA_PCTRL= %08x\n",
971 local_irq_restore(flags
);
984 * Open Method of DMA Device Driver
985 * This function increments the device driver's use counter.
991 static int dma_open(struct inode
*inode
, struct file
*file
)
997 * Release Method of DMA Device driver.
998 * This function decrements the device driver's use counter.
1004 static int dma_release(struct inode
*inode
, struct file
*file
)
1006 /* release the resources */
1011 * Ioctl Interface to DMA Module
1014 * \return 0 - initialization successful
1015 * <0 - failed initialization
1017 static long dma_ioctl(struct file
*file
,
1018 unsigned int cmd
, unsigned long arg
)
1021 /* TODO: add some user controled functions here */
1025 const static struct file_operations dma_fops
= {
1026 .owner
= THIS_MODULE
,
1028 .release
= dma_release
,
1029 .unlocked_ioctl
= dma_ioctl
,
1032 void map_dma_chan(struct dma_channel_info
*map
)
1036 /* assign default values for channel settings */
1037 for (i
= 0; i
< LTQ_MAX_DMA_CHANNEL_NUM
; i
++) {
1038 dma_chan
[i
].byte_offset
= 0;
1039 dma_chan
[i
].open
= &open_chan
;
1040 dma_chan
[i
].close
= &close_chan
;
1041 dma_chan
[i
].reset
= &reset_chan
;
1042 dma_chan
[i
].enable_irq
= enable_ch_irq
;
1043 dma_chan
[i
].disable_irq
= disable_ch_irq
;
1044 dma_chan
[i
].tx_weight
= 1;
1045 dma_chan
[i
].control
= 0;
1046 dma_chan
[i
].default_weight
= LTQ_DMA_CH_DEFAULT_WEIGHT
;
1047 dma_chan
[i
].weight
= dma_chan
[i
].default_weight
;
1048 dma_chan
[i
].curr_desc
= 0;
1049 dma_chan
[i
].prev_desc
= 0;
1052 /* assign default values for port settings */
1053 for (i
= 0; i
< LTQ_MAX_DMA_DEVICE_NUM
; i
++) {
1054 /*set default tx channel number to be one*/
1055 dma_devices
[i
].num_tx_chan
= 1;
1056 /*set default rx channel number to be one*/
1057 dma_devices
[i
].num_rx_chan
= 1;
1058 dma_devices
[i
].buffer_alloc
= common_buffer_alloc
;
1059 dma_devices
[i
].buffer_free
= common_buffer_free
;
1060 dma_devices
[i
].intr_handler
= NULL
;
1061 dma_devices
[i
].tx_burst_len
= 4;
1062 dma_devices
[i
].rx_burst_len
= 4;
1063 #ifdef CONFIG_CPU_LITTLE_ENDIAN
1064 dma_devices
[i
].tx_endianness_mode
= 0;
1065 dma_devices
[i
].rx_endianness_mode
= 0;
1067 dma_devices
[i
].tx_endianness_mode
= 3;
1068 dma_devices
[i
].rx_endianness_mode
= 3;
/*
 * One-time hardware initialisation of the DMA core: enable its clock
 * in SYS1, reset the controller, enable descriptor polling for all
 * channels, and leave every channel's interrupt disabled.
 * NOTE(review): the declaration of 'i' and the interrupt-disable
 * register write hinted at by the comment are elided in this chunk.
 */
1073 void dma_chip_init(void)
/* gate the DMA clock on in the SYS1 clock-enable register */
1077 sys1_w32(SYS1_CLKENR_DMA
, clkenr
);
/* global controller reset */
1080 dma_w32(DMA_CTRL_RST
, ctrl
);
1082 /* disable all the interrupts first */
1085 /* enable polling for all channels */
1086 dma_w32(DMA_CPOLL_EN
| DMA_CPOLL_CNT_VAL(DMA_POLL_COUNTER
), cpoll
);
1088 /****************************************************/
1089 for (i
= 0; i
< LTQ_MAX_DMA_CHANNEL_NUM
; i
++)
1090 disable_ch_irq(&dma_chan
[i
]);
1093 int ltq_dma_init(void)
1098 static int dma_initialized
;
1100 if (dma_initialized
== 1)
1102 dma_initialized
= 1;
1104 result
= register_chrdev(DMA_MAJOR
, "dma-core", &dma_fops
);
1106 DMA_EMSG("cannot register device dma-core!\n");
1111 map_dma_chan(dma_chan
);
1113 /* allocate DMA memory for buffer descriptors */
1114 for (i
= 0; i
< DMA_DESCR_MEM_PAGES
; i
++) {
1115 g_desc_list
[i
] = (u64
*)__get_free_page(GFP_DMA
);
1116 if (g_desc_list
[i
] == NULL
) {
1117 DMA_EMSG("no memory for desriptor\n");
1120 g_desc_list
[i
] = (u64
*)KSEG1ADDR(g_desc_list
[i
]);
1121 memset(g_desc_list
[i
], 0, PAGE_SIZE
);
1124 for (i
= 0; i
< LTQ_MAX_DMA_CHANNEL_NUM
; i
++) {
1125 int page_index
, ch_per_page
;
1126 /* cross-link relative channels of a port to
1127 * corresponding absolute channels */
1128 if (dma_chan
[i
].dir
== DIR_RX
) {
1129 ((struct dma_device_info
*)(dma_chan
[i
].dma_dev
))->
1130 rx_chan
[dma_chan
[i
].rel_chan_no
] = &dma_chan
[i
];
1132 ((struct dma_device_info
*)(dma_chan
[i
].dma_dev
))->
1133 tx_chan
[dma_chan
[i
].rel_chan_no
] = &dma_chan
[i
];
1135 dma_chan
[i
].abs_chan_no
= i
;
1137 page_index
= i
* DMA_DESCR_CH_SIZE
/ PAGE_SIZE
;
1138 ch_per_page
= PAGE_SIZE
/ DMA_DESCR_CH_SIZE
+
1139 ((PAGE_SIZE
% DMA_DESCR_CH_SIZE
) > 0);
1140 dma_chan
[i
].desc_base
=
1141 (u32
)g_desc_list
[page_index
] +
1142 (i
- page_index
*ch_per_page
) * DMA_DESCR_NUM
*8;
1143 dma_chan
[i
].curr_desc
= 0;
1144 dma_chan
[i
].desc_len
= DMA_DESCR_NUM
;
1146 local_irq_save(flag
);
1149 dma_w32((u32
)CPHYSADDR(dma_chan
[i
].desc_base
), cdba
);
1151 local_irq_restore(flag
);
1154 g_dma_dir
= proc_mkdir("driver/" DRV_NAME
, NULL
);
1156 create_proc_read_entry("dma_register",
1159 dma_register_proc_read
,
1162 create_proc_read_entry("g_desc_list",
1165 desc_list_proc_read
,
1168 create_proc_read_entry("channel_weight",
1171 channel_weight_proc_read
,
1174 printk(KERN_NOTICE
"SVIP DMA engine initialized\n");
1180 * Cleanup DMA device
1181 * This function releases all resources used by the DMA device driver on
/*
 * Tear down the DMA driver: unregister the character device, free the
 * descriptor pages, remove all procfs entries, and free every
 * channel's IRQ.
 * NOTE(review): the declaration of 'i' and the closing brace are
 * elided in this chunk.
 */
1188 void dma_cleanup(void)
1191 unregister_chrdev(DMA_MAJOR
, "dma-core");
/* pages were allocated with __get_free_page() and then mapped through
 * KSEG1 in ltq_dma_init(); convert back via KSEG0ADDR before freeing */
1193 for (i
= 0; i
< DMA_DESCR_MEM_PAGES
; i
++)
1194 free_page(KSEG0ADDR((unsigned long)g_desc_list
[i
]));
1195 remove_proc_entry("channel_weight", g_dma_dir
);
1196 remove_proc_entry("g_desc_list", g_dma_dir
);
1197 remove_proc_entry("dma_register", g_dma_dir
);
1198 remove_proc_entry("driver/" DRV_NAME
, NULL
);
1199 /* release the resources */
/* NOTE(review): this frees every channel's IRQ unconditionally, while
 * request_irq() happens per-channel in open_chan() -- verify channels
 * are all open (or free_irq is safe here) before relying on this */
1200 for (i
= 0; i
< LTQ_MAX_DMA_CHANNEL_NUM
; i
++)
1201 free_irq(dma_chan
[i
].irq
, (void *)&dma_chan
[i
]);
1204 arch_initcall(ltq_dma_init
);
1206 MODULE_LICENSE("GPL");