From f1c4d9e622c800e1f38b3818f933ec7597d1ccfb Mon Sep 17 00:00:00 2001
From: John Crispin <blogic@openwrt.org>
Date: Sun, 27 Jul 2014 09:29:51 +0100
Subject: [PATCH 47/53] DMA: ralink: add rt2880 dma engine
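
Add a dmaengine driver for the Ralink GDMA block found on RT2880 and
newer SoCs such as the MT7620. The driver supports slave scatter/gather
and cyclic transfers on up to 16 channels and registers an OF xlate
callback so clients can reference a channel by number.

As a rough illustration (not part of this patch), a client driver would
drive one of these channels through the generic dmaengine slave API
along the lines below; the "rx" request name, FIFO address and request
line are placeholder values, not something this patch defines:

	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,
		.src_addr = 0x10000b00,	/* placeholder peripheral FIFO */
		.src_maxburst = 8,
		.slave_id = 3,		/* placeholder GDMA request line */
	};
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, "rx");
	if (!chan)
		return -ENODEV;
	dmaengine_slave_config(chan, &cfg);

	/* map one buffer, queue it and start the engine */
	tx = dmaengine_prep_slave_single(chan, buf_dma, len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);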

Signed-off-by: John Crispin <blogic@openwrt.org>
---
 drivers/dma/Kconfig       |   6 +
 drivers/dma/Makefile      |   1 +
 drivers/dma/ralink-gdma.c | 577 +++++++++++++++++++++++++++++++++++++++++++++
 include/linux/dmaengine.h |   1 +
 4 files changed, 585 insertions(+)
 create mode 100644 drivers/dma/ralink-gdma.c
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index b458475..2d5ae4a 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -40,6 +40,12 @@ config ASYNC_TX_ENABLE_CHANNEL_SWITCH
 config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 	bool
 
+config DMA_RALINK
+	tristate "RALINK DMA support"
+	depends on RALINK && SOC_MT7620
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 7711a71..b33c434 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -65,5 +65,6 @@ obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o
 obj-$(CONFIG_TI_EDMA) += edma.o
 obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
 obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
+obj-$(CONFIG_DMA_RALINK) += ralink-gdma.o
diff --git a/drivers/dma/ralink-gdma.c b/drivers/dma/ralink-gdma.c
new file mode 100644
index 0000000..2c3cace
--- /dev/null
+++ b/drivers/dma/ralink-gdma.c
@@ -0,0 +1,577 @@
+/*
+ * Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
+ * GDMA4740 DMAC support
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_dma.h>
+
+#include "virt-dma.h"
+
+#define GDMA_NR_CHANS			16
+
+#define GDMA_REG_SRC_ADDR(x)		(0x00 + (x) * 0x10)
+#define GDMA_REG_DST_ADDR(x)		(0x04 + (x) * 0x10)
+
+#define GDMA_REG_CTRL0(x)		(0x08 + (x) * 0x10)
+#define GDMA_REG_CTRL0_TX_MASK		0xffff
+#define GDMA_REG_CTRL0_TX_SHIFT		16
+#define GDMA_REG_CTRL0_CURR_MASK	0xff
+#define GDMA_REG_CTRL0_CURR_SHIFT	8
+#define GDMA_REG_CTRL0_SRC_ADDR_FIXED	BIT(7)
+#define GDMA_REG_CTRL0_DST_ADDR_FIXED	BIT(6)
+#define GDMA_REG_CTRL0_BURST_MASK	0x7
+#define GDMA_REG_CTRL0_BURST_SHIFT	3
+#define GDMA_REG_CTRL0_DONE_INT		BIT(2)
+#define GDMA_REG_CTRL0_ENABLE		BIT(1)
+#define GDMA_REG_CTRL0_HW_MODE		0
+
+#define GDMA_REG_CTRL1(x)		(0x0c + (x) * 0x10)
+#define GDMA_REG_CTRL1_SEG_MASK		0xf
+#define GDMA_REG_CTRL1_SEG_SHIFT	22
+#define GDMA_REG_CTRL1_REQ_MASK		0x3f
+#define GDMA_REG_CTRL1_SRC_REQ_SHIFT	16
+#define GDMA_REG_CTRL1_DST_REQ_SHIFT	8
+#define GDMA_REG_CTRL1_CONTINOUS	BIT(14)
+#define GDMA_REG_CTRL1_NEXT_MASK	0x1f
+#define GDMA_REG_CTRL1_NEXT_SHIFT	3
+#define GDMA_REG_CTRL1_COHERENT		BIT(2)
+#define GDMA_REG_CTRL1_FAIL		BIT(1)
+#define GDMA_REG_CTRL1_MASK		BIT(0)
+
+#define GDMA_REG_UNMASK_INT		0x200
+#define GDMA_REG_DONE_INT		0x204
+
+#define GDMA_REG_GCT			0x220
+#define GDMA_REG_GCT_CHAN_MASK		0x3
+#define GDMA_REG_GCT_CHAN_SHIFT		3
+#define GDMA_REG_GCT_VER_MASK		0x3
+#define GDMA_REG_GCT_VER_SHIFT		1
+#define GDMA_REG_GCT_ARBIT_RR		BIT(0)
+
+enum gdma_dma_transfer_size {
+	GDMA_TRANSFER_SIZE_4BYTE	= 0,
+	GDMA_TRANSFER_SIZE_8BYTE	= 1,
+	GDMA_TRANSFER_SIZE_16BYTE	= 2,
+	GDMA_TRANSFER_SIZE_32BYTE	= 3,
+};
+
+struct gdma_dma_sg {
+	dma_addr_t addr;
+	unsigned int len;
+};
+
+struct gdma_dma_desc {
+	struct virt_dma_desc vdesc;
+
+	enum dma_transfer_direction direction;
+	bool cyclic;
+
+	unsigned int num_sgs;
+	struct gdma_dma_sg sg[];
+};
+
+struct gdma_dmaengine_chan {
+	struct virt_dma_chan vchan;
+
+	unsigned int id;
+	dma_addr_t fifo_addr;
+	unsigned int transfer_shift;
+
+	struct gdma_dma_desc *desc;
+	unsigned int next_sg;
+};
+
+struct gdma_dma_dev {
+	struct dma_device ddev;
+	void __iomem *base;
+
+	struct gdma_dmaengine_chan chan[GDMA_NR_CHANS];
+};
+
+static struct gdma_dma_dev *gdma_dma_chan_get_dev(
+	struct gdma_dmaengine_chan *chan)
+{
+	return container_of(chan->vchan.chan.device, struct gdma_dma_dev,
+		ddev);
+}
+
+static struct gdma_dmaengine_chan *to_gdma_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct gdma_dmaengine_chan, vchan.chan);
+}
+
+static struct gdma_dma_desc *to_gdma_dma_desc(struct virt_dma_desc *vdesc)
+{
+	return container_of(vdesc, struct gdma_dma_desc, vdesc);
+}
+
+static inline uint32_t gdma_dma_read(struct gdma_dma_dev *dma_dev,
+	unsigned int reg)
+{
+	return readl(dma_dev->base + reg);
+}
+
+static inline void gdma_dma_write(struct gdma_dma_dev *dma_dev,
+	unsigned int reg, uint32_t val)
+{
+	writel(val, dma_dev->base + reg);
+}
+
+static inline void gdma_dma_write_mask(struct gdma_dma_dev *dma_dev,
+	unsigned int reg, uint32_t val, uint32_t mask)
+{
+	uint32_t tmp;
+
+	tmp = gdma_dma_read(dma_dev, reg);
+	tmp &= ~mask;
+	tmp |= val;
+	gdma_dma_write(dma_dev, reg, tmp);
+}
+
+static struct gdma_dma_desc *gdma_dma_alloc_desc(unsigned int num_sgs)
+{
+	return kzalloc(sizeof(struct gdma_dma_desc) +
+		sizeof(struct gdma_dma_sg) * num_sgs, GFP_ATOMIC);
+}
+
+static enum gdma_dma_transfer_size gdma_dma_maxburst(u32 maxburst)
+{
+	if (maxburst <= 7)
+		return GDMA_TRANSFER_SIZE_4BYTE;
+	else if (maxburst <= 15)
+		return GDMA_TRANSFER_SIZE_8BYTE;
+	else if (maxburst <= 31)
+		return GDMA_TRANSFER_SIZE_16BYTE;
+
+	return GDMA_TRANSFER_SIZE_32BYTE;
+}
+
+static int gdma_dma_slave_config(struct dma_chan *c,
+	const struct dma_slave_config *config)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
+	enum gdma_dma_transfer_size transfer_size;
+	uint32_t flags;
+	uint32_t ctrl0, ctrl1;
+
+	switch (config->direction) {
+	case DMA_MEM_TO_DEV:
+		ctrl1 = 32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT;
+		ctrl1 |= config->slave_id << GDMA_REG_CTRL1_DST_REQ_SHIFT;
+		flags = GDMA_REG_CTRL0_DST_ADDR_FIXED;
+		transfer_size = gdma_dma_maxburst(config->dst_maxburst);
+		chan->fifo_addr = config->dst_addr;
+		break;
+
+	case DMA_DEV_TO_MEM:
+		ctrl1 = config->slave_id << GDMA_REG_CTRL1_SRC_REQ_SHIFT;
+		ctrl1 |= 32 << GDMA_REG_CTRL1_DST_REQ_SHIFT;
+		flags = GDMA_REG_CTRL0_SRC_ADDR_FIXED;
+		transfer_size = gdma_dma_maxburst(config->src_maxburst);
+		chan->fifo_addr = config->src_addr;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	chan->transfer_shift = 1 + transfer_size;
+
+	ctrl0 = flags | GDMA_REG_CTRL0_HW_MODE;
+	ctrl0 |= GDMA_REG_CTRL0_DONE_INT;
+
+	ctrl1 &= ~(GDMA_REG_CTRL1_NEXT_MASK << GDMA_REG_CTRL1_NEXT_SHIFT);
+	ctrl1 |= chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;
+	ctrl1 |= GDMA_REG_CTRL1_FAIL;
+	ctrl1 &= ~GDMA_REG_CTRL1_CONTINOUS;
+	gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0);
+	gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1);
+
+	return 0;
+}
+
+static int gdma_dma_terminate_all(struct dma_chan *c)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL0(chan->id), 0,
+		GDMA_REG_CTRL0_ENABLE);
+	chan->desc = NULL;
+	vchan_get_all_descriptors(&chan->vchan, &head);
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+	vchan_dma_desc_free_list(&chan->vchan, &head);
+
+	return 0;
+}
+
+static int gdma_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+	unsigned long arg)
+{
+	struct dma_slave_config *config = (struct dma_slave_config *)arg;
+
+	switch (cmd) {
+	case DMA_SLAVE_CONFIG:
+		return gdma_dma_slave_config(chan, config);
+	case DMA_TERMINATE_ALL:
+		return gdma_dma_terminate_all(chan);
+	default:
+		return -ENOSYS;
+	}
+}
+
+static int gdma_dma_start_transfer(struct gdma_dmaengine_chan *chan)
+{
+	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
+	dma_addr_t src_addr, dst_addr;
+	struct virt_dma_desc *vdesc;
+	struct gdma_dma_sg *sg;
+
+	gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL0(chan->id), 0,
+		GDMA_REG_CTRL0_ENABLE);
+
+	if (!chan->desc) {
+		vdesc = vchan_next_desc(&chan->vchan);
+		if (!vdesc)
+			return 0;
+		chan->desc = to_gdma_dma_desc(vdesc);
+		chan->next_sg = 0;
+	}
+
+	/* wrap around for cyclic transfers */
+	if (chan->next_sg == chan->desc->num_sgs)
+		chan->next_sg = 0;
+
+	sg = &chan->desc->sg[chan->next_sg];
+	chan->next_sg++;
+
+	if (chan->desc->direction == DMA_MEM_TO_DEV) {
+		src_addr = sg->addr;
+		dst_addr = chan->fifo_addr;
+	} else {
+		src_addr = chan->fifo_addr;
+		dst_addr = sg->addr;
+	}
+
+	gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr);
+	gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr);
+	gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL0(chan->id),
+		(sg->len << GDMA_REG_CTRL0_TX_SHIFT) | GDMA_REG_CTRL0_ENABLE,
+		GDMA_REG_CTRL0_TX_MASK << GDMA_REG_CTRL0_TX_SHIFT);
+	gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL1(chan->id), 0,
+		GDMA_REG_CTRL1_MASK);
+
+	return 0;
+}
+
+static void gdma_dma_chan_irq(struct gdma_dmaengine_chan *chan)
+{
+	spin_lock(&chan->vchan.lock);
+
+	if (chan->desc && chan->desc->cyclic) {
+		vchan_cyclic_callback(&chan->desc->vdesc);
+	} else if (chan->desc) {
+		if (chan->next_sg == chan->desc->num_sgs) {
+			list_del(&chan->desc->vdesc.node);
+			vchan_cookie_complete(&chan->desc->vdesc);
+			chan->desc = NULL;
+		}
+	}
+
+	gdma_dma_start_transfer(chan);
+	spin_unlock(&chan->vchan.lock);
+}
+
+static irqreturn_t gdma_dma_irq(int irq, void *devid)
+{
+	struct gdma_dma_dev *dma_dev = devid;
+	uint32_t unmask, done;
+	unsigned int i;
+
+	unmask = gdma_dma_read(dma_dev, GDMA_REG_UNMASK_INT);
+	gdma_dma_write(dma_dev, GDMA_REG_UNMASK_INT, unmask);
+	done = gdma_dma_read(dma_dev, GDMA_REG_DONE_INT);
+
+	for (i = 0; i < GDMA_NR_CHANS; ++i)
+		if (done & BIT(i))
+			gdma_dma_chan_irq(&dma_dev->chan[i]);
+	gdma_dma_write(dma_dev, GDMA_REG_DONE_INT, done);
+
+	return IRQ_HANDLED;
+}
+
+static void gdma_dma_issue_pending(struct dma_chan *c)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	if (vchan_issue_pending(&chan->vchan) && !chan->desc)
+		gdma_dma_start_transfer(chan);
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
+static struct dma_async_tx_descriptor *gdma_dma_prep_slave_sg(
+	struct dma_chan *c, struct scatterlist *sgl,
+	unsigned int sg_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct gdma_dma_desc *desc;
+	struct scatterlist *sg;
+	unsigned int i;
+
+	desc = gdma_dma_alloc_desc(sg_len);
+	if (!desc)
+		return NULL;
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		desc->sg[i].addr = sg_dma_address(sg);
+		desc->sg[i].len = sg_dma_len(sg);
+	}
+
+	desc->num_sgs = sg_len;
+	desc->direction = direction;
+	desc->cyclic = false;
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static struct dma_async_tx_descriptor *gdma_dma_prep_dma_cyclic(
+	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
+	size_t period_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct gdma_dma_desc *desc;
+	unsigned int num_periods, i;
+
+	if (buf_len % period_len)
+		return NULL;
+
+	num_periods = buf_len / period_len;
+
+	desc = gdma_dma_alloc_desc(num_periods);
+	if (!desc)
+		return NULL;
+
+	for (i = 0; i < num_periods; i++) {
+		desc->sg[i].addr = buf_addr;
+		desc->sg[i].len = period_len;
+		buf_addr += period_len;
+	}
+
+	desc->num_sgs = num_periods;
+	desc->direction = direction;
+	desc->cyclic = true;
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static size_t gdma_dma_desc_residue(struct gdma_dmaengine_chan *chan,
+	struct gdma_dma_desc *desc, unsigned int next_sg)
+{
+	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
+	unsigned int residue, count;
+	unsigned int i;
+
+	residue = 0;
+
+	for (i = next_sg; i < desc->num_sgs; i++)
+		residue += desc->sg[i].len;
+
+	if (next_sg != 0) {
+		count = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
+		count >>= GDMA_REG_CTRL0_CURR_SHIFT;
+		count &= GDMA_REG_CTRL0_CURR_MASK;
+		residue += count << chan->transfer_shift;
+	}
+
+	return residue;
+}
+
+static enum dma_status gdma_dma_tx_status(struct dma_chan *c,
+	dma_cookie_t cookie, struct dma_tx_state *state)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct virt_dma_desc *vdesc;
+	enum dma_status status;
+	unsigned long flags;
+
+	status = dma_cookie_status(c, cookie, state);
+	if (status == DMA_COMPLETE || !state)
+		return status;
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	vdesc = vchan_find_desc(&chan->vchan, cookie);
+	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie) {
+		state->residue = gdma_dma_desc_residue(chan, chan->desc,
+				chan->next_sg);
+	} else if (vdesc) {
+		state->residue = gdma_dma_desc_residue(chan,
+				to_gdma_dma_desc(vdesc), 0);
+	} else {
+		state->residue = 0;
+	}
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+	return status;
+}
+
+static int gdma_dma_alloc_chan_resources(struct dma_chan *c)
+{
+	return 0;
+}
+
+static void gdma_dma_free_chan_resources(struct dma_chan *c)
+{
+	vchan_free_chan_resources(to_virt_chan(c));
+}
+
+static void gdma_dma_desc_free(struct virt_dma_desc *vdesc)
+{
+	kfree(container_of(vdesc, struct gdma_dma_desc, vdesc));
+}
+
+static struct dma_chan *
+of_dma_xlate_by_chan_id(struct of_phandle_args *dma_spec,
+	struct of_dma *ofdma)
+{
+	struct gdma_dma_dev *dma_dev = ofdma->of_dma_data;
+	unsigned int request = dma_spec->args[0];
+
+	if (request >= GDMA_NR_CHANS)
+		return NULL;
+
+	return dma_get_slave_channel(&(dma_dev->chan[request].vchan.chan));
+}
+
+static int gdma_dma_probe(struct platform_device *pdev)
+{
+	struct gdma_dmaengine_chan *chan;
+	struct gdma_dma_dev *dma_dev;
+	struct dma_device *dd;
+	unsigned int i;
+	struct resource *res;
+	uint32_t gct;
+	int ret;
+	int irq;
+
+	dma_dev = devm_kzalloc(&pdev->dev, sizeof(*dma_dev), GFP_KERNEL);
+	if (!dma_dev)
+		return -ENOMEM;
+
+	dd = &dma_dev->ddev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_dev->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(dma_dev->base))
+		return PTR_ERR(dma_dev->base);
+
+	dma_cap_set(DMA_SLAVE, dd->cap_mask);
+	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
+	dd->device_alloc_chan_resources = gdma_dma_alloc_chan_resources;
+	dd->device_free_chan_resources = gdma_dma_free_chan_resources;
+	dd->device_tx_status = gdma_dma_tx_status;
+	dd->device_issue_pending = gdma_dma_issue_pending;
+	dd->device_prep_slave_sg = gdma_dma_prep_slave_sg;
+	dd->device_prep_dma_cyclic = gdma_dma_prep_dma_cyclic;
+	dd->device_control = gdma_dma_control;
+	dd->dev = &pdev->dev;
+	dd->chancnt = GDMA_NR_CHANS;
+	INIT_LIST_HEAD(&dd->channels);
+
+	for (i = 0; i < dd->chancnt; i++) {
+		chan = &dma_dev->chan[i];
+		chan->id = i;
+		chan->vchan.desc_free = gdma_dma_desc_free;
+		vchan_init(&chan->vchan, dd);
+	}
+
+	ret = dma_async_device_register(dd);
+	if (ret)
+		return ret;
+
+	ret = of_dma_controller_register(pdev->dev.of_node,
+		of_dma_xlate_by_chan_id, dma_dev);
+	if (ret)
+		goto err_unregister;
+
+	irq = platform_get_irq(pdev, 0);
+	ret = request_irq(irq, gdma_dma_irq, 0, dev_name(&pdev->dev), dma_dev);
+	if (ret)
+		goto err_unregister;
+
+	gdma_dma_write(dma_dev, GDMA_REG_UNMASK_INT, 0);
+	gdma_dma_write(dma_dev, GDMA_REG_DONE_INT, BIT(dd->chancnt) - 1);
+
+	gct = gdma_dma_read(dma_dev, GDMA_REG_GCT);
+	dev_info(&pdev->dev, "revision: %d, channels: %d\n",
+		(gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK,
+		8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) & GDMA_REG_GCT_CHAN_MASK));
+	platform_set_drvdata(pdev, dma_dev);
+
+	/* enable round-robin arbitration between the channels */
+	gdma_dma_write(dma_dev, GDMA_REG_GCT, GDMA_REG_GCT_ARBIT_RR);
+
+	return 0;
+
+err_unregister:
+	dma_async_device_unregister(dd);
+	return ret;
+}
+
+static int gdma_dma_remove(struct platform_device *pdev)
+{
+	struct gdma_dma_dev *dma_dev = platform_get_drvdata(pdev);
+	int irq = platform_get_irq(pdev, 0);
+
+	free_irq(irq, dma_dev);
+	of_dma_controller_free(pdev->dev.of_node);
+	dma_async_device_unregister(&dma_dev->ddev);
+
+	return 0;
+}
+
+static const struct of_device_id gdma_of_match_table[] = {
+	{ .compatible = "ralink,rt2880-gdma" },
+	{ },
+};
+
+static struct platform_driver gdma_dma_driver = {
+	.probe = gdma_dma_probe,
+	.remove = gdma_dma_remove,
+	.driver = {
+		.name = "gdma-rt2880",
+		.owner = THIS_MODULE,
+		.of_match_table = gdma_of_match_table,
+	},
+};
+module_platform_driver(gdma_dma_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("GDMA4740 DMA driver");
+MODULE_LICENSE("GPL v2");
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 7ea9184..d371bf1 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -496,6 +496,7 @@ static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
 struct dmaengine_unmap_data *
 dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
 void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
 
 static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
 	struct dmaengine_unmap_data *unmap)