From f1c4d9e622c800e1f38b3818f933ec7597d1ccfb Mon Sep 17 00:00:00 2001
From: John Crispin <blogic@openwrt.org>
Date: Sun, 27 Jul 2014 09:29:51 +0100
Subject: [PATCH 47/53] DMA: ralink: add rt2880 dma engine

Signed-off-by: John Crispin <blogic@openwrt.org>
---
 drivers/dma/Kconfig       |   6 +
 drivers/dma/Makefile      |   1 +
 drivers/dma/ralink-gdma.c | 577 +++++++++++++++++++++++++++++++++++++++++++++
 include/linux/dmaengine.h |   1 +
 4 files changed, 585 insertions(+)
 create mode 100644 drivers/dma/ralink-gdma.c
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -40,6 +40,18 @@ config ASYNC_TX_ENABLE_CHANNEL_SWITCH
 config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 	bool
 
+config DMA_RALINK
+	tristate "RALINK DMA support"
+	depends on RALINK && !SOC_RT288X
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+
+config MTK_HSDMA
+	tristate "MTK HSDMA support"
+	depends on RALINK && SOC_MT7621
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -72,6 +72,8 @@ obj-$(CONFIG_TIMB_DMA) += timb_dma.o
 obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
 obj-$(CONFIG_ZX_DMA) += zx_dma.o
 obj-$(CONFIG_ST_FDMA) += st_fdma.o
+obj-$(CONFIG_DMA_RALINK) += ralink-gdma.o
+obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o
--- /dev/null
+++ b/drivers/dma/ralink-gdma.c
@@ -0,0 +1,577 @@
+/*
+ *  Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
+ *  GDMA4740 DMAC support
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; either version 2 of the License, or (at your
+ *  option) any later version.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_dma.h>
+#include <linux/reset.h>
+#include <linux/of_device.h>
+
+#include "virt-dma.h"
+
+#define GDMA_REG_SRC_ADDR(x)		(0x00 + (x) * 0x10)
+#define GDMA_REG_DST_ADDR(x)		(0x04 + (x) * 0x10)
+
+#define GDMA_REG_CTRL0(x)		(0x08 + (x) * 0x10)
+#define GDMA_REG_CTRL0_TX_MASK		0xffff
+#define GDMA_REG_CTRL0_TX_SHIFT		16
+#define GDMA_REG_CTRL0_CURR_MASK	0xff
+#define GDMA_REG_CTRL0_CURR_SHIFT	8
+#define GDMA_REG_CTRL0_SRC_ADDR_FIXED	BIT(7)
+#define GDMA_REG_CTRL0_DST_ADDR_FIXED	BIT(6)
+#define GDMA_REG_CTRL0_BURST_MASK	0x7
+#define GDMA_REG_CTRL0_BURST_SHIFT	3
+#define GDMA_REG_CTRL0_DONE_INT		BIT(2)
+#define GDMA_REG_CTRL0_ENABLE		BIT(1)
+#define GDMA_REG_CTRL0_SW_MODE		BIT(0)
+
+#define GDMA_REG_CTRL1(x)		(0x0c + (x) * 0x10)
+#define GDMA_REG_CTRL1_SEG_MASK		0xf
+#define GDMA_REG_CTRL1_SEG_SHIFT	22
+#define GDMA_REG_CTRL1_REQ_MASK		0x3f
+#define GDMA_REG_CTRL1_SRC_REQ_SHIFT	16
+#define GDMA_REG_CTRL1_DST_REQ_SHIFT	8
+#define GDMA_REG_CTRL1_CONTINOUS	BIT(14)
+#define GDMA_REG_CTRL1_NEXT_MASK	0x1f
+#define GDMA_REG_CTRL1_NEXT_SHIFT	3
+#define GDMA_REG_CTRL1_COHERENT		BIT(2)
+#define GDMA_REG_CTRL1_FAIL		BIT(1)
+#define GDMA_REG_CTRL1_MASK		BIT(0)
+
+#define GDMA_REG_UNMASK_INT		0x200
+#define GDMA_REG_DONE_INT		0x204
+
+#define GDMA_REG_GCT			0x220
+#define GDMA_REG_GCT_CHAN_MASK		0x3
+#define GDMA_REG_GCT_CHAN_SHIFT		3
+#define GDMA_REG_GCT_VER_MASK		0x3
+#define GDMA_REG_GCT_VER_SHIFT		1
+#define GDMA_REG_GCT_ARBIT_RR		BIT(0)
+
+#define GDMA_REG_REQSTS			0x2a0
+#define GDMA_REG_ACKSTS			0x2a4
+#define GDMA_REG_FINSTS			0x2a8
+
+/* for RT305X gdma registers */
+#define GDMA_RT305X_CTRL0_REQ_MASK	0xf
+#define GDMA_RT305X_CTRL0_SRC_REQ_SHIFT	12
+#define GDMA_RT305X_CTRL0_DST_REQ_SHIFT	8
+
+#define GDMA_RT305X_CTRL1_FAIL		BIT(4)
+#define GDMA_RT305X_CTRL1_NEXT_MASK	0x7
+#define GDMA_RT305X_CTRL1_NEXT_SHIFT	1
+
+#define GDMA_RT305X_STATUS_INT		0x80
+#define GDMA_RT305X_STATUS_SIGNAL	0x84
+#define GDMA_RT305X_GCT			0x88
+
+/* for MT7621 gdma registers */
+#define GDMA_REG_PERF_START(x)		(0x230 + (x) * 0x8)
+#define GDMA_REG_PERF_END(x)		(0x234 + (x) * 0x8)
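+
+/*
+ * Editorial note (not in the original patch): each GDMA channel owns a
+ * 0x10-byte register window, so for example GDMA_REG_CTRL0(2) = 0x08 +
+ * 2 * 0x10 = 0x28. The field macros pair a _MASK with a _SHIFT; a field
+ * is extracted as (val >> SHIFT) & MASK and packed as (x & MASK) << SHIFT.
+ */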
+
+enum gdma_dma_transfer_size {
+	GDMA_TRANSFER_SIZE_4BYTE	= 0,
+	GDMA_TRANSFER_SIZE_8BYTE	= 1,
+	GDMA_TRANSFER_SIZE_16BYTE	= 2,
+	GDMA_TRANSFER_SIZE_32BYTE	= 3,
+	GDMA_TRANSFER_SIZE_64BYTE	= 4,
+};
+
+struct gdma_dma_sg {
+	dma_addr_t src_addr;
+	dma_addr_t dst_addr;
+	u32 len;
+};
+
+struct gdma_dma_desc {
+	struct virt_dma_desc vdesc;
+
+	enum dma_transfer_direction direction;
+	bool cyclic;
+
+	u32 residue;
+	unsigned int num_sgs;
+	struct gdma_dma_sg sg[];
+};
+
+struct gdma_dmaengine_chan {
+	struct virt_dma_chan vchan;
+	unsigned int id;
+	unsigned int slave_id;
+
+	dma_addr_t fifo_addr;
+	enum gdma_dma_transfer_size burst_size;
+
+	struct gdma_dma_desc *desc;
+	unsigned int next_sg;
+};
+
+struct gdma_dma_dev {
+	struct dma_device ddev;
+	struct device_dma_parameters dma_parms;
+	struct gdma_data *data;
+	void __iomem *base;
+	struct tasklet_struct task;
+	volatile unsigned long chan_issued;
+	atomic_t cnt;
+
+	struct gdma_dmaengine_chan chan[];
+};
+
+struct gdma_data {
+	int chancnt;
+	u32 done_int_reg;
+	void (*init)(struct gdma_dma_dev *dma_dev);
+	int (*start_transfer)(struct gdma_dmaengine_chan *chan);
+};
+
+static struct gdma_dma_dev *gdma_dma_chan_get_dev(
+	struct gdma_dmaengine_chan *chan)
+{
+	return container_of(chan->vchan.chan.device, struct gdma_dma_dev,
+		ddev);
+}
+
+static struct gdma_dmaengine_chan *to_gdma_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct gdma_dmaengine_chan, vchan.chan);
+}
+
+static struct gdma_dma_desc *to_gdma_dma_desc(struct virt_dma_desc *vdesc)
+{
+	return container_of(vdesc, struct gdma_dma_desc, vdesc);
+}
+
+static inline uint32_t gdma_dma_read(struct gdma_dma_dev *dma_dev,
+	unsigned int reg)
+{
+	return readl(dma_dev->base + reg);
+}
+
+static inline void gdma_dma_write(struct gdma_dma_dev *dma_dev,
+	unsigned int reg, uint32_t val)
+{
+	writel(val, dma_dev->base + reg);
+}
+
+static struct gdma_dma_desc *gdma_dma_alloc_desc(unsigned int num_sgs)
+{
+	return kzalloc(sizeof(struct gdma_dma_desc) +
+		sizeof(struct gdma_dma_sg) * num_sgs, GFP_ATOMIC);
+}
+
+static enum gdma_dma_transfer_size gdma_dma_maxburst(u32 maxburst)
+{
+	if (maxburst < 2)
+		return GDMA_TRANSFER_SIZE_4BYTE;
+	else if (maxburst < 4)
+		return GDMA_TRANSFER_SIZE_8BYTE;
+	else if (maxburst < 8)
+		return GDMA_TRANSFER_SIZE_16BYTE;
+	else if (maxburst < 16)
+		return GDMA_TRANSFER_SIZE_32BYTE;
+	else
+		return GDMA_TRANSFER_SIZE_64BYTE;
+}
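+
+/*
+ * Illustrative client-side sketch (editorial, not part of this driver):
+ * a peripheral driver would typically configure the channel before
+ * preparing transfers; fifo_phys is a hypothetical FIFO bus address.
+ * A dst_maxburst of 8 words maps to GDMA_TRANSFER_SIZE_32BYTE above.
+ *
+ *	struct dma_slave_config cfg = {
+ *		.direction = DMA_MEM_TO_DEV,
+ *		.dst_addr = fifo_phys,
+ *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ *		.dst_maxburst = 8,
+ *	};
+ *	ret = dmaengine_slave_config(chan, &cfg);
+ */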
+
+static int gdma_dma_config(struct dma_chan *c,
+		struct dma_slave_config *config)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
+
+	if (config->device_fc) {
+		dev_err(dma_dev->ddev.dev, "flow controller is not supported\n");
+		return -EINVAL;
+	}
+
+	switch (config->direction) {
+	case DMA_MEM_TO_DEV:
+		if (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) {
+			dev_err(dma_dev->ddev.dev, "only 4 byte buswidth is supported\n");
+			return -EINVAL;
+		}
+		chan->slave_id = config->slave_id;
+		chan->fifo_addr = config->dst_addr;
+		chan->burst_size = gdma_dma_maxburst(config->dst_maxburst);
+		break;
+	case DMA_DEV_TO_MEM:
+		if (config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) {
+			dev_err(dma_dev->ddev.dev, "only 4 byte buswidth is supported\n");
+			return -EINVAL;
+		}
+		chan->slave_id = config->slave_id;
+		chan->fifo_addr = config->src_addr;
+		chan->burst_size = gdma_dma_maxburst(config->src_maxburst);
+		break;
+	default:
+		dev_err(dma_dev->ddev.dev, "direction type %d error\n",
+			config->direction);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int gdma_dma_terminate_all(struct dma_chan *c)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
+	unsigned long flags, timeout;
+	LIST_HEAD(head);
+	int i = 0;
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	chan->desc = NULL;
+	clear_bit(chan->id, &dma_dev->chan_issued);
+	vchan_get_all_descriptors(&chan->vchan, &head);
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+	vchan_dma_desc_free_list(&chan->vchan, &head);
+
+	/* wait for the dma transfer to complete */
+	timeout = jiffies + msecs_to_jiffies(5000);
+	while (gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id)) &
+			GDMA_REG_CTRL0_ENABLE) {
+		if (time_after_eq(jiffies, timeout)) {
+			dev_err(dma_dev->ddev.dev, "chan %d wait timeout\n",
+				chan->id);
+			/* restore to init value */
+			gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), 0);
+			break;
+		}
+		cpu_relax();
+		i++;
+	}
+
+	if (i)
+		dev_dbg(dma_dev->ddev.dev, "terminate chan %d loops %d\n",
+			chan->id, i);
+
+	return 0;
+}
+
+static void rt305x_dump_reg(struct gdma_dma_dev *dma_dev, int id)
+{
+	dev_dbg(dma_dev->ddev.dev, "chan %d, src %08x, dst %08x, ctr0 %08x, ctr1 %08x, intr %08x, signal %08x\n",
+		id,
+		gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)),
+		gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)),
+		gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)),
+		gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)),
+		gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_INT),
+		gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_SIGNAL));
+}
+
+static int rt305x_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
+{
+	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
+	dma_addr_t src_addr, dst_addr;
+	struct gdma_dma_sg *sg;
+	uint32_t ctrl0, ctrl1;
+
+	/* verify that the chan is stopped */
+	ctrl0 = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
+	if (unlikely(ctrl0 & GDMA_REG_CTRL0_ENABLE)) {
+		dev_err(dma_dev->ddev.dev, "chan %d is already started (%08x)\n",
+			chan->id, ctrl0);
+		rt305x_dump_reg(dma_dev, chan->id);
+		return -EINVAL;
+	}
+
+	sg = &chan->desc->sg[chan->next_sg];
+	if (chan->desc->direction == DMA_MEM_TO_DEV) {
+		src_addr = sg->src_addr;
+		dst_addr = chan->fifo_addr;
+		ctrl0 = GDMA_REG_CTRL0_DST_ADDR_FIXED |
+			(8 << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) |
+			(chan->slave_id << GDMA_RT305X_CTRL0_DST_REQ_SHIFT);
+	} else if (chan->desc->direction == DMA_DEV_TO_MEM) {
+		src_addr = chan->fifo_addr;
+		dst_addr = sg->dst_addr;
+		ctrl0 = GDMA_REG_CTRL0_SRC_ADDR_FIXED |
+			(chan->slave_id << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) |
+			(8 << GDMA_RT305X_CTRL0_DST_REQ_SHIFT);
+	} else if (chan->desc->direction == DMA_MEM_TO_MEM) {
+		/*
+		 * TODO: the memcpy function has bugs. sometimes it copies
+		 * 8 extra bytes of data when verified with dmatest.
+		 */
+		src_addr = sg->src_addr;
+		dst_addr = sg->dst_addr;
+		ctrl0 = GDMA_REG_CTRL0_SW_MODE |
+			(8 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
+			(8 << GDMA_REG_CTRL1_DST_REQ_SHIFT);
+	} else {
+		dev_err(dma_dev->ddev.dev, "direction type %d error\n",
+			chan->desc->direction);
+		return -EINVAL;
+	}
+
+	ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) |
+		(chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) |
+		GDMA_REG_CTRL0_DONE_INT | GDMA_REG_CTRL0_ENABLE;
+	ctrl1 = chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;
+
+	chan->next_sg++;
+	gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr);
+	gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr);
+	gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1);
+
+	/* make sure next_sg is updated */
+	wmb();
+	gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0);
+
+	return 0;
+}
+
+static void rt3883_dump_reg(struct gdma_dma_dev *dma_dev, int id)
+{
+	dev_dbg(dma_dev->ddev.dev, "chan %d, src %08x, dst %08x, ctr0 %08x, ctr1 %08x, unmask %08x, done %08x, req %08x, ack %08x, fin %08x\n",
+		id,
+		gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)),
+		gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)),
+		gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)),
+		gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)),
+		gdma_dma_read(dma_dev, GDMA_REG_UNMASK_INT),
+		gdma_dma_read(dma_dev, GDMA_REG_DONE_INT),
+		gdma_dma_read(dma_dev, GDMA_REG_REQSTS),
+		gdma_dma_read(dma_dev, GDMA_REG_ACKSTS),
+		gdma_dma_read(dma_dev, GDMA_REG_FINSTS));
+}
+
+static int rt3883_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
+{
+	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
+	dma_addr_t src_addr, dst_addr;
+	struct gdma_dma_sg *sg;
+	uint32_t ctrl0, ctrl1;
+
+	/* verify that the chan is stopped */
+	ctrl0 = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
+	if (unlikely(ctrl0 & GDMA_REG_CTRL0_ENABLE)) {
+		dev_err(dma_dev->ddev.dev, "chan %d is already started (%08x)\n",
+			chan->id, ctrl0);
+		rt3883_dump_reg(dma_dev, chan->id);
+		return -EINVAL;
+	}
+
+	sg = &chan->desc->sg[chan->next_sg];
+	if (chan->desc->direction == DMA_MEM_TO_DEV) {
+		src_addr = sg->src_addr;
+		dst_addr = chan->fifo_addr;
+		ctrl0 = GDMA_REG_CTRL0_DST_ADDR_FIXED;
+		ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
+			(chan->slave_id << GDMA_REG_CTRL1_DST_REQ_SHIFT);
+	} else if (chan->desc->direction == DMA_DEV_TO_MEM) {
+		src_addr = chan->fifo_addr;
+		dst_addr = sg->dst_addr;
+		ctrl0 = GDMA_REG_CTRL0_SRC_ADDR_FIXED;
+		ctrl1 = (chan->slave_id << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
+			(32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) |
+			GDMA_REG_CTRL1_COHERENT;
+	} else if (chan->desc->direction == DMA_MEM_TO_MEM) {
+		src_addr = sg->src_addr;
+		dst_addr = sg->dst_addr;
+		ctrl0 = GDMA_REG_CTRL0_SW_MODE;
+		ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
+			(32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) |
+			GDMA_REG_CTRL1_COHERENT;
+	} else {
+		dev_err(dma_dev->ddev.dev, "direction type %d error\n",
+			chan->desc->direction);
+		return -EINVAL;
+	}
+
+	ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) |
+		(chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) |
+		GDMA_REG_CTRL0_DONE_INT | GDMA_REG_CTRL0_ENABLE;
+	ctrl1 |= chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;
+
+	chan->next_sg++;
+	gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr);
+	gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr);
+	gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1);
+
+	/* make sure next_sg is updated */
+	wmb();
+	gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0);
+
+	return 0;
+}
+
+static inline int gdma_start_transfer(struct gdma_dma_dev *dma_dev,
+		struct gdma_dmaengine_chan *chan)
+{
+	return dma_dev->data->start_transfer(chan);
+}
+
+static int gdma_next_desc(struct gdma_dmaengine_chan *chan)
+{
+	struct virt_dma_desc *vdesc;
+
+	vdesc = vchan_next_desc(&chan->vchan);
+	if (!vdesc) {
+		chan->desc = NULL;
+		return 0;
+	}
+	chan->desc = to_gdma_dma_desc(vdesc);
+	chan->next_sg = 0;
+
+	return 1;
+}
+
+static void gdma_dma_chan_irq(struct gdma_dma_dev *dma_dev,
+		struct gdma_dmaengine_chan *chan)
+{
+	struct gdma_dma_desc *desc;
+	unsigned long flags;
+	int chan_issued = 0;
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	desc = chan->desc;
+	if (desc) {
+		if (desc->cyclic) {
+			vchan_cyclic_callback(&desc->vdesc);
+			if (chan->next_sg == desc->num_sgs)
+				chan->next_sg = 0;
+			chan_issued = 1;
+		} else {
+			desc->residue -= desc->sg[chan->next_sg - 1].len;
+			if (chan->next_sg == desc->num_sgs) {
+				list_del(&desc->vdesc.node);
+				vchan_cookie_complete(&desc->vdesc);
+				chan_issued = gdma_next_desc(chan);
+			} else
+				chan_issued = 1;
+		}
+	} else
+		dev_dbg(dma_dev->ddev.dev, "chan %d no desc to complete\n",
+			chan->id);
+	if (chan_issued)
+		set_bit(chan->id, &dma_dev->chan_issued);
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
+static irqreturn_t gdma_dma_irq(int irq, void *devid)
+{
+	struct gdma_dma_dev *dma_dev = devid;
+	u32 done, done_reg;
+	unsigned int i = 0;
+
+	done_reg = dma_dev->data->done_int_reg;
+	done = gdma_dma_read(dma_dev, done_reg);
+	if (unlikely(!done))
+		return IRQ_NONE;
+
+	/* clear done bits */
+	gdma_dma_write(dma_dev, done_reg, done);
+
+	while (done) {
+		if (done & 0x1) {
+			gdma_dma_chan_irq(dma_dev, &dma_dev->chan[i]);
+			atomic_dec(&dma_dev->cnt);
+		}
+		done >>= 1;
+		i++;
+	}
+
+	/* start the tasklet only if there is work to do */
+	if (dma_dev->chan_issued)
+		tasklet_schedule(&dma_dev->task);
+
+	return IRQ_HANDLED;
+}
+
+static void gdma_dma_issue_pending(struct dma_chan *c)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
+		if (gdma_next_desc(chan)) {
+			set_bit(chan->id, &dma_dev->chan_issued);
+			tasklet_schedule(&dma_dev->task);
+		} else
+			dev_dbg(dma_dev->ddev.dev, "chan %d no desc to issue\n",
+				chan->id);
+	}
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
+static struct dma_async_tx_descriptor *gdma_dma_prep_slave_sg(
+		struct dma_chan *c, struct scatterlist *sgl,
+		unsigned int sg_len, enum dma_transfer_direction direction,
+		unsigned long flags, void *context)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct gdma_dma_desc *desc;
+	struct scatterlist *sg;
+	unsigned int i;
+
+	desc = gdma_dma_alloc_desc(sg_len);
+	if (!desc) {
+		dev_err(c->device->dev, "alloc sg desc error\n");
+		return NULL;
+	}
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		if (direction == DMA_MEM_TO_DEV)
+			desc->sg[i].src_addr = sg_dma_address(sg);
+		else if (direction == DMA_DEV_TO_MEM)
+			desc->sg[i].dst_addr = sg_dma_address(sg);
+		else {
+			dev_err(c->device->dev, "direction type %d error\n",
+				direction);
+			goto free_desc;
+		}
+
+		if (unlikely(sg_dma_len(sg) > GDMA_REG_CTRL0_TX_MASK)) {
+			dev_err(c->device->dev, "sg len too large %d\n",
+				sg_dma_len(sg));
+			goto free_desc;
+		}
+		desc->sg[i].len = sg_dma_len(sg);
+		desc->residue += sg_dma_len(sg);
+	}
+
+	desc->num_sgs = sg_len;
+	desc->direction = direction;
+	desc->cyclic = false;
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+
+free_desc:
+	kfree(desc);
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *gdma_dma_prep_dma_memcpy(
+		struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
+		size_t len, unsigned long flags)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct gdma_dma_desc *desc;
+	unsigned int num_periods, i;
+	size_t xfer_count;
+
+	if (len <= 0)
+		return NULL;
+
+	chan->burst_size = gdma_dma_maxburst(len >> 2);
+
+	xfer_count = GDMA_REG_CTRL0_TX_MASK;
+	num_periods = DIV_ROUND_UP(len, xfer_count);
+
+	desc = gdma_dma_alloc_desc(num_periods);
+	if (!desc) {
+		dev_err(c->device->dev, "alloc memcpy desc error\n");
+		return NULL;
+	}
+	desc->residue = len;
+
+	for (i = 0; i < num_periods; i++) {
+		desc->sg[i].src_addr = src;
+		desc->sg[i].dst_addr = dest;
+		if (len > xfer_count)
+			desc->sg[i].len = xfer_count;
+		else
+			desc->sg[i].len = len;
+		src += desc->sg[i].len;
+		dest += desc->sg[i].len;
+		len -= desc->sg[i].len;
+	}
+
+	desc->num_sgs = num_periods;
+	desc->direction = DMA_MEM_TO_MEM;
+	desc->cyclic = false;
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
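+
+/*
+ * Editorial note: a memcpy request is split into sg entries of at most
+ * GDMA_REG_CTRL0_TX_MASK (0xffff) bytes each, e.g. len = 150000 gives
+ * DIV_ROUND_UP(150000, 65535) = 3 segments of 65535, 65535 and 18930 bytes.
+ */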
+
+static struct dma_async_tx_descriptor *gdma_dma_prep_dma_cyclic(
+	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
+	size_t period_len, enum dma_transfer_direction direction,
+	unsigned long flags)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct gdma_dma_desc *desc;
+	unsigned int num_periods, i;
+
+	if (buf_len % period_len)
+		return NULL;
+
+	if (period_len > GDMA_REG_CTRL0_TX_MASK) {
+		dev_err(c->device->dev, "cyclic len too large %zu\n",
+			period_len);
+		return NULL;
+	}
+
+	num_periods = buf_len / period_len;
+	desc = gdma_dma_alloc_desc(num_periods);
+	if (!desc) {
+		dev_err(c->device->dev, "alloc cyclic desc error\n");
+		return NULL;
+	}
+	desc->residue = buf_len;
+
+	for (i = 0; i < num_periods; i++) {
+		if (direction == DMA_MEM_TO_DEV)
+			desc->sg[i].src_addr = buf_addr;
+		else if (direction == DMA_DEV_TO_MEM)
+			desc->sg[i].dst_addr = buf_addr;
+		else {
+			dev_err(c->device->dev, "direction type %d error\n",
+				direction);
+			goto free_desc;
+		}
+		desc->sg[i].len = period_len;
+		buf_addr += period_len;
+	}
+
+	desc->num_sgs = num_periods;
+	desc->direction = direction;
+	desc->cyclic = true;
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+
+free_desc:
+	kfree(desc);
+	return NULL;
+}
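+
+/*
+ * Illustrative use (editorial, hypothetical numbers): a client with a
+ * 64 KiB ring buffer split into 4 KiB periods would call
+ *
+ *	dmaengine_prep_dma_cyclic(chan, buf, 0x10000, 0x1000,
+ *				  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+ *
+ * giving num_periods = 16, with the period callback firing after each
+ * completed 4 KiB segment.
+ */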
+
+static enum dma_status gdma_dma_tx_status(struct dma_chan *c,
+		dma_cookie_t cookie, struct dma_tx_state *state)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct virt_dma_desc *vdesc;
+	enum dma_status status;
+	unsigned long flags;
+	struct gdma_dma_desc *desc;
+
+	status = dma_cookie_status(c, cookie, state);
+	if (status == DMA_COMPLETE || !state)
+		return status;
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	desc = chan->desc;
+	if (desc && (cookie == desc->vdesc.tx.cookie)) {
+		/*
+		 * We never update desc->residue in the cyclic case, so
+		 * report the remaining room to the end of the circular
+		 * buffer instead.
+		 */
+		if (desc->cyclic)
+			state->residue = desc->residue -
+				((chan->next_sg - 1) * desc->sg[0].len);
+		else
+			state->residue = desc->residue;
+	} else if ((vdesc = vchan_find_desc(&chan->vchan, cookie)))
+		state->residue = to_gdma_dma_desc(vdesc)->residue;
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+	dev_dbg(c->device->dev, "tx residue %d bytes\n", state->residue);
+
+	return status;
+}
+
+static void gdma_dma_free_chan_resources(struct dma_chan *c)
+{
+	vchan_free_chan_resources(to_virt_chan(c));
+}
+
+static void gdma_dma_desc_free(struct virt_dma_desc *vdesc)
+{
+	kfree(container_of(vdesc, struct gdma_dma_desc, vdesc));
+}
+
+static void gdma_dma_tasklet(unsigned long arg)
+{
+	struct gdma_dma_dev *dma_dev = (struct gdma_dma_dev *)arg;
+	struct gdma_dmaengine_chan *chan;
+	static unsigned int last_chan;
+	unsigned int i, chan_mask;
+
+	/* record the last chan to round-robin all chans */
+	i = last_chan;
+	chan_mask = dma_dev->data->chancnt - 1;
+	do {
+		/*
+		 * on mt7621, when verifying with dmatest with all
+		 * channels enabled, we need to limit it to two channels
+		 * working at the same time; otherwise the data gets
+		 * corrupted.
+		 */
+		if (atomic_read(&dma_dev->cnt) >= 2) {
+			last_chan = i;
+			break;
+		}
+
+		if (test_and_clear_bit(i, &dma_dev->chan_issued)) {
+			chan = &dma_dev->chan[i];
+			if (chan->desc) {
+				atomic_inc(&dma_dev->cnt);
+				gdma_start_transfer(dma_dev, chan);
+			} else
+				dev_dbg(dma_dev->ddev.dev, "chan %d no desc to issue\n",
+					chan->id);
+
+			if (!dma_dev->chan_issued)
+				break;
+		}
+
+		i = (i + 1) & chan_mask;
+	} while (i != last_chan);
+}
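+
+/*
+ * Editorial note: the (i + 1) & chan_mask walk above assumes
+ * data->chancnt is a power of two (8 on RT305x, 16 on RT3883), so the
+ * index wraps around, e.g. (15 + 1) & 15 = 0, without a modulo.
+ */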
+
+static void rt305x_gdma_init(struct gdma_dma_dev *dma_dev)
+{
+	uint32_t gct;
+
+	/* all chans round robin */
+	gdma_dma_write(dma_dev, GDMA_RT305X_GCT, GDMA_REG_GCT_ARBIT_RR);
+
+	gct = gdma_dma_read(dma_dev, GDMA_RT305X_GCT);
+	dev_info(dma_dev->ddev.dev, "revision: %d, channels: %d\n",
+		(gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK,
+		8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) &
+			GDMA_REG_GCT_CHAN_MASK));
+}
+
+static void rt3883_gdma_init(struct gdma_dma_dev *dma_dev)
+{
+	uint32_t gct;
+
+	/* all chans round robin */
+	gdma_dma_write(dma_dev, GDMA_REG_GCT, GDMA_REG_GCT_ARBIT_RR);
+
+	gct = gdma_dma_read(dma_dev, GDMA_REG_GCT);
+	dev_info(dma_dev->ddev.dev, "revision: %d, channels: %d\n",
+		(gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK,
+		8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) &
+			GDMA_REG_GCT_CHAN_MASK));
+}
+
+static struct gdma_data rt305x_gdma_data = {
+	.chancnt = 8,
+	.done_int_reg = GDMA_RT305X_STATUS_INT,
+	.init = rt305x_gdma_init,
+	.start_transfer = rt305x_gdma_start_transfer,
+};
+
+static struct gdma_data rt3883_gdma_data = {
+	.chancnt = 16,
+	.done_int_reg = GDMA_REG_DONE_INT,
+	.init = rt3883_gdma_init,
+	.start_transfer = rt3883_gdma_start_transfer,
+};
+
+static const struct of_device_id gdma_of_match_table[] = {
+	{ .compatible = "ralink,rt305x-gdma", .data = &rt305x_gdma_data },
+	{ .compatible = "ralink,rt3883-gdma", .data = &rt3883_gdma_data },
+	{ },
+};
+
+static int gdma_dma_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *match;
+	struct gdma_dmaengine_chan *chan;
+	struct gdma_dma_dev *dma_dev;
+	struct dma_device *dd;
+	unsigned int i;
+	struct resource *res;
+	int ret;
+	int irq;
+	void __iomem *base;
+	struct gdma_data *data;
+
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
+	match = of_match_device(gdma_of_match_table, &pdev->dev);
+	if (!match)
+		return -EINVAL;
+	data = (struct gdma_data *)match->data;
+
+	dma_dev = devm_kzalloc(&pdev->dev, sizeof(*dma_dev) +
+		(sizeof(struct gdma_dmaengine_chan) * data->chancnt),
+		GFP_KERNEL);
+	if (!dma_dev) {
+		dev_err(&pdev->dev, "alloc dma device failed\n");
+		return -EINVAL;
+	}
+	dma_dev->data = data;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+	dma_dev->base = base;
+	tasklet_init(&dma_dev->task, gdma_dma_tasklet, (unsigned long)dma_dev);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "failed to get irq\n");
+		return -EINVAL;
+	}
+	ret = devm_request_irq(&pdev->dev, irq, gdma_dma_irq,
+		0, dev_name(&pdev->dev), dma_dev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to request irq\n");
+		return ret;
+	}
+
+	device_reset(&pdev->dev);
+
+	dd = &dma_dev->ddev;
+	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
+	dma_cap_set(DMA_SLAVE, dd->cap_mask);
+	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
+	dd->device_free_chan_resources = gdma_dma_free_chan_resources;
+	dd->device_prep_dma_memcpy = gdma_dma_prep_dma_memcpy;
+	dd->device_prep_slave_sg = gdma_dma_prep_slave_sg;
+	dd->device_prep_dma_cyclic = gdma_dma_prep_dma_cyclic;
+	dd->device_config = gdma_dma_config;
+	dd->device_terminate_all = gdma_dma_terminate_all;
+	dd->device_tx_status = gdma_dma_tx_status;
+	dd->device_issue_pending = gdma_dma_issue_pending;
+
+	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+
+	dd->dev = &pdev->dev;
+	dd->dev->dma_parms = &dma_dev->dma_parms;
+	dma_set_max_seg_size(dd->dev, GDMA_REG_CTRL0_TX_MASK);
+	INIT_LIST_HEAD(&dd->channels);
+
+	for (i = 0; i < data->chancnt; i++) {
+		chan = &dma_dev->chan[i];
+		chan->id = i;
+		chan->vchan.desc_free = gdma_dma_desc_free;
+		vchan_init(&chan->vchan, dd);
+	}
+
+	/* init hardware */
+	data->init(dma_dev);
+
+	ret = dma_async_device_register(dd);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register dma device\n");
+		return ret;
+	}
+
+	ret = of_dma_controller_register(pdev->dev.of_node,
+		of_dma_xlate_by_chan_id, dma_dev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register of dma controller\n");
+		goto err_unregister;
+	}
+
+	platform_set_drvdata(pdev, dma_dev);
+
+	return 0;
+
+err_unregister:
+	dma_async_device_unregister(dd);
+	return ret;
+}
+
+static int gdma_dma_remove(struct platform_device *pdev)
+{
+	struct gdma_dma_dev *dma_dev = platform_get_drvdata(pdev);
+
+	tasklet_kill(&dma_dev->task);
+	of_dma_controller_free(pdev->dev.of_node);
+	dma_async_device_unregister(&dma_dev->ddev);
+
+	return 0;
+}
+
+static struct platform_driver gdma_dma_driver = {
+	.probe = gdma_dma_probe,
+	.remove = gdma_dma_remove,
+	.driver = {
+		.name = "gdma-rt2880",
+		.of_match_table = gdma_of_match_table,
+	},
+};
+module_platform_driver(gdma_dma_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Ralink/MTK DMA driver");
+MODULE_LICENSE("GPL v2");
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -534,6 +534,7 @@ static inline void dma_set_unmap(struct
 struct dmaengine_unmap_data *
 dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
 void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
 #else
 static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
 			 struct dmaengine_unmap_data *unmap)
--- /dev/null
+++ b/drivers/dma/mtk-hsdma.c
+/*
+ *  Copyright (C) 2015, Michael Lee <igvtee@gmail.com>
+ *  MTK HSDMA support
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; either version 2 of the License, or (at your
+ *  option) any later version.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_dma.h>
+#include <linux/reset.h>
+#include <linux/of_device.h>
+
+#include "virt-dma.h"
+
+#define HSDMA_BASE_OFFSET		0x800
+
+#define HSDMA_REG_TX_BASE		0x00
+#define HSDMA_REG_TX_CNT		0x04
+#define HSDMA_REG_TX_CTX		0x08
+#define HSDMA_REG_TX_DTX		0x0c
+#define HSDMA_REG_RX_BASE		0x100
+#define HSDMA_REG_RX_CNT		0x104
+#define HSDMA_REG_RX_CRX		0x108
+#define HSDMA_REG_RX_DRX		0x10c
+#define HSDMA_REG_INFO			0x200
+#define HSDMA_REG_GLO_CFG		0x204
+#define HSDMA_REG_RST_CFG		0x208
+#define HSDMA_REG_DELAY_INT		0x20c
+#define HSDMA_REG_FREEQ_THRES		0x210
+#define HSDMA_REG_INT_STATUS		0x220
+#define HSDMA_REG_INT_MASK		0x228
+#define HSDMA_REG_SCH_Q01		0x280
+#define HSDMA_REG_SCH_Q23		0x284
+
+#define HSDMA_DESCS_MAX			0xfff
+#define HSDMA_DESCS_NUM			8
+#define HSDMA_DESCS_MASK		(HSDMA_DESCS_NUM - 1)
+#define HSDMA_NEXT_DESC(x)		(((x) + 1) & HSDMA_DESCS_MASK)
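+
+/*
+ * Editorial note: with HSDMA_DESCS_NUM = 8 (a power of two), the ring
+ * index wraps via the mask, e.g. HSDMA_NEXT_DESC(7) = (7 + 1) & 7 = 0.
+ */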
+
+/* HSDMA_REG_INFO */
+#define HSDMA_INFO_INDEX_MASK		0xf
+#define HSDMA_INFO_INDEX_SHIFT		24
+#define HSDMA_INFO_BASE_MASK		0xff
+#define HSDMA_INFO_BASE_SHIFT		16
+#define HSDMA_INFO_RX_MASK		0xff
+#define HSDMA_INFO_RX_SHIFT		8
+#define HSDMA_INFO_TX_MASK		0xff
+#define HSDMA_INFO_TX_SHIFT		0
+
+/* HSDMA_REG_GLO_CFG */
+#define HSDMA_GLO_TX_2B_OFFSET		BIT(31)
+#define HSDMA_GLO_CLK_GATE		BIT(30)
+#define HSDMA_GLO_BYTE_SWAP		BIT(29)
+#define HSDMA_GLO_MULTI_DMA		BIT(10)
+#define HSDMA_GLO_TWO_BUF		BIT(9)
+#define HSDMA_GLO_32B_DESC		BIT(8)
+#define HSDMA_GLO_BIG_ENDIAN		BIT(7)
+#define HSDMA_GLO_TX_DONE		BIT(6)
+#define HSDMA_GLO_BT_MASK		0x3
+#define HSDMA_GLO_BT_SHIFT		4
+#define HSDMA_GLO_RX_BUSY		BIT(3)
+#define HSDMA_GLO_RX_DMA		BIT(2)
+#define HSDMA_GLO_TX_BUSY		BIT(1)
+#define HSDMA_GLO_TX_DMA		BIT(0)
+
+#define HSDMA_BT_SIZE_16BYTES		(0 << HSDMA_GLO_BT_SHIFT)
+#define HSDMA_BT_SIZE_32BYTES		(1 << HSDMA_GLO_BT_SHIFT)
+#define HSDMA_BT_SIZE_64BYTES		(2 << HSDMA_GLO_BT_SHIFT)
+#define HSDMA_BT_SIZE_128BYTES		(3 << HSDMA_GLO_BT_SHIFT)
+
+#define HSDMA_GLO_DEFAULT		(HSDMA_GLO_MULTI_DMA | \
+		HSDMA_GLO_RX_DMA | HSDMA_GLO_TX_DMA | HSDMA_BT_SIZE_32BYTES)
+
+/* HSDMA_REG_RST_CFG */
+#define HSDMA_RST_RX_SHIFT		16
+#define HSDMA_RST_TX_SHIFT		0
+
+/* HSDMA_REG_DELAY_INT */
+#define HSDMA_DELAY_INT_EN		BIT(15)
+#define HSDMA_DELAY_PEND_OFFSET		8
+#define HSDMA_DELAY_TIME_OFFSET		0
+#define HSDMA_DELAY_TX_OFFSET		16
+#define HSDMA_DELAY_RX_OFFSET		0
+
+#define HSDMA_DELAY_INIT(x)		(HSDMA_DELAY_INT_EN | \
+		((x) << HSDMA_DELAY_PEND_OFFSET))
+#define HSDMA_DELAY(x)			((HSDMA_DELAY_INIT(x) << \
+		HSDMA_DELAY_TX_OFFSET) | HSDMA_DELAY_INIT(x))
+
+/* HSDMA_REG_INT_STATUS */
+#define HSDMA_INT_DELAY_RX_COH		BIT(31)
+#define HSDMA_INT_DELAY_RX_INT		BIT(30)
+#define HSDMA_INT_DELAY_TX_COH		BIT(29)
+#define HSDMA_INT_DELAY_TX_INT		BIT(28)
+#define HSDMA_INT_RX_MASK		0x3
+#define HSDMA_INT_RX_SHIFT		16
+#define HSDMA_INT_RX_Q0			BIT(16)
+#define HSDMA_INT_TX_MASK		0xf
+#define HSDMA_INT_TX_SHIFT		0
+#define HSDMA_INT_TX_Q0			BIT(0)
+
+/* tx/rx dma desc flags */
+#define HSDMA_PLEN_MASK			0x3fff
+#define HSDMA_DESC_DONE			BIT(31)
+#define HSDMA_DESC_LS0			BIT(30)
+#define HSDMA_DESC_PLEN0(_x)		(((_x) & HSDMA_PLEN_MASK) << 16)
+#define HSDMA_DESC_TAG			BIT(15)
+#define HSDMA_DESC_LS1			BIT(14)
+#define HSDMA_DESC_PLEN1(_x)		((_x) & HSDMA_PLEN_MASK)
+
+/* align 4 bytes */
+#define HSDMA_ALIGN_SIZE		3
+/* align size 128bytes */
+#define HSDMA_MAX_PLEN			0x3f80
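+
+/*
+ * Editorial note: HSDMA_MAX_PLEN keeps each segment both within the
+ * 14-bit PLEN field (HSDMA_PLEN_MASK = 0x3fff) and 128-byte aligned:
+ * 0x3fff & ~0x7f = 0x3f80 (16256 bytes).
+ */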
+
+struct hsdma_desc {
+	u32 addr0;
+	u32 flags;
+	u32 addr1;
+	u32 unused;
+};
+
+struct mtk_hsdma_sg {
+	dma_addr_t src_addr;
+	dma_addr_t dst_addr;
+	u32 len;
+};
+
+struct mtk_hsdma_desc {
+	struct virt_dma_desc vdesc;
+	unsigned int num_sgs;
+	struct mtk_hsdma_sg sg[1];
+};
+
+struct mtk_hsdma_chan {
+	struct virt_dma_chan vchan;
+	unsigned int id;
+	dma_addr_t desc_addr;
+	int tx_idx;
+	int rx_idx;
+	struct hsdma_desc *tx_ring;
+	struct hsdma_desc *rx_ring;
+	struct mtk_hsdma_desc *desc;
+	unsigned int next_sg;
+};
+
+struct mtk_hsdam_engine {
+	struct dma_device ddev;
+	struct device_dma_parameters dma_parms;
+	void __iomem *base;
+	struct tasklet_struct task;
+	volatile unsigned long chan_issued;
+
+	struct mtk_hsdma_chan chan[1];
+};
+
+static inline struct mtk_hsdam_engine *mtk_hsdma_chan_get_dev(
+		struct mtk_hsdma_chan *chan)
+{
+	return container_of(chan->vchan.chan.device, struct mtk_hsdam_engine,
+			ddev);
+}
+
+static inline struct mtk_hsdma_chan *to_mtk_hsdma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct mtk_hsdma_chan, vchan.chan);
+}
+
+static inline struct mtk_hsdma_desc *to_mtk_hsdma_desc(
+		struct virt_dma_desc *vdesc)
+{
+	return container_of(vdesc, struct mtk_hsdma_desc, vdesc);
+}
+
+static inline u32 mtk_hsdma_read(struct mtk_hsdam_engine *hsdma, u32 reg)
+{
+	return readl(hsdma->base + reg);
+}
+
+static inline void mtk_hsdma_write(struct mtk_hsdam_engine *hsdma,
+		unsigned int reg, u32 val)
+{
+	writel(val, hsdma->base + reg);
+}
+
+static void mtk_hsdma_reset_chan(struct mtk_hsdam_engine *hsdma,
+		struct mtk_hsdma_chan *chan)
+{
+	chan->tx_idx = 0;
+	chan->rx_idx = HSDMA_DESCS_NUM - 1;
+
+	mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
+	mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
+
+	mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
+			0x1 << (chan->id + HSDMA_RST_TX_SHIFT));
+	mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
+			0x1 << (chan->id + HSDMA_RST_RX_SHIFT));
+}
+
+static void hsdma_dump_reg(struct mtk_hsdam_engine *hsdma)
+{
+	dev_dbg(hsdma->ddev.dev, "tbase %08x, tcnt %08x, tctx %08x, tdtx: %08x, rbase %08x, rcnt %08x, rctx %08x, rdtx %08x\n",
+		mtk_hsdma_read(hsdma, HSDMA_REG_TX_BASE),
+		mtk_hsdma_read(hsdma, HSDMA_REG_TX_CNT),
+		mtk_hsdma_read(hsdma, HSDMA_REG_TX_CTX),
+		mtk_hsdma_read(hsdma, HSDMA_REG_TX_DTX),
+		mtk_hsdma_read(hsdma, HSDMA_REG_RX_BASE),
+		mtk_hsdma_read(hsdma, HSDMA_REG_RX_CNT),
+		mtk_hsdma_read(hsdma, HSDMA_REG_RX_CRX),
+		mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX));
+
+	dev_dbg(hsdma->ddev.dev, "info %08x, glo %08x, delay %08x, intr_stat %08x, intr_mask %08x\n",
+		mtk_hsdma_read(hsdma, HSDMA_REG_INFO),
+		mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG),
+		mtk_hsdma_read(hsdma, HSDMA_REG_DELAY_INT),
+		mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS),
+		mtk_hsdma_read(hsdma, HSDMA_REG_INT_MASK));
+}
+
+static void hsdma_dump_desc(struct mtk_hsdam_engine *hsdma,
+		struct mtk_hsdma_chan *chan)
+{
+	struct hsdma_desc *tx_desc;
+	struct hsdma_desc *rx_desc;
+	int i;
+
+	dev_dbg(hsdma->ddev.dev, "tx idx: %d, rx idx: %d\n",
+		chan->tx_idx, chan->rx_idx);
+
+	for (i = 0; i < HSDMA_DESCS_NUM; i++) {
+		tx_desc = &chan->tx_ring[i];
+		rx_desc = &chan->rx_ring[i];
+
+		dev_dbg(hsdma->ddev.dev, "%d tx addr0: %08x, flags %08x, tx addr1: %08x, rx addr0 %08x, flags %08x\n",
+			i, tx_desc->addr0, tx_desc->flags,
+			tx_desc->addr1, rx_desc->addr0, rx_desc->flags);
+	}
+}
+
+static void mtk_hsdma_reset(struct mtk_hsdam_engine *hsdma,
+		struct mtk_hsdma_chan *chan)
+{
+	int i;
+
+	/* disable dma */
+	mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
+
+	/* disable intr */
+	mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
+
+	/* init desc value */
+	for (i = 0; i < HSDMA_DESCS_NUM; i++) {
+		chan->tx_ring[i].addr0 = 0;
+		chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
+	}
+	for (i = 0; i < HSDMA_DESCS_NUM; i++) {
+		chan->rx_ring[i].addr0 = 0;
+		chan->rx_ring[i].flags = 0;
+	}
+
+	/* reset */
+	mtk_hsdma_reset_chan(hsdma, chan);
+
+	/* enable intr */
+	mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
+
+	/* enable dma */
+	mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
+}
+
+static int mtk_hsdma_terminate_all(struct dma_chan *c)
+{
+	struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
+	struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
+	unsigned long timeout;
+	LIST_HEAD(head);
+
+	spin_lock_bh(&chan->vchan.lock);
+	chan->desc = NULL;
+	clear_bit(chan->id, &hsdma->chan_issued);
+	vchan_get_all_descriptors(&chan->vchan, &head);
+	spin_unlock_bh(&chan->vchan.lock);
+
+	vchan_dma_desc_free_list(&chan->vchan, &head);
+
+	/* wait for the dma transfer to complete */
+	timeout = jiffies + msecs_to_jiffies(2000);
+	while (mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG) &
+			(HSDMA_GLO_RX_BUSY | HSDMA_GLO_TX_BUSY)) {
+		if (time_after_eq(jiffies, timeout)) {
+			hsdma_dump_desc(hsdma, chan);
+			mtk_hsdma_reset(hsdma, chan);
+			dev_err(hsdma->ddev.dev, "timeout, reset it\n");
+			break;
+		}
+		cpu_relax();
+	}
+
+	return 0;
+}
+
+static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma,
+		struct mtk_hsdma_chan *chan)
+{
+	dma_addr_t src, dst;
+	size_t len, tlen;
+	struct hsdma_desc *tx_desc, *rx_desc;
+	struct mtk_hsdma_sg *sg;
+	unsigned int i;
+	int rx_idx;
+
+	sg = &chan->desc->sg[0];
+	len = sg->len;
+	chan->desc->num_sgs = DIV_ROUND_UP(len, HSDMA_MAX_PLEN);
+
+	/* tx desc */
+	src = sg->src_addr;
+	for (i = 0; i < chan->desc->num_sgs; i++) {
+		if (len > HSDMA_MAX_PLEN)
+			tlen = HSDMA_MAX_PLEN;
+		else
+			tlen = len;
+
+		if (i & 0x1) {
+			tx_desc->addr1 = src;
+			tx_desc->flags |= HSDMA_DESC_PLEN1(tlen);
+		} else {
+			tx_desc = &chan->tx_ring[chan->tx_idx];
+			tx_desc->addr0 = src;
+			tx_desc->flags = HSDMA_DESC_PLEN0(tlen);
+
+			/* update index */
+			chan->tx_idx = HSDMA_NEXT_DESC(chan->tx_idx);
+		}
+		src += tlen;
+		len -= tlen;
+	}
+	if (i & 0x1)
+		tx_desc->flags |= HSDMA_DESC_LS0;
+	else
+		tx_desc->flags |= HSDMA_DESC_LS1;
+
+	/* rx desc */
+	rx_idx = HSDMA_NEXT_DESC(chan->rx_idx);
+	len = sg->len;
+	dst = sg->dst_addr;
+	for (i = 0; i < chan->desc->num_sgs; i++) {
+		rx_desc = &chan->rx_ring[rx_idx];
+		if (len > HSDMA_MAX_PLEN)
+			tlen = HSDMA_MAX_PLEN;
+		else
+			tlen = len;
+
+		rx_desc->addr0 = dst;
+		rx_desc->flags = HSDMA_DESC_PLEN0(tlen);
+		dst += tlen;
+		len -= tlen;
+
+		/* update index */
+		rx_idx = HSDMA_NEXT_DESC(rx_idx);
+	}
+
+	/* make sure desc and index are all up to date */
+	wmb();
+	mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
+
+	return 0;
+}
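+
+/*
+ * Editorial note on the ring layout above: each hsdma_desc can carry two
+ * payloads. Even-numbered segments claim a fresh tx descriptor
+ * (addr0/PLEN0) and advance tx_idx; odd-numbered segments are packed into
+ * addr1/PLEN1 of the same descriptor. LS0/LS1 marks whichever slot holds
+ * the final segment.
+ */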
+
+static int gdma_next_desc(struct mtk_hsdma_chan *chan)
+{
+	struct virt_dma_desc *vdesc;
+
+	vdesc = vchan_next_desc(&chan->vchan);
+	if (!vdesc) {
+		chan->desc = NULL;
+		return 0;
+	}
+	chan->desc = to_mtk_hsdma_desc(vdesc);
+	chan->next_sg = 0;
+
+	return 1;
+}
+
+static void mtk_hsdma_chan_done(struct mtk_hsdam_engine *hsdma,
+		struct mtk_hsdma_chan *chan)
+{
+	struct mtk_hsdma_desc *desc;
+	int chan_issued = 0;
+
+	spin_lock_bh(&chan->vchan.lock);
+	desc = chan->desc;
+	if (likely(desc)) {
+		if (chan->next_sg == desc->num_sgs) {
+			list_del(&desc->vdesc.node);
+			vchan_cookie_complete(&desc->vdesc);
+			chan_issued = gdma_next_desc(chan);
+		}
+	} else
+		dev_dbg(hsdma->ddev.dev, "no desc to complete\n");
+
+	if (chan_issued)
+		set_bit(chan->id, &hsdma->chan_issued);
+	spin_unlock_bh(&chan->vchan.lock);
+}
+
+static irqreturn_t mtk_hsdma_irq(int irq, void *devid)
+{
+	struct mtk_hsdam_engine *hsdma = devid;
+	u32 status;
+
+	status = mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS);
+	if (unlikely(!status))
+		return IRQ_NONE;
+
+	if (likely(status & HSDMA_INT_RX_Q0))
+		tasklet_schedule(&hsdma->task);
+	else
+		dev_dbg(hsdma->ddev.dev, "unhandled irq status %08x\n",
+			status);
+
+	/* clear interrupt bits */
+	mtk_hsdma_write(hsdma, HSDMA_REG_INT_STATUS, status);
+
+	return IRQ_HANDLED;
+}
+
+static void mtk_hsdma_issue_pending(struct dma_chan *c)
+{
+	struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
+	struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
+
+	spin_lock_bh(&chan->vchan.lock);
+	if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
+		if (gdma_next_desc(chan)) {
+			set_bit(chan->id, &hsdma->chan_issued);
+			tasklet_schedule(&hsdma->task);
+		} else
+			dev_dbg(hsdma->ddev.dev, "no desc to issue\n");
+	}
+	spin_unlock_bh(&chan->vchan.lock);
+}
+
+static struct dma_async_tx_descriptor *mtk_hsdma_prep_dma_memcpy(
+		struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
+		size_t len, unsigned long flags)
+{
+	struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
+	struct mtk_hsdma_desc *desc;
+
+	if (len <= 0)
+		return NULL;
+
+	desc = kzalloc(sizeof(struct mtk_hsdma_desc), GFP_ATOMIC);
+	if (!desc) {
+		dev_err(c->device->dev, "alloc memcpy desc error\n");
+		return NULL;
+	}
+
+	desc->sg[0].src_addr = src;
+	desc->sg[0].dst_addr = dest;
+	desc->sg[0].len = len;
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
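+
+/*
+ * Illustrative client-side sketch (editorial, not part of this driver);
+ * dst, src and len are hypothetical, already DMA-mapped addresses:
+ *
+ *	struct dma_async_tx_descriptor *tx;
+ *	dma_cookie_t cookie;
+ *
+ *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
+ *				       DMA_PREP_INTERRUPT);
+ *	cookie = dmaengine_submit(tx);
+ *	dma_async_issue_pending(chan);
+ */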
+
+static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c,
+		dma_cookie_t cookie, struct dma_tx_state *state)
+{
+	return dma_cookie_status(c, cookie, state);
+}
+
+static void mtk_hsdma_free_chan_resources(struct dma_chan *c)
+{
+	vchan_free_chan_resources(to_virt_chan(c));
+}
+
+static void mtk_hsdma_desc_free(struct virt_dma_desc *vdesc)
+{
+	kfree(container_of(vdesc, struct mtk_hsdma_desc, vdesc));
+}
+
+static void mtk_hsdma_tx(struct mtk_hsdam_engine *hsdma)
+{
+	struct mtk_hsdma_chan *chan;
+
+	if (test_and_clear_bit(0, &hsdma->chan_issued)) {
+		chan = &hsdma->chan[0];
+		if (chan->desc)
+			mtk_hsdma_start_transfer(hsdma, chan);
+		else
+			dev_dbg(hsdma->ddev.dev, "chan 0 no desc to issue\n");
+	}
+}
+
+static void mtk_hsdma_rx(struct mtk_hsdam_engine *hsdma)
+{
+	struct mtk_hsdma_chan *chan;
+	int next_idx, drx_idx, cnt;
+
+	chan = &hsdma->chan[0];
+	next_idx = HSDMA_NEXT_DESC(chan->rx_idx);
+	drx_idx = mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX);
+
+	cnt = (drx_idx - next_idx) & HSDMA_DESCS_MASK;
+	if (!cnt)
+		return;
+
+	chan->next_sg += cnt;
+	chan->rx_idx = (chan->rx_idx + cnt) & HSDMA_DESCS_MASK;
+
+	/* update rx crx */
+	wmb();
+	mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
+
+	mtk_hsdma_chan_done(hsdma, chan);
+}
+
+static void mtk_hsdma_tasklet(unsigned long arg)
+{
+	struct mtk_hsdam_engine *hsdma = (struct mtk_hsdam_engine *)arg;
+
+	mtk_hsdma_rx(hsdma);
+	mtk_hsdma_tx(hsdma);
+}
+
+static int mtk_hsdam_alloc_desc(struct mtk_hsdam_engine *hsdma,
+		struct mtk_hsdma_chan *chan)
+{
+	int i;
+
+	chan->tx_ring = dma_alloc_coherent(hsdma->ddev.dev,
+			2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
+			&chan->desc_addr, GFP_ATOMIC | __GFP_ZERO);
+	if (!chan->tx_ring)
+		return -ENOMEM;
+
+	chan->rx_ring = &chan->tx_ring[HSDMA_DESCS_NUM];
+
+	/* init tx ring value */
+	for (i = 0; i < HSDMA_DESCS_NUM; i++)
+		chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
+
+	return 0;
+}
+
+static void mtk_hsdam_free_desc(struct mtk_hsdam_engine *hsdma,
+		struct mtk_hsdma_chan *chan)
+{
+	if (chan->tx_ring) {
+		dma_free_coherent(hsdma->ddev.dev,
+				2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
+				chan->tx_ring, chan->desc_addr);
+		chan->tx_ring = NULL;
+		chan->rx_ring = NULL;
+	}
+}
+
+static int mtk_hsdma_init(struct mtk_hsdam_engine *hsdma)
+{
+	struct mtk_hsdma_chan *chan;
+	int ret;
+	u32 reg;
+
+	/* init desc */
+	chan = &hsdma->chan[0];
+	ret = mtk_hsdam_alloc_desc(hsdma, chan);
+	if (ret)
+		return ret;
+
+	/* tx */
+	mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, chan->desc_addr);
+	mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, HSDMA_DESCS_NUM);
+	/* rx */
+	mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, chan->desc_addr +
+			(sizeof(struct hsdma_desc) * HSDMA_DESCS_NUM));
+	mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, HSDMA_DESCS_NUM);
+	/* reset */
+	mtk_hsdma_reset_chan(hsdma, chan);
+
+	/* enable rx intr */
+	mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
+
+	/* enable dma */
+	mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
+
+	/* hardware info */
+	reg = mtk_hsdma_read(hsdma, HSDMA_REG_INFO);
+	dev_info(hsdma->ddev.dev, "rx: %d, tx: %d\n",
+		 (reg >> HSDMA_INFO_RX_SHIFT) & HSDMA_INFO_RX_MASK,
+		 (reg >> HSDMA_INFO_TX_SHIFT) & HSDMA_INFO_TX_MASK);
+
+	hsdma_dump_reg(hsdma);
+
+	return ret;
+}
+
+static void mtk_hsdma_uninit(struct mtk_hsdam_engine *hsdma)
+{
+	struct mtk_hsdma_chan *chan;
+
+	/* disable dma */
+	mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
+
+	/* disable intr */
+	mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
+
+	/* free desc */
+	chan = &hsdma->chan[0];
+	mtk_hsdam_free_desc(hsdma, chan);
+
+	/* tx */
+	mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, 0);
+	mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, 0);
+	/* rx */
+	mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, 0);
+	mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, 0);
+	/* reset */
+	mtk_hsdma_reset_chan(hsdma, chan);
+}
+
+static const struct of_device_id mtk_hsdma_of_match[] = {
+	{ .compatible = "mediatek,mt7621-hsdma" },
+	{ },
+};
+
+static int mtk_hsdma_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *match;
+	struct mtk_hsdma_chan *chan;
+	struct mtk_hsdam_engine *hsdma;
+	struct dma_device *dd;
+	struct resource *res;
+	int ret;
+	int irq;
+	void __iomem *base;
+
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
+	match = of_match_device(mtk_hsdma_of_match, &pdev->dev);
+	if (!match)
+		return -EINVAL;
+
+	hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
+	if (!hsdma) {
+		dev_err(&pdev->dev, "alloc dma device failed\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+	hsdma->base = base + HSDMA_BASE_OFFSET;
+	tasklet_init(&hsdma->task, mtk_hsdma_tasklet, (unsigned long)hsdma);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "failed to get irq\n");
+		return -EINVAL;
+	}
+	ret = devm_request_irq(&pdev->dev, irq, mtk_hsdma_irq,
+			0, dev_name(&pdev->dev), hsdma);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to request irq\n");
+		return ret;
+	}
+
+	device_reset(&pdev->dev);
+
+	dd = &hsdma->ddev;
+	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
+	dd->copy_align = HSDMA_ALIGN_SIZE;
+	dd->device_free_chan_resources = mtk_hsdma_free_chan_resources;
+	dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy;
+	dd->device_terminate_all = mtk_hsdma_terminate_all;
+	dd->device_tx_status = mtk_hsdma_tx_status;
+	dd->device_issue_pending = mtk_hsdma_issue_pending;
+	dd->dev = &pdev->dev;
+	dd->dev->dma_parms = &hsdma->dma_parms;
+	dma_set_max_seg_size(dd->dev, HSDMA_MAX_PLEN);
+	INIT_LIST_HEAD(&dd->channels);
+
+	chan = &hsdma->chan[0];
+	chan->id = 0;
+	chan->vchan.desc_free = mtk_hsdma_desc_free;
+	vchan_init(&chan->vchan, dd);
+
+	/* init hardware */
+	ret = mtk_hsdma_init(hsdma);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to alloc ring descs\n");
+		return ret;
+	}
+
+	ret = dma_async_device_register(dd);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register dma device\n");
+		return ret;
+	}
+
+	ret = of_dma_controller_register(pdev->dev.of_node,
+			of_dma_xlate_by_chan_id, hsdma);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register of dma controller\n");
+		goto err_unregister;
+	}
+
+	platform_set_drvdata(pdev, hsdma);
+
+	return 0;
+
+err_unregister:
+	dma_async_device_unregister(dd);
+	return ret;
+}
+
+static int mtk_hsdma_remove(struct platform_device *pdev)
+{
+	struct mtk_hsdam_engine *hsdma = platform_get_drvdata(pdev);
+
+	mtk_hsdma_uninit(hsdma);
+
+	of_dma_controller_free(pdev->dev.of_node);
+	dma_async_device_unregister(&hsdma->ddev);
+
+	return 0;
+}
+
+static struct platform_driver mtk_hsdma_driver = {
+	.probe = mtk_hsdma_probe,
+	.remove = mtk_hsdma_remove,
+	.driver = {
+		.name = "hsdma-mt7621",
+		.of_match_table = mtk_hsdma_of_match,
+	},
+};
+module_platform_driver(mtk_hsdma_driver);
+
+MODULE_AUTHOR("Michael Lee <igvtee@gmail.com>");
+MODULE_DESCRIPTION("MTK HSDMA driver");
+MODULE_LICENSE("GPL v2");