From 2d7e32d4825e20e9db4f0dff6b3e3c25c8c7ad7d Mon Sep 17 00:00:00 2001
From: John Crispin <blogic@openwrt.org>
Date: Tue, 3 Dec 2013 17:05:05 +0100
Subject: [PATCH 111/133] DMA: ralink: add rt2880 dma engine

Signed-off-by: John Crispin <blogic@openwrt.org>
---
 drivers/dma/Kconfig       |   6 +
 drivers/dma/Makefile      |   1 +
 drivers/dma/dmaengine.c   |  26 ++
 drivers/dma/ralink-gdma.c | 577 +++++++++++++++++++++++++++++++++++++++++++++
 include/linux/dmaengine.h |   1 +
 5 files changed, 611 insertions(+)
 create mode 100644 drivers/dma/ralink-gdma.c
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -312,6 +312,12 @@ config MMP_PDMA
 	help
 	  Support the MMP PDMA engine for PXA and MMP platfrom.
 
+config DMA_RALINK
+	tristate "RALINK DMA support"
+	depends on RALINK && SOC_MT7620
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -38,3 +38,4 @@ obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
 obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
+obj-$(CONFIG_DMA_RALINK) += ralink-gdma.o
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -504,6 +504,32 @@ static struct dma_chan *private_candidat
 }
 
 /**
+ * dma_get_slave_channel - try to get specific channel exclusively
+ * @chan: target channel
+ */
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
+{
+	int err = -EBUSY;
+
+	/* lock against __dma_request_channel */
+	mutex_lock(&dma_list_mutex);
+
+	if (chan->client_count == 0) {
+		err = dma_chan_get(chan);
+		if (err)
+			pr_debug("%s: failed to get %s: (%d)\n",
+				__func__, dma_chan_name(chan), err);
+	} else
+		chan = NULL;
+
+	mutex_unlock(&dma_list_mutex);
+
+	return chan;
+}
+EXPORT_SYMBOL_GPL(dma_get_slave_channel);
+
+/**
  * dma_request_channel - try to allocate an exclusive channel
  * @mask: capabilities that the channel must satisfy
  * @fn: optional callback to disposition available channels
--- /dev/null
+++ b/drivers/dma/ralink-gdma.c
@@ -0,0 +1,577 @@
+/*
+ *  Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
+ *  GDMA4740 DMAC support
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_dma.h>
+
+#include "virt-dma.h"
+
+#define GDMA_NR_CHANS			16
+
+#define GDMA_REG_SRC_ADDR(x)		(0x00 + (x) * 0x10)
+#define GDMA_REG_DST_ADDR(x)		(0x04 + (x) * 0x10)
+
+#define GDMA_REG_CTRL0(x)		(0x08 + (x) * 0x10)
+#define GDMA_REG_CTRL0_TX_MASK		0xffff
+#define GDMA_REG_CTRL0_TX_SHIFT		16
+#define GDMA_REG_CTRL0_CURR_MASK	0xff
+#define GDMA_REG_CTRL0_CURR_SHIFT	8
+#define GDMA_REG_CTRL0_SRC_ADDR_FIXED	BIT(7)
+#define GDMA_REG_CTRL0_DST_ADDR_FIXED	BIT(6)
+#define GDMA_REG_CTRL0_BURST_MASK	0x7
+#define GDMA_REG_CTRL0_BURST_SHIFT	3
+#define GDMA_REG_CTRL0_DONE_INT		BIT(2)
+#define GDMA_REG_CTRL0_ENABLE		BIT(1)
+#define GDMA_REG_CTRL0_HW_MODE		0
+
+#define GDMA_REG_CTRL1(x)		(0x0c + (x) * 0x10)
+#define GDMA_REG_CTRL1_SEG_MASK		0xf
+#define GDMA_REG_CTRL1_SEG_SHIFT	22
+#define GDMA_REG_CTRL1_REQ_MASK		0x3f
+#define GDMA_REG_CTRL1_SRC_REQ_SHIFT	16
+#define GDMA_REG_CTRL1_DST_REQ_SHIFT	8
+#define GDMA_REG_CTRL1_CONTINOUS	BIT(14)
+#define GDMA_REG_CTRL1_NEXT_MASK	0x1f
+#define GDMA_REG_CTRL1_NEXT_SHIFT	3
+#define GDMA_REG_CTRL1_COHERENT		BIT(2)
+#define GDMA_REG_CTRL1_FAIL		BIT(1)
+#define GDMA_REG_CTRL1_MASK		BIT(0)
+
+#define GDMA_REG_UNMASK_INT		0x200
+#define GDMA_REG_DONE_INT		0x204
+
+#define GDMA_REG_GCT			0x220
+#define GDMA_REG_GCT_CHAN_MASK		0x3
+#define GDMA_REG_GCT_CHAN_SHIFT		3
+#define GDMA_REG_GCT_VER_MASK		0x3
+#define GDMA_REG_GCT_VER_SHIFT		1
+#define GDMA_REG_GCT_ARBIT_RR		BIT(0)
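+
+/*
+ * The global control register (GCT) reports the hardware revision and the
+ * number of implemented channels (decoded as 8 << field by the probe
+ * routine below); GDMA_REG_GCT_ARBIT_RR, going by its name, appears to
+ * select round-robin arbitration between the channels.
+ */
+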
+enum gdma_dma_transfer_size {
+	GDMA_TRANSFER_SIZE_4BYTE	= 0,
+	GDMA_TRANSFER_SIZE_8BYTE	= 1,
+	GDMA_TRANSFER_SIZE_16BYTE	= 2,
+	GDMA_TRANSFER_SIZE_32BYTE	= 3,
+};
+
+struct gdma_dma_sg {
+	dma_addr_t addr;
+	unsigned int len;
+};
+
+struct gdma_dma_desc {
+	struct virt_dma_desc vdesc;
+
+	enum dma_transfer_direction direction;
+	bool cyclic;
+
+	unsigned int num_sgs;
+	struct gdma_dma_sg sg[];
+};
+
+struct gdma_dmaengine_chan {
+	struct virt_dma_chan vchan;
+	unsigned int id;
+
+	dma_addr_t fifo_addr;
+	unsigned int transfer_shift;
+
+	struct gdma_dma_desc *desc;
+	unsigned int next_sg;
+};
+
+struct gdma_dma_dev {
+	struct dma_device ddev;
+	void __iomem *base;
+
+	struct gdma_dmaengine_chan chan[GDMA_NR_CHANS];
+};
+
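+/*
+ * A descriptor is a flat array of hardware segments (gdma_dma_sg). The
+ * channel tracks the descriptor in flight plus the index of the next
+ * segment (next_sg) to program into the per-channel SRC/DST/CTRL
+ * registers; cyclic transfers simply let next_sg wrap around.
+ */
+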
+static struct gdma_dma_dev *gdma_dma_chan_get_dev(
+	struct gdma_dmaengine_chan *chan)
+{
+	return container_of(chan->vchan.chan.device, struct gdma_dma_dev,
+		ddev);
+}
+
+static struct gdma_dmaengine_chan *to_gdma_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct gdma_dmaengine_chan, vchan.chan);
+}
+
+static struct gdma_dma_desc *to_gdma_dma_desc(struct virt_dma_desc *vdesc)
+{
+	return container_of(vdesc, struct gdma_dma_desc, vdesc);
+}
+
+static inline uint32_t gdma_dma_read(struct gdma_dma_dev *dma_dev,
+	unsigned int reg)
+{
+	return readl(dma_dev->base + reg);
+}
+
+static inline void gdma_dma_write(struct gdma_dma_dev *dma_dev,
+	unsigned int reg, uint32_t val)
+{
+	writel(val, dma_dev->base + reg);
+}
+
+static inline void gdma_dma_write_mask(struct gdma_dma_dev *dma_dev,
+	unsigned int reg, uint32_t val, uint32_t mask)
+{
+	uint32_t tmp;
+
+	tmp = gdma_dma_read(dma_dev, reg);
+	tmp &= ~mask;
+	tmp |= val;
+	gdma_dma_write(dma_dev, reg, tmp);
+}
+
+static struct gdma_dma_desc *gdma_dma_alloc_desc(unsigned int num_sgs)
+{
+	return kzalloc(sizeof(struct gdma_dma_desc) +
+		sizeof(struct gdma_dma_sg) * num_sgs, GFP_ATOMIC);
+}
+
+static enum gdma_dma_transfer_size gdma_dma_maxburst(u32 maxburst)
+{
+	if (maxburst <= 7)
+		return GDMA_TRANSFER_SIZE_4BYTE;
+	else if (maxburst <= 15)
+		return GDMA_TRANSFER_SIZE_8BYTE;
+	else if (maxburst <= 31)
+		return GDMA_TRANSFER_SIZE_16BYTE;
+
+	return GDMA_TRANSFER_SIZE_32BYTE;
+}
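+
+/*
+ * Illustrative only (not part of this driver): a client hands the request
+ * line and FIFO address to the callback below through the generic
+ * dmaengine API. PHYS_FIFO and req_id are placeholders:
+ *
+ *	struct dma_slave_config cfg = {
+ *		.direction = DMA_MEM_TO_DEV,
+ *		.dst_addr = PHYS_FIFO,
+ *		.dst_maxburst = 16,
+ *		.slave_id = req_id,
+ *	};
+ *	dmaengine_slave_config(chan, &cfg);
+ */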
+static int gdma_dma_slave_config(struct dma_chan *c,
+	const struct dma_slave_config *config)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
+	enum gdma_dma_transfer_size transfer_size;
+	uint32_t flags;
+	uint32_t ctrl0, ctrl1;
+
+	switch (config->direction) {
+	case DMA_MEM_TO_DEV:
+		ctrl1 = 32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT;
+		ctrl1 |= config->slave_id << GDMA_REG_CTRL1_DST_REQ_SHIFT;
+		flags = GDMA_REG_CTRL0_DST_ADDR_FIXED;
+		transfer_size = gdma_dma_maxburst(config->dst_maxburst);
+		chan->fifo_addr = config->dst_addr;
+		break;
+
+	case DMA_DEV_TO_MEM:
+		ctrl1 = config->slave_id << GDMA_REG_CTRL1_SRC_REQ_SHIFT;
+		ctrl1 |= 32 << GDMA_REG_CTRL1_DST_REQ_SHIFT;
+		flags = GDMA_REG_CTRL0_SRC_ADDR_FIXED;
+		transfer_size = gdma_dma_maxburst(config->src_maxburst);
+		chan->fifo_addr = config->src_addr;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	chan->transfer_shift = 1 + transfer_size;
+
+	ctrl0 = flags | GDMA_REG_CTRL0_HW_MODE;
+	ctrl0 |= GDMA_REG_CTRL0_DONE_INT;
+
+	ctrl1 &= ~(GDMA_REG_CTRL1_NEXT_MASK << GDMA_REG_CTRL1_NEXT_SHIFT);
+	ctrl1 |= chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;
+	ctrl1 |= GDMA_REG_CTRL1_FAIL;
+	ctrl1 &= ~GDMA_REG_CTRL1_CONTINOUS;
+	gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0);
+	gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1);
+
+	return 0;
+}
+
+static int gdma_dma_terminate_all(struct dma_chan *c)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL0(chan->id), 0,
+			GDMA_REG_CTRL0_ENABLE);
+	chan->desc = NULL;
+	vchan_get_all_descriptors(&chan->vchan, &head);
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+	vchan_dma_desc_free_list(&chan->vchan, &head);
+
+	return 0;
+}
+
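+/*
+ * On this kernel generation DMA_SLAVE_CONFIG and DMA_TERMINATE_ALL arrive
+ * multiplexed through the device_control hook; later kernels split these
+ * into dedicated device_config/device_terminate_all callbacks.
+ */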
+static int gdma_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+	unsigned long arg)
+{
+	struct dma_slave_config *config = (struct dma_slave_config *)arg;
+
+	switch (cmd) {
+	case DMA_SLAVE_CONFIG:
+		return gdma_dma_slave_config(chan, config);
+	case DMA_TERMINATE_ALL:
+		return gdma_dma_terminate_all(chan);
+	default:
+		return -ENOSYS;
+	}
+}
+
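+/*
+ * Program the next segment of the current descriptor: disable the channel,
+ * fetch a descriptor from the virt-dma queue if none is active, write the
+ * source/destination addresses and transfer count, then re-enable and
+ * unmask the channel. Callers hold the vchan lock.
+ */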
+static int gdma_dma_start_transfer(struct gdma_dmaengine_chan *chan)
+{
+	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
+	dma_addr_t src_addr, dst_addr;
+	struct virt_dma_desc *vdesc;
+	struct gdma_dma_sg *sg;
+
+	gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL0(chan->id), 0,
+			GDMA_REG_CTRL0_ENABLE);
+
+	if (!chan->desc) {
+		vdesc = vchan_next_desc(&chan->vchan);
+		if (!vdesc)
+			return 0;
+		chan->desc = to_gdma_dma_desc(vdesc);
+		chan->next_sg = 0;
+	}
+
+	if (chan->next_sg == chan->desc->num_sgs)
+		chan->next_sg = 0;
+
+	sg = &chan->desc->sg[chan->next_sg];
+
+	if (chan->desc->direction == DMA_MEM_TO_DEV) {
+		src_addr = sg->addr;
+		dst_addr = chan->fifo_addr;
+	} else {
+		src_addr = chan->fifo_addr;
+		dst_addr = sg->addr;
+	}
+	gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr);
+	gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr);
+	gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL0(chan->id),
+		(sg->len << GDMA_REG_CTRL0_TX_SHIFT) | GDMA_REG_CTRL0_ENABLE,
+		GDMA_REG_CTRL0_TX_MASK << GDMA_REG_CTRL0_TX_SHIFT);
+	chan->next_sg++;
+	gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL1(chan->id), 0,
+		GDMA_REG_CTRL1_MASK);
+
+	return 0;
+}
+
+static void gdma_dma_chan_irq(struct gdma_dmaengine_chan *chan)
+{
+	spin_lock(&chan->vchan.lock);
+
+	if (chan->desc && chan->desc->cyclic) {
+		vchan_cyclic_callback(&chan->desc->vdesc);
+	} else if (chan->desc) {
+		if (chan->next_sg == chan->desc->num_sgs) {
+			list_del(&chan->desc->vdesc.node);
+			vchan_cookie_complete(&chan->desc->vdesc);
+			chan->desc = NULL;
+		}
+	}
+	gdma_dma_start_transfer(chan);
+	spin_unlock(&chan->vchan.lock);
+}
+
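+/*
+ * Interrupts are acknowledged by writing the just-read pending bits back
+ * to GDMA_REG_UNMASK_INT and GDMA_REG_DONE_INT, so only events observed
+ * in this pass are cleared.
+ */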
+static irqreturn_t gdma_dma_irq(int irq, void *devid)
+{
+	struct gdma_dma_dev *dma_dev = devid;
+	uint32_t unmask, done;
+	unsigned int i;
+
+	unmask = gdma_dma_read(dma_dev, GDMA_REG_UNMASK_INT);
+	gdma_dma_write(dma_dev, GDMA_REG_UNMASK_INT, unmask);
+	done = gdma_dma_read(dma_dev, GDMA_REG_DONE_INT);
+
+	for (i = 0; i < GDMA_NR_CHANS; ++i)
+		if (done & BIT(i))
+			gdma_dma_chan_irq(&dma_dev->chan[i]);
+	gdma_dma_write(dma_dev, GDMA_REG_DONE_INT, done);
+
+	return IRQ_HANDLED;
+}
+
+static void gdma_dma_issue_pending(struct dma_chan *c)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	if (vchan_issue_pending(&chan->vchan) && !chan->desc)
+		gdma_dma_start_transfer(chan);
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
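+/*
+ * Each scatterlist entry maps 1:1 onto a hardware segment. The descriptor
+ * is only queued here; it is first programmed into the engine from
+ * issue_pending or from the completion interrupt.
+ */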
+static struct dma_async_tx_descriptor *gdma_dma_prep_slave_sg(
+	struct dma_chan *c, struct scatterlist *sgl,
+	unsigned int sg_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct gdma_dma_desc *desc;
+	struct scatterlist *sg;
+	unsigned int i;
+
+	desc = gdma_dma_alloc_desc(sg_len);
+	if (!desc)
+		return NULL;
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		desc->sg[i].addr = sg_dma_address(sg);
+		desc->sg[i].len = sg_dma_len(sg);
+	}
+
+	desc->num_sgs = sg_len;
+	desc->direction = direction;
+	desc->cyclic = false;
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static struct dma_async_tx_descriptor *gdma_dma_prep_dma_cyclic(
+	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
+	size_t period_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct gdma_dma_desc *desc;
+	unsigned int num_periods, i;
+
+	if (buf_len % period_len)
+		return NULL;
+
+	num_periods = buf_len / period_len;
+
+	desc = gdma_dma_alloc_desc(num_periods);
+	if (!desc)
+		return NULL;
+
+	for (i = 0; i < num_periods; i++) {
+		desc->sg[i].addr = buf_addr;
+		desc->sg[i].len = period_len;
+		buf_addr += period_len;
+	}
+
+	desc->num_sgs = num_periods;
+	desc->direction = direction;
+	desc->cyclic = true;
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
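+/*
+ * The residue is the sum of the segments not yet programmed plus, for the
+ * segment in flight, the engine's current transfer count scaled by
+ * transfer_shift.
+ */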
+static size_t gdma_dma_desc_residue(struct gdma_dmaengine_chan *chan,
+	struct gdma_dma_desc *desc, unsigned int next_sg)
+{
+	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
+	unsigned int residue, count;
+	unsigned int i;
+
+	residue = 0;
+
+	for (i = next_sg; i < desc->num_sgs; i++)
+		residue += desc->sg[i].len;
+
+	if (next_sg != 0) {
+		count = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
+		count >>= GDMA_REG_CTRL0_CURR_SHIFT;
+		count &= GDMA_REG_CTRL0_CURR_MASK;
+		residue += count << chan->transfer_shift;
+	}
+
+	return residue;
+}
+
+static enum dma_status gdma_dma_tx_status(struct dma_chan *c,
+	dma_cookie_t cookie, struct dma_tx_state *state)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct virt_dma_desc *vdesc;
+	enum dma_status status;
+	unsigned long flags;
+
+	status = dma_cookie_status(c, cookie, state);
+	if (status == DMA_SUCCESS || !state)
+		return status;
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	vdesc = vchan_find_desc(&chan->vchan, cookie);
+	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie) {
+		state->residue = gdma_dma_desc_residue(chan, chan->desc,
+			chan->next_sg);
+	} else if (vdesc) {
+		state->residue = gdma_dma_desc_residue(chan,
+			to_gdma_dma_desc(vdesc), 0);
+	} else {
+		state->residue = 0;
+	}
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+	return status;
+}
+
+static int gdma_dma_alloc_chan_resources(struct dma_chan *c)
+{
+	return 0;
+}
+
+static void gdma_dma_free_chan_resources(struct dma_chan *c)
+{
+	vchan_free_chan_resources(to_virt_chan(c));
+}
+
+static void gdma_dma_desc_free(struct virt_dma_desc *vdesc)
+{
+	kfree(container_of(vdesc, struct gdma_dma_desc, vdesc));
+}
+
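+/*
+ * One dma-cell in the device tree: the raw channel index. The channel is
+ * claimed exclusively via dma_get_slave_channel(), the core helper this
+ * patch adds to dmaengine.c.
+ */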
+static struct dma_chan *
+of_dma_xlate_by_chan_id(struct of_phandle_args *dma_spec,
+			struct of_dma *ofdma)
+{
+	struct gdma_dma_dev *dma_dev = ofdma->of_dma_data;
+	unsigned int request = dma_spec->args[0];
+
+	if (request >= GDMA_NR_CHANS)
+		return NULL;
+
+	return dma_get_slave_channel(&(dma_dev->chan[request].vchan.chan));
+}
+
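+/*
+ * Illustrative binding usage (node names and values are placeholders, not
+ * mandated by this patch):
+ *
+ *	gdma: dma-controller@2800 {
+ *		compatible = "ralink,rt2880-gdma";
+ *		#dma-cells = <1>;
+ *	};
+ *
+ *	uart {
+ *		dmas = <&gdma 4>;
+ *		dma-names = "tx";
+ *	};
+ */
+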
+static int gdma_dma_probe(struct platform_device *pdev)
+{
+	struct gdma_dmaengine_chan *chan;
+	struct gdma_dma_dev *dma_dev;
+	struct dma_device *dd;
+	unsigned int i;
+	struct resource *res;
+	uint32_t gct;
+	int irq;
+	int ret;
+
+	dma_dev = devm_kzalloc(&pdev->dev, sizeof(*dma_dev), GFP_KERNEL);
+	if (!dma_dev)
+		return -ENOMEM;
+
+	dd = &dma_dev->ddev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_dev->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(dma_dev->base))
+		return PTR_ERR(dma_dev->base);
+
+	dma_cap_set(DMA_SLAVE, dd->cap_mask);
+	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
+	dd->device_alloc_chan_resources = gdma_dma_alloc_chan_resources;
+	dd->device_free_chan_resources = gdma_dma_free_chan_resources;
+	dd->device_tx_status = gdma_dma_tx_status;
+	dd->device_issue_pending = gdma_dma_issue_pending;
+	dd->device_prep_slave_sg = gdma_dma_prep_slave_sg;
+	dd->device_prep_dma_cyclic = gdma_dma_prep_dma_cyclic;
+	dd->device_control = gdma_dma_control;
+	dd->dev = &pdev->dev;
+	dd->chancnt = GDMA_NR_CHANS;
+	INIT_LIST_HEAD(&dd->channels);
+
+	for (i = 0; i < dd->chancnt; i++) {
+		chan = &dma_dev->chan[i];
+		chan->id = i;
+		chan->vchan.desc_free = gdma_dma_desc_free;
+		vchan_init(&chan->vchan, dd);
+	}
+
+	ret = dma_async_device_register(dd);
+	if (ret)
+		return ret;
+
+	ret = of_dma_controller_register(pdev->dev.of_node,
+		of_dma_xlate_by_chan_id, dma_dev);
+	if (ret)
+		goto err_unregister;
+
+	irq = platform_get_irq(pdev, 0);
+	ret = request_irq(irq, gdma_dma_irq, 0, dev_name(&pdev->dev), dma_dev);
+	if (ret)
+		goto err_unregister;
+
+	gdma_dma_write(dma_dev, GDMA_REG_UNMASK_INT, 0);
+	gdma_dma_write(dma_dev, GDMA_REG_DONE_INT, BIT(dd->chancnt) - 1);
+
+	gct = gdma_dma_read(dma_dev, GDMA_REG_GCT);
+	dev_info(&pdev->dev, "revision: %d, channels: %d\n",
+		(gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK,
+		8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) & GDMA_REG_GCT_CHAN_MASK));
+	platform_set_drvdata(pdev, dma_dev);
+
+	gdma_dma_write(dma_dev, GDMA_REG_GCT, GDMA_REG_GCT_ARBIT_RR);
+
+	return 0;
+
+err_unregister:
+	dma_async_device_unregister(dd);
+	return ret;
+}
+
+static int gdma_dma_remove(struct platform_device *pdev)
+{
+	struct gdma_dma_dev *dma_dev = platform_get_drvdata(pdev);
+	int irq = platform_get_irq(pdev, 0);
+
+	free_irq(irq, dma_dev);
+	of_dma_controller_free(pdev->dev.of_node);
+	dma_async_device_unregister(&dma_dev->ddev);
+
+	return 0;
+}
+
+static const struct of_device_id gdma_of_match_table[] = {
+	{ .compatible = "ralink,rt2880-gdma" },
+	{ },
+};
+
+static struct platform_driver gdma_dma_driver = {
+	.probe = gdma_dma_probe,
+	.remove = gdma_dma_remove,
+	.driver = {
+		.name = "gdma-rt2880",
+		.owner = THIS_MODULE,
+		.of_match_table = gdma_of_match_table,
+	},
+};
+module_platform_driver(gdma_dma_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("GDMA4740 DMA driver");
+MODULE_LICENSE("GPL v2");
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -999,6 +999,7 @@ static inline void dma_release_channel(s
 int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
 struct dma_chan *net_dma_find_channel(void);
 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)