1 From 8984e3fc6db029479d6aa78882b39235379aebff Mon Sep 17 00:00:00 2001
2 From: Andy Gross <agross@codeaurora.org>
3 Date: Wed, 14 May 2014 13:45:07 -0500
4 Subject: [PATCH 177/182] dmaengine: Add QCOM ADM DMA driver
6 Add the DMA engine driver for the QCOM Application Data Mover (ADM) DMA
7 controller found in the MSM8960 and IPQ/APQ8064 platforms.
9 The ADM supports both memory to memory transactions and memory
10 to/from peripheral device transactions. The controller also provides flow
11 control capabilities for transactions to/from peripheral devices.
13 The initial release of this driver supports slave transfers to/from peripherals
14 and also incorporates CRCI (client rate control interface) flow control.
16 Signed-off-by: Andy Gross <agross@codeaurora.org>
18 drivers/dma/Kconfig | 10 +
19 drivers/dma/Makefile | 1 +
20 drivers/dma/qcom_adm.c | 871 ++++++++++++++++++++++++++++++++++++++++++++++++
21 3 files changed, 882 insertions(+)
22 create mode 100644 drivers/dma/qcom_adm.c
24 --- a/drivers/dma/Kconfig
25 +++ b/drivers/dma/Kconfig
26 @@ -410,4 +410,14 @@ config QCOM_BAM_DMA
27 Enable support for the QCOM BAM DMA controller. This controller
28 provides DMA capabilities for a variety of on-chip devices.
31 + tristate "Qualcomm ADM support"
32 + depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
34 + select DMA_VIRTUAL_CHANNELS
36 + Enable support for the Qualcomm ADM DMA controller. This controller
37 + provides DMA capabilities for both general purpose and on-chip
41 --- a/drivers/dma/Makefile
42 +++ b/drivers/dma/Makefile
43 @@ -46,3 +46,4 @@ obj-$(CONFIG_K3_DMA) += k3dma.o
44 obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
45 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
46 obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
47 +obj-$(CONFIG_QCOM_ADM) += qcom_adm.o
49 +++ b/drivers/dma/qcom_adm.c
52 + * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
54 + * This program is free software; you can redistribute it and/or modify
55 + * it under the terms of the GNU General Public License version 2 and
56 + * only version 2 as published by the Free Software Foundation.
58 + * This program is distributed in the hope that it will be useful,
59 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
60 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
61 + * GNU General Public License for more details.
65 +#include <linux/kernel.h>
66 +#include <linux/io.h>
67 +#include <linux/init.h>
68 +#include <linux/slab.h>
69 +#include <linux/module.h>
70 +#include <linux/interrupt.h>
71 +#include <linux/dma-mapping.h>
72 +#include <linux/scatterlist.h>
73 +#include <linux/device.h>
74 +#include <linux/platform_device.h>
75 +#include <linux/of.h>
76 +#include <linux/of_address.h>
77 +#include <linux/of_irq.h>
78 +#include <linux/of_dma.h>
79 +#include <linux/reset.h>
80 +#include <linux/clk.h>
81 +#include <linux/dmaengine.h>
83 +#include "dmaengine.h"
84 +#include "virt-dma.h"
86 +/* ADM registers - calculated from channel number and security domain */
87 +#define HI_CH_CMD_PTR(chan, ee) (4*chan + 0x20800*ee)
88 +#define HI_CH_RSLT(chan, ee) (0x40 + 4*chan + 0x20800*ee)
89 +#define HI_CH_FLUSH_STATE0(chan, ee) (0x80 + 4*chan + 0x20800*ee)
90 +#define HI_CH_FLUSH_STATE1(chan, ee) (0xc0 + 4*chan + 0x20800*ee)
91 +#define HI_CH_FLUSH_STATE2(chan, ee) (0x100 + 4*chan + 0x20800*ee)
92 +#define HI_CH_FLUSH_STATE3(chan, ee) (0x140 + 4*chan + 0x20800*ee)
93 +#define HI_CH_FLUSH_STATE4(chan, ee) (0x180 + 4*chan + 0x20800*ee)
94 +#define HI_CH_FLUSH_STATE5(chan, ee) (0x1c0 + 4*chan + 0x20800*ee)
95 +#define HI_CH_STATUS_SD(chan, ee) (0x200 + 4*chan + 0x20800*ee)
96 +#define HI_CH_CONF(chan) (0x240 + 4*chan)
97 +#define HI_CH_RSLT_CONF(chan, ee) (0x300 + 4*chan + 0x20800*ee)
98 +#define HI_SEC_DOMAIN_IRQ_STATUS(ee) (0x380 + 0x20800*ee)
99 +#define HI_CI_CONF(ci) (0x390 + 4*ci)
100 +#define HI_CRCI_CONF0 0x3d0
101 +#define HI_CRCI_CONF1 0x3d4
102 +#define HI_GP_CTL 0x3d8
103 +#define HI_CRCI_CTL(chan, ee) (0x400 + 0x4*chan + 0x20800*ee)
105 +/* channel status */
106 +#define CH_STATUS_VALID BIT(1)
108 +/* channel result */
109 +#define CH_RSLT_VALID BIT(31)
110 +#define CH_RSLT_ERR BIT(3)
111 +#define CH_RSLT_FLUSH BIT(2)
112 +#define CH_RSLT_TPD BIT(1)
115 +#define CH_CONF_MPU_DISABLE BIT(11)
116 +#define CH_CONF_PERM_MPU_CONF BIT(9)
117 +#define CH_CONF_FLUSH_RSLT_EN BIT(8)
118 +#define CH_CONF_FORCE_RSLT_EN BIT(7)
119 +#define CH_CONF_IRQ_EN BIT(6)
121 +/* channel result conf */
122 +#define CH_RSLT_CONF_FLUSH_EN BIT(1)
123 +#define CH_RSLT_CONF_IRQ_EN BIT(0)
126 +#define CRCI_CTL_RST BIT(17)
128 +/* CI configuration */
129 +#define CI_RANGE_END(x) (x << 24)
130 +#define CI_RANGE_START(x) (x << 16)
131 +#define CI_BURST_4_WORDS 0x4
132 +#define CI_BURST_8_WORDS 0x8
135 +#define GP_CTL_LP_EN BIT(12)
136 +#define GP_CTL_LP_CNT(x) (x << 8)
138 +/* Command pointer list entry */
139 +#define CPLE_LP BIT(31)
141 +/* Command list entry */
142 +#define CMD_LC BIT(31)
143 +#define CMD_DST_CRCI(n) (((n) & 0xf) << 7)
144 +#define CMD_SRC_CRCI(n) (((n) & 0xf) << 3)
146 +#define CMD_TYPE_SINGLE 0x0
147 +#define CMD_TYPE_BOX 0x3
149 +#define ADM_DESC_ALIGN 8
150 +#define ADM_MAX_XFER (SZ_64K-1)
151 +#define ADM_MAX_ROWS (SZ_64K-1)
153 +/* Command Pointer List Entry */
154 +#define CMD_LP BIT(31)
155 +#define CMD_PT_MASK (0x3 << 29)
156 +#define CMD_ADDR_MASK 0x3fffffff
158 +struct adm_desc_hw {
167 +struct adm_cmd_ptr_list {
168 + u32 cple; /* command ptr list entry */
169 + struct adm_desc_hw desc[0];
172 +struct adm_async_desc {
173 + struct virt_dma_desc vd;
174 + struct adm_device *adev;
177 + enum dma_transfer_direction dir;
178 + dma_addr_t dma_addr;
181 + struct adm_cmd_ptr_list *cpl;
186 + struct virt_dma_chan vc;
187 + struct adm_device *adev;
189 + /* parsed from DT */
190 + u32 id; /* channel id */
191 + u32 crci; /* CRCI to be used for transfers */
192 + u32 blk_size; /* block size for CRCI, default 16 byte */
194 + struct adm_async_desc *curr_txd;
195 + struct dma_slave_config slave;
196 + struct list_head node;
202 +static inline struct adm_chan *to_adm_chan(struct dma_chan *common)
204 + return container_of(common, struct adm_chan, vc.chan);
208 + void __iomem *regs;
209 + struct device *dev;
210 + struct dma_device common;
211 + struct device_dma_parameters dma_parms;
212 + struct adm_chan *channels;
217 + struct clk *core_clk;
218 + struct clk *iface_clk;
220 + struct reset_control *clk_reset;
221 + struct reset_control *c0_reset;
222 + struct reset_control *c1_reset;
223 + struct reset_control *c2_reset;
228 + * adm_alloc_chan - Allocates channel resources for DMA channel
230 + * This function is effectively a stub, as we don't need to set up any resources
232 +static int adm_alloc_chan(struct dma_chan *chan)
238 + * adm_free_chan - Frees dma resources associated with the specific channel
240 + * Free all allocated descriptors associated with this channel
243 +static void adm_free_chan(struct dma_chan *chan)
245 + /* free all queued descriptors */
246 + vchan_free_chan_resources(to_virt_chan(chan));
250 + * adm_prep_slave_sg - Prep slave sg transaction
252 + * @chan: dma channel
253 + * @sgl: scatter gather list
254 + * @sg_len: length of sg
255 + * @direction: DMA transfer direction
256 + * @flags: DMA flags
257 + * @context: transfer context (unused)
259 +static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
260 + struct scatterlist *sgl, unsigned int sg_len,
261 + enum dma_transfer_direction direction, unsigned long flags,
264 + struct adm_chan *achan = to_adm_chan(chan);
265 + struct adm_device *adev = achan->adev;
266 + struct adm_async_desc *async_desc;
267 + struct scatterlist *sg;
268 + u32 i, rows, num_desc = 0, idx = 0, desc_offset;
269 + struct adm_desc_hw *desc;
270 + struct adm_cmd_ptr_list *cpl;
271 + u32 burst = ADM_MAX_XFER;
274 + if (!is_slave_direction(direction)) {
275 + dev_err(adev->dev, "invalid dma direction\n");
279 + /* if using CRCI flow control, validate burst settings */
280 + if (achan->slave.device_fc) {
281 + burst = (direction == DMA_MEM_TO_DEV) ?
282 + achan->slave.dst_maxburst :
283 + achan->slave.src_maxburst;
286 + dev_err(adev->dev, "invalid burst value w/ crci: %d\n",
288 + return ERR_PTR(-EINVAL);
292 + /* iterate through sgs and compute allocation size of structures */
293 + for_each_sg(sgl, sg, sg_len, i) {
295 + /* calculate boxes using burst */
296 + rows = DIV_ROUND_UP(sg_dma_len(sg), burst);
297 + num_desc += DIV_ROUND_UP(rows, ADM_MAX_ROWS);
299 + /* flow control requires length as a multiple of burst */
300 + if (achan->slave.device_fc && (sg_dma_len(sg) % burst)) {
301 + dev_err(adev->dev, "length is not multiple of burst\n");
302 + return ERR_PTR(-EINVAL);
306 + async_desc = kzalloc(sizeof(*async_desc), GFP_NOWAIT);
308 + return ERR_PTR(-ENOMEM);
310 + async_desc->dma_len = num_desc * sizeof(*desc) + sizeof(*cpl) +
312 + async_desc->cpl = dma_alloc_writecombine(adev->dev, async_desc->dma_len,
313 + &async_desc->dma_addr, GFP_NOWAIT);
315 + if (!async_desc->cpl) {
317 + return ERR_PTR(-ENOMEM);
320 + async_desc->num_desc = num_desc;
321 + async_desc->adev = adev;
322 + cpl = PTR_ALIGN(async_desc->cpl, ADM_DESC_ALIGN);
323 + desc = PTR_ALIGN(&cpl->desc[0], ADM_DESC_ALIGN);
324 + desc_offset = (u32)desc - (u32)async_desc->cpl;
326 + /* init cmd list */
327 + cpl->cple |= CPLE_LP;
328 + cpl->cple |= (async_desc->dma_addr + desc_offset) >> 3;
330 + for_each_sg(sgl, sg, sg_len, i) {
331 + unsigned int remainder = sg_dma_len(sg);
332 + unsigned int curr_offset = 0;
333 + unsigned int row_len;
336 + desc[idx].cmd = CMD_TYPE_BOX;
337 + desc[idx].row_offset = 0;
339 + if (direction == DMA_DEV_TO_MEM) {
340 + desc[idx].dst_addr = sg_dma_address(sg) +
342 + desc[idx].src_addr = achan->slave.src_addr;
343 + desc[idx].cmd |= CMD_SRC_CRCI(achan->crci);
344 + desc[idx].row_offset = burst;
346 + desc[idx].src_addr = sg_dma_address(sg) +
348 + desc[idx].dst_addr = achan->slave.dst_addr;
349 + desc[idx].cmd |= CMD_DST_CRCI(achan->crci);
350 + desc[idx].row_offset = burst << 16;
353 + if (remainder < burst) {
355 + row_len = remainder;
357 + rows = remainder / burst;
358 + rows = min_t(u32, rows, ADM_MAX_ROWS);
362 + desc[idx].num_rows = rows << 16 | rows;
363 + desc[idx].row_len = row_len << 16 | row_len;
365 + remainder -= row_len * rows;
366 + async_desc->length += row_len * rows;
367 + curr_offset += row_len * rows;
370 + } while (remainder > 0);
373 + /* set last command flag */
374 + desc[idx - 1].cmd |= CMD_LC;
376 + /* reset channel error */
379 + return vchan_tx_prep(&achan->vc, &async_desc->vd, flags);
383 + * adm_slave_config - set slave configuration for channel
384 + * @chan: dma channel
385 + * @cfg: slave configuration
387 + * Sets slave configuration for channel
390 +static int adm_slave_config(struct adm_chan *achan,
391 + struct dma_slave_config *cfg)
395 + struct adm_device *adev = achan->adev;
397 + memcpy(&achan->slave, cfg, sizeof(*cfg));
399 + /* set channel CRCI burst, if applicable */
401 + burst = max_t(u32, cfg->src_maxburst, cfg->dst_maxburst);
405 + achan->blk_size = 0;
408 + achan->blk_size = 1;
411 + achan->blk_size = 2;
414 + achan->blk_size = 3;
417 + achan->blk_size = 4;
420 + achan->blk_size = 5;
423 + achan->slave.src_maxburst = 0;
424 + achan->slave.dst_maxburst = 0;
430 + writel(achan->blk_size,
431 + adev->regs + HI_CRCI_CTL(achan->id, adev->ee));
438 + * adm_terminate_all - terminate all transactions on a channel
439 + * @achan: adm dma channel
441 + * Dequeues and frees all transactions, aborts current transaction
442 + * No callbacks are done
445 +static void adm_terminate_all(struct adm_chan *achan)
447 + struct adm_device *adev = achan->adev;
448 + unsigned long flags;
451 + /* send flush command to terminate current transaction */
452 + writel_relaxed(0x0,
453 + adev->regs + HI_CH_FLUSH_STATE0(achan->id, adev->ee));
455 + spin_lock_irqsave(&achan->vc.lock, flags);
456 + vchan_get_all_descriptors(&achan->vc, &head);
457 + spin_unlock_irqrestore(&achan->vc.lock, flags);
459 + vchan_dma_desc_free_list(&achan->vc, &head);
463 + * adm_control - DMA device control
464 + * @chan: dma channel
465 + * @cmd: control cmd
466 + * @arg: cmd argument
468 + * Perform DMA control command
471 +static int adm_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
474 + struct adm_chan *achan = to_adm_chan(chan);
475 + unsigned long flag;
479 + case DMA_SLAVE_CONFIG:
480 + spin_lock_irqsave(&achan->vc.lock, flag);
481 + ret = adm_slave_config(achan, (struct dma_slave_config *)arg);
482 + spin_unlock_irqrestore(&achan->vc.lock, flag);
485 + case DMA_TERMINATE_ALL:
486 + adm_terminate_all(achan);
498 + * adm_start_dma - start next transaction
499 + * @achan: ADM dma channel
501 +static void adm_start_dma(struct adm_chan *achan)
503 + struct virt_dma_desc *vd = vchan_next_desc(&achan->vc);
504 + struct adm_device *adev = achan->adev;
505 + struct adm_async_desc *async_desc;
506 + struct adm_desc_hw *desc;
507 + struct adm_cmd_ptr_list *cpl;
509 + lockdep_assert_held(&achan->vc.lock);
514 + list_del(&vd->node);
516 + /* write next command list out to the CMD FIFO */
517 + async_desc = container_of(vd, struct adm_async_desc, vd);
518 + achan->curr_txd = async_desc;
520 + cpl = PTR_ALIGN(async_desc->cpl, ADM_DESC_ALIGN);
521 + desc = PTR_ALIGN(&cpl->desc[0], ADM_DESC_ALIGN);
523 + if (!achan->initialized) {
524 + /* enable interrupts */
525 + writel(CH_CONF_IRQ_EN | CH_CONF_FLUSH_RSLT_EN |
526 + CH_CONF_FORCE_RSLT_EN | CH_CONF_PERM_MPU_CONF |
527 + CH_CONF_MPU_DISABLE,
528 + adev->regs + HI_CH_CONF(achan->id));
530 + writel(CH_RSLT_CONF_IRQ_EN | CH_RSLT_CONF_FLUSH_EN,
531 + adev->regs + HI_CH_RSLT_CONF(achan->id, adev->ee));
534 + writel(achan->blk_size, adev->regs +
535 + HI_CRCI_CTL(achan->crci, adev->ee));
537 + achan->initialized = 1;
540 + /* make sure IRQ enable doesn't get reordered */
543 + /* write next command list out to the CMD FIFO */
544 + writel(round_up(async_desc->dma_addr, ADM_DESC_ALIGN) >> 3,
545 + adev->regs + HI_CH_CMD_PTR(achan->id, adev->ee));
549 + * adm_dma_irq - irq handler for ADM controller
550 + * @irq: IRQ of interrupt
551 + * @data: callback data
553 + * IRQ handler for the ADM controller
555 +static irqreturn_t adm_dma_irq(int irq, void *data)
557 + struct adm_device *adev = data;
559 + struct adm_async_desc *async_desc;
560 + unsigned long flags;
562 + srcs = readl_relaxed(adev->regs +
563 + HI_SEC_DOMAIN_IRQ_STATUS(adev->ee));
565 + for (i = 0; i < 16; i++) {
566 + struct adm_chan *achan = &adev->channels[i];
567 + u32 status, result;
568 + if (srcs & BIT(i)) {
569 + status = readl_relaxed(adev->regs +
570 + HI_CH_STATUS_SD(i, adev->ee));
572 + /* if no result present, skip */
573 + if (!(status & CH_STATUS_VALID))
576 + result = readl_relaxed(adev->regs +
577 + HI_CH_RSLT(i, adev->ee));
579 + /* no valid results, skip */
580 + if (!(result & CH_RSLT_VALID))
583 + /* flag error if transaction was flushed or failed */
584 + if (result & (CH_RSLT_ERR | CH_RSLT_FLUSH))
587 + spin_lock_irqsave(&achan->vc.lock, flags);
588 + async_desc = achan->curr_txd;
590 + achan->curr_txd = NULL;
593 + vchan_cookie_complete(&async_desc->vd);
595 + /* kick off next DMA */
596 + adm_start_dma(achan);
599 + spin_unlock_irqrestore(&achan->vc.lock, flags);
603 + return IRQ_HANDLED;
607 + * adm_tx_status - returns status of transaction
608 + * @chan: dma channel
609 + * @cookie: transaction cookie
610 + * @txstate: DMA transaction state
612 + * Return status of dma transaction
614 +static enum dma_status adm_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
615 + struct dma_tx_state *txstate)
617 + struct adm_chan *achan = to_adm_chan(chan);
618 + struct virt_dma_desc *vd;
619 + enum dma_status ret;
620 + unsigned long flags;
621 + size_t residue = 0;
623 + ret = dma_cookie_status(chan, cookie, txstate);
625 + spin_lock_irqsave(&achan->vc.lock, flags);
627 + vd = vchan_find_desc(&achan->vc, cookie);
629 + residue = container_of(vd, struct adm_async_desc, vd)->length;
630 + else if (achan->curr_txd && achan->curr_txd->vd.tx.cookie == cookie)
631 + residue = achan->curr_txd->length;
633 + spin_unlock_irqrestore(&achan->vc.lock, flags);
635 + dma_set_residue(txstate, residue);
643 +static struct dma_chan *adm_dma_xlate(struct of_phandle_args *dma_spec,
646 + struct adm_device *adev = container_of(of->of_dma_data,
647 + struct adm_device, common);
648 + struct adm_chan *achan;
649 + struct dma_chan *chan;
650 + unsigned int request;
653 + if (dma_spec->args_count != 2) {
654 + dev_err(adev->dev, "incorrect number of dma arguments\n");
658 + request = dma_spec->args[0];
659 + if (request >= adev->num_channels)
662 + crci = dma_spec->args[1];
664 + chan = dma_get_slave_channel(&(adev->channels[request].vc.chan));
669 + achan = to_adm_chan(chan);
670 + achan->crci = crci;
676 + * adm_issue_pending - starts pending transactions
677 + * @chan: dma channel
679 + * Issues all pending transactions and starts DMA
681 +static void adm_issue_pending(struct dma_chan *chan)
683 + struct adm_chan *achan = to_adm_chan(chan);
684 + unsigned long flags;
686 + spin_lock_irqsave(&achan->vc.lock, flags);
688 + if (vchan_issue_pending(&achan->vc) && !achan->curr_txd)
689 + adm_start_dma(achan);
690 + spin_unlock_irqrestore(&achan->vc.lock, flags);
694 + * adm_dma_free_desc - free descriptor memory
695 + * @vd: virtual descriptor
698 +static void adm_dma_free_desc(struct virt_dma_desc *vd)
700 + struct adm_async_desc *async_desc = container_of(vd,
701 + struct adm_async_desc, vd);
703 + dma_free_writecombine(async_desc->adev->dev, async_desc->dma_len,
704 + async_desc->cpl, async_desc->dma_addr);
708 +static void adm_channel_init(struct adm_device *adev, struct adm_chan *achan,
712 + achan->adev = adev;
714 + vchan_init(&achan->vc, &adev->common);
715 + achan->vc.desc_free = adm_dma_free_desc;
718 +static int adm_dma_probe(struct platform_device *pdev)
720 + struct adm_device *adev;
721 + struct resource *iores;
725 + adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
729 + adev->dev = &pdev->dev;
731 + iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
732 + adev->regs = devm_ioremap_resource(&pdev->dev, iores);
733 + if (IS_ERR(adev->regs))
734 + return PTR_ERR(adev->regs);
736 + adev->irq = platform_get_irq(pdev, 0);
740 + ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &adev->ee);
742 + dev_err(adev->dev, "Execution environment unspecified\n");
746 + adev->core_clk = devm_clk_get(adev->dev, "core");
747 + if (IS_ERR(adev->core_clk))
748 + return PTR_ERR(adev->core_clk);
750 + ret = clk_prepare_enable(adev->core_clk);
752 + dev_err(adev->dev, "failed to prepare/enable core clock\n");
756 + adev->iface_clk = devm_clk_get(adev->dev, "iface");
757 + if (IS_ERR(adev->iface_clk))
758 + return PTR_ERR(adev->iface_clk);
760 + ret = clk_prepare_enable(adev->iface_clk);
762 + dev_err(adev->dev, "failed to prepare/enable iface clock\n");
766 + adev->clk_reset = devm_reset_control_get(&pdev->dev, "clk");
767 + if (IS_ERR(adev->clk_reset)) {
768 + dev_err(adev->dev, "failed to get ADM0 reset\n");
769 + return PTR_ERR(adev->clk_reset);
772 + adev->c0_reset = devm_reset_control_get(&pdev->dev, "c0");
773 + if (IS_ERR(adev->c0_reset)) {
774 + dev_err(adev->dev, "failed to get ADM0 C0 reset\n");
775 + return PTR_ERR(adev->c0_reset);
778 + adev->c1_reset = devm_reset_control_get(&pdev->dev, "c1");
779 + if (IS_ERR(adev->c1_reset)) {
780 + dev_err(adev->dev, "failed to get ADM0 C1 reset\n");
781 + return PTR_ERR(adev->c1_reset);
784 + adev->c2_reset = devm_reset_control_get(&pdev->dev, "c2");
785 + if (IS_ERR(adev->c2_reset)) {
786 + dev_err(adev->dev, "failed to get ADM0 C2 reset\n");
787 + return PTR_ERR(adev->c2_reset);
790 + reset_control_assert(adev->clk_reset);
791 + reset_control_assert(adev->c0_reset);
792 + reset_control_assert(adev->c1_reset);
793 + reset_control_assert(adev->c2_reset);
795 + reset_control_deassert(adev->clk_reset);
796 + reset_control_deassert(adev->c0_reset);
797 + reset_control_deassert(adev->c1_reset);
798 + reset_control_deassert(adev->c2_reset);
800 + adev->num_channels = 16;
802 + adev->channels = devm_kcalloc(adev->dev, adev->num_channels,
803 + sizeof(*adev->channels), GFP_KERNEL);
805 + if (!adev->channels) {
807 + goto err_disable_clk;
810 + /* allocate and initialize channels */
811 + INIT_LIST_HEAD(&adev->common.channels);
813 + for (i = 0; i < adev->num_channels; i++)
814 + adm_channel_init(adev, &adev->channels[i], i);
817 + for (i = 0; i < 16; i++)
818 + writel(CRCI_CTL_RST, adev->regs + HI_CRCI_CTL(i, adev->ee));
820 + /* configure client interfaces */
821 + writel(CI_RANGE_START(0x40) | CI_RANGE_END(0xb0) | CI_BURST_8_WORDS,
822 + adev->regs + HI_CI_CONF(0));
823 + writel(CI_RANGE_START(0x2a) | CI_RANGE_END(0x2c) | CI_BURST_8_WORDS,
824 + adev->regs + HI_CI_CONF(1));
825 + writel(CI_RANGE_START(0x12) | CI_RANGE_END(0x28) | CI_BURST_8_WORDS,
826 + adev->regs + HI_CI_CONF(2));
827 + writel(GP_CTL_LP_EN | GP_CTL_LP_CNT(0xf), adev->regs + HI_GP_CTL);
829 + ret = devm_request_irq(adev->dev, adev->irq, adm_dma_irq,
830 + 0, "adm_dma", adev);
832 + goto err_disable_clk;
834 + platform_set_drvdata(pdev, adev);
836 + adev->common.dev = adev->dev;
837 + adev->common.dev->dma_parms = &adev->dma_parms;
839 + /* set capabilities */
840 + dma_cap_zero(adev->common.cap_mask);
841 + dma_cap_set(DMA_SLAVE, adev->common.cap_mask);
842 + dma_cap_set(DMA_PRIVATE, adev->common.cap_mask);
844 + /* initialize dmaengine apis */
845 + adev->common.device_alloc_chan_resources = adm_alloc_chan;
846 + adev->common.device_free_chan_resources = adm_free_chan;
847 + adev->common.device_prep_slave_sg = adm_prep_slave_sg;
848 + adev->common.device_control = adm_control;
849 + adev->common.device_issue_pending = adm_issue_pending;
850 + adev->common.device_tx_status = adm_tx_status;
852 + ret = dma_async_device_register(&adev->common);
854 + dev_err(adev->dev, "failed to register dma async device\n");
855 + goto err_disable_clk;
858 + ret = of_dma_controller_register(pdev->dev.of_node, adm_dma_xlate,
861 + goto err_unregister_dma;
866 + dma_async_device_unregister(&adev->common);
868 + clk_disable_unprepare(adev->core_clk);
869 + clk_disable_unprepare(adev->iface_clk);
874 +static int adm_dma_remove(struct platform_device *pdev)
876 + struct adm_device *adev = platform_get_drvdata(pdev);
877 + struct adm_chan *achan;
880 + of_dma_controller_free(pdev->dev.of_node);
881 + dma_async_device_unregister(&adev->common);
883 + devm_free_irq(adev->dev, adev->irq, adev);
885 + for (i = 0; i < adev->num_channels; i++) {
886 + achan = &adev->channels[i];
887 + writel(CH_CONF_FLUSH_RSLT_EN,
888 + adev->regs + HI_CH_CONF(achan->id));
889 + writel(CH_RSLT_CONF_FLUSH_EN,
890 + adev->regs + HI_CH_RSLT_CONF(achan->id, adev->ee));
892 + adm_terminate_all(&adev->channels[i]);
895 + clk_disable_unprepare(adev->core_clk);
896 + clk_disable_unprepare(adev->iface_clk);
901 +static const struct of_device_id adm_of_match[] = {
902 + { .compatible = "qcom,adm", },
905 +MODULE_DEVICE_TABLE(of, adm_of_match);
907 +static struct platform_driver adm_dma_driver = {
908 + .probe = adm_dma_probe,
909 + .remove = adm_dma_remove,
911 + .name = "adm-dma-engine",
912 + .owner = THIS_MODULE,
913 + .of_match_table = adm_of_match,
917 +module_platform_driver(adm_dma_driver);
919 +MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
920 +MODULE_DESCRIPTION("QCOM ADM DMA engine driver");
921 +MODULE_LICENSE("GPL v2");