From 5c9f8c2dbdbe53818bcde6aa6695e1331e5f841f Mon Sep 17 00:00:00 2001
From: Jonathan McDowell <noodles@earth.li>
Date: Sat, 14 Nov 2020 14:02:33 +0000
Subject: dmaengine: qcom: Add ADM driver

Add the DMA engine driver for the QCOM Application Data Mover (ADM) DMA
controller found in the MSM8x60 and IPQ/APQ8064 platforms.

The ADM supports both memory to memory transactions and memory
to/from peripheral device transactions. The controller also provides
flow control capabilities for transactions to/from peripheral devices.

The initial release of this driver supports slave transfers to/from
peripherals and also incorporates CRCI (client rate control interface)
flow control for peripherals that require it.

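For illustration (not part of this patch), a peripheral driver would
drive the ADM through the generic dmaengine slave API. A minimal
sketch, with a hypothetical "rx" channel name and made-up CRCI and
burst values:

  /* Illustrative only: channel name, CRCI and burst are assumptions. */
  static int example_adm_client_setup(struct device *dev, dma_addr_t fifo)
  {
          struct dma_slave_config cfg = {
                  .direction = DMA_DEV_TO_MEM,
                  .src_addr = fifo,
                  .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                  .src_maxburst = 16,   /* must match a valid CRCI block size */
                  .device_fc = true,    /* request CRCI flow control */
                  .slave_id = 1,        /* CRCI number from the DT binding */
          };
          struct dma_chan *chan = dma_request_chan(dev, "rx");

          if (IS_ERR(chan))
                  return PTR_ERR(chan);
          return dmaengine_slave_config(chan, &cfg);
  }
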
The hardware only supports a 32-bit physical address, so specifying
!PHYS_ADDR_T_64BIT gives maximum COMPILE_TEST coverage without having to
spend effort on kludging things in the code that will never actually be
needed on real hardware.

Signed-off-by: Andy Gross <agross@codeaurora.org>
Signed-off-by: Thomas Pedersen <twp@codeaurora.org>
Signed-off-by: Jonathan McDowell <noodles@earth.li>
Link: https://lore.kernel.org/r/20201114140233.GM32650@earth.li
Signed-off-by: Vinod Koul <vkoul@kernel.org>
---
 drivers/dma/qcom/Kconfig    |  11 +
 drivers/dma/qcom/Makefile   |   1 +
 drivers/dma/qcom/qcom_adm.c | 903 ++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 915 insertions(+)
 create mode 100644 drivers/dma/qcom/qcom_adm.c

--- a/drivers/dma/qcom/Kconfig
+++ b/drivers/dma/qcom/Kconfig
 # SPDX-License-Identifier: GPL-2.0-only
+config QCOM_ADM
+	tristate "Qualcomm ADM support"
+	depends on (ARCH_QCOM || COMPILE_TEST) && !PHYS_ADDR_T_64BIT
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the Qualcomm Application Data Mover (ADM) DMA
+	  controller, as present on MSM8x60, APQ8064, and IPQ8064 devices.
+	  This controller provides DMA capabilities for both general purpose
+	  and on-chip peripheral devices.
+
 config QCOM_BAM_DMA
 	tristate "QCOM BAM DMA support"
 	depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
--- a/drivers/dma/qcom/Makefile
+++ b/drivers/dma/qcom/Makefile
 # SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_QCOM_ADM) += qcom_adm.o
 obj-$(CONFIG_QCOM_BAM_DMA) += bam_dma.o
 obj-$(CONFIG_QCOM_HIDMA_MGMT) += hdma_mgmt.o
 hdma_mgmt-objs := hidma_mgmt.o hidma_mgmt_sys.o
--- /dev/null
+++ b/drivers/dma/qcom/qcom_adm.c
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+/* ADM registers - calculated from channel number and security domain */
+#define ADM_CHAN_MULTI		0x4
+#define ADM_CI_MULTI		0x4
+#define ADM_CRCI_MULTI		0x4
+#define ADM_EE_MULTI		0x800
+#define ADM_CHAN_OFFS(chan)	(ADM_CHAN_MULTI * (chan))
+#define ADM_EE_OFFS(ee)		(ADM_EE_MULTI * (ee))
+#define ADM_CHAN_EE_OFFS(chan, ee)	(ADM_CHAN_OFFS(chan) + ADM_EE_OFFS(ee))
+#define ADM_CI_OFFS(ci)		(ADM_CI_MULTI * (ci))
+#define ADM_CH_CMD_PTR(chan, ee)	(ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_CH_RSLT(chan, ee)	(0x40 + ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_CH_FLUSH_STATE0(chan, ee)	(0x80 + ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_CH_STATUS_SD(chan, ee)	(0x200 + ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_CH_CONF(chan)	(0x240 + ADM_CHAN_OFFS(chan))
+#define ADM_CH_RSLT_CONF(chan, ee)	(0x300 + ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_SEC_DOMAIN_IRQ_STATUS(ee)	(0x380 + ADM_EE_OFFS(ee))
+#define ADM_CI_CONF(ci)		(0x390 + (ci) * ADM_CI_MULTI)
+#define ADM_GP_CTL		0x3d8
+#define ADM_CRCI_CTL(crci, ee)	(0x400 + (crci) * ADM_CRCI_MULTI + \
+				ADM_EE_OFFS(ee))
+
+/* channel status */
+#define ADM_CH_STATUS_VALID	BIT(1)
+
+/* channel result */
+#define ADM_CH_RSLT_VALID	BIT(31)
+#define ADM_CH_RSLT_ERR		BIT(3)
+#define ADM_CH_RSLT_FLUSH	BIT(2)
+#define ADM_CH_RSLT_TPD		BIT(1)
+
+/* channel conf */
+#define ADM_CH_CONF_SHADOW_EN		BIT(12)
+#define ADM_CH_CONF_MPU_DISABLE		BIT(11)
+#define ADM_CH_CONF_PERM_MPU_CONF	BIT(9)
+#define ADM_CH_CONF_FORCE_RSLT_EN	BIT(7)
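+/* EE bits [1:0] land in CONF[5:4]; EE bit 2 lands in CONF[13] */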
+#define ADM_CH_CONF_SEC_DOMAIN(ee)	((((ee) & 0x3) << 4) | (((ee) & 0x4) << 11))
+
+/* channel result conf */
+#define ADM_CH_RSLT_CONF_FLUSH_EN	BIT(1)
+#define ADM_CH_RSLT_CONF_IRQ_EN		BIT(0)
+
+/* CRCI control */
+#define ADM_CRCI_CTL_MUX_SEL	BIT(18)
+#define ADM_CRCI_CTL_RST	BIT(17)
+
+/* CI configuration */
+#define ADM_CI_RANGE_END(x)	((x) << 24)
+#define ADM_CI_RANGE_START(x)	((x) << 16)
+#define ADM_CI_BURST_4_WORDS	BIT(2)
+#define ADM_CI_BURST_8_WORDS	BIT(3)
+
+/* GP control */
+#define ADM_GP_CTL_LP_EN	BIT(12)
+#define ADM_GP_CTL_LP_CNT(x)	((x) << 8)
+
+/* Command pointer list entry */
+#define ADM_CPLE_LP		BIT(31)
+#define ADM_CPLE_CMD_PTR_LIST	BIT(29)
+
+/* Command list entry */
+#define ADM_CMD_LC		BIT(31)
+#define ADM_CMD_DST_CRCI(n)	(((n) & 0xf) << 7)
+#define ADM_CMD_SRC_CRCI(n)	(((n) & 0xf) << 3)
+
+#define ADM_CMD_TYPE_SINGLE	0x0
+#define ADM_CMD_TYPE_BOX	0x3
+
+#define ADM_CRCI_MUX_SEL	BIT(4)
+#define ADM_DESC_ALIGN		8
+#define ADM_MAX_XFER		(SZ_64K - 1)
+#define ADM_MAX_ROWS		(SZ_64K - 1)
+#define ADM_MAX_CHANNELS	16
+
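+/*
+ * The controller consumes a command-pointer list whose entries point at
+ * command lists built from the two descriptor formats below: "box"
+ * descriptors move rows of 'burst' bytes under CRCI flow control, while
+ * "single" descriptors move one linear span of up to ADM_MAX_XFER bytes.
+ */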
+struct adm_desc_hw_box {
+	u32 cmd;
+	u32 src_addr;
+	u32 dst_addr;
+	u32 row_len;
+	u32 num_rows;
+	u32 row_offset;
+};
+
+struct adm_desc_hw_single {
+	u32 cmd;
+	u32 src_addr;
+	u32 dst_addr;
+	u32 len;
+};
+
+struct adm_async_desc {
+	struct virt_dma_desc vd;
+	struct adm_device *adev;
+
+	size_t length;
+	enum dma_transfer_direction dir;
+	dma_addr_t dma_addr;
+	size_t dma_len;
+
+	void *cpl;
+	dma_addr_t cp_addr;
+	u32 crci;
+	u32 mux;
+	u32 blk_size;
+};
+
+struct adm_chan {
+	struct virt_dma_chan vc;
+	struct adm_device *adev;
+
+	/* parsed from DT */
+	u32 id;			/* channel id */
+
+	struct adm_async_desc *curr_txd;
+	struct dma_slave_config slave;
+	struct list_head node;
+
+	int error;
+	int initialized;
+};
+
+static inline struct adm_chan *to_adm_chan(struct dma_chan *common)
+{
+	return container_of(common, struct adm_chan, vc.chan);
+}
+
+struct adm_device {
+	void __iomem *regs;
+	struct device *dev;
+	struct dma_device common;
+	struct device_dma_parameters dma_parms;
+	struct adm_chan *channels;
+
+	u32 ee;
+
+	struct clk *core_clk;
+	struct clk *iface_clk;
+
+	struct reset_control *clk_reset;
+	struct reset_control *c0_reset;
+	struct reset_control *c1_reset;
+	struct reset_control *c2_reset;
+	int irq;
+};
+
+/**
+ * adm_free_chan - Frees dma resources associated with the specific channel
+ * @chan: dma channel
+ *
+ * Free all allocated descriptors associated with this channel
+ */
+static void adm_free_chan(struct dma_chan *chan)
+{
+	/* free all queued descriptors */
+	vchan_free_chan_resources(to_virt_chan(chan));
+}
+
+/**
+ * adm_get_blksize - Get block size from burst value
+ * @burst: maximum burst size
+ */
+static int adm_get_blksize(unsigned int burst)
+{
+	int ret;
+
+	switch (burst) {
+	case 16:
+	case 32:
+	case 64:
+	case 128:
+		/* bursts of 16/32/64/128 bytes map to encodings 0-3 */
+		ret = ffs(burst >> 4) - 1;
+		break;
+	case 192:
+		ret = 4;
+		break;
+	case 256:
+		ret = 5;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * adm_process_fc_descriptors - Process descriptors for flow controlled xfers
+ * @achan: ADM channel
+ * @desc: Descriptor memory pointer
+ * @sg: Scatterlist entry
+ * @crci: CRCI value
+ * @burst: Burst size of transaction
+ * @direction: DMA transfer direction
+ */
+static void *adm_process_fc_descriptors(struct adm_chan *achan, void *desc,
+					struct scatterlist *sg, u32 crci,
+					u32 burst,
+					enum dma_transfer_direction direction)
+{
+	struct adm_desc_hw_box *box_desc = NULL;
+	struct adm_desc_hw_single *single_desc;
+	u32 remainder = sg_dma_len(sg);
+	u32 rows, row_offset, crci_cmd;
+	u32 mem_addr = sg_dma_address(sg);
+	u32 *incr_addr = &mem_addr;
+	u32 *src, *dst;
+
+	if (direction == DMA_DEV_TO_MEM) {
+		crci_cmd = ADM_CMD_SRC_CRCI(crci);
+		row_offset = burst;
+		src = &achan->slave.src_addr;
+		dst = &mem_addr;
+	} else {
+		crci_cmd = ADM_CMD_DST_CRCI(crci);
+		row_offset = burst << 16;
+		src = &mem_addr;
+		dst = &achan->slave.dst_addr;
+	}
+
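+	/* each box descriptor covers up to ADM_MAX_ROWS rows of 'burst' bytes */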
+	while (remainder >= burst) {
+		box_desc = desc;
+		box_desc->cmd = ADM_CMD_TYPE_BOX | crci_cmd;
+		box_desc->row_offset = row_offset;
+		box_desc->src_addr = *src;
+		box_desc->dst_addr = *dst;
+
+		rows = remainder / burst;
+		rows = min_t(u32, rows, ADM_MAX_ROWS);
+		box_desc->num_rows = rows << 16 | rows;
+		box_desc->row_len = burst << 16 | burst;
+
+		*incr_addr += burst * rows;
+		remainder -= burst * rows;
+		desc += sizeof(*box_desc);
+	}
+
+	/* if leftover bytes, do one single descriptor */
+	if (remainder) {
+		single_desc = desc;
+		single_desc->cmd = ADM_CMD_TYPE_SINGLE | crci_cmd;
+		single_desc->len = remainder;
+		single_desc->src_addr = *src;
+		single_desc->dst_addr = *dst;
+		desc += sizeof(*single_desc);
+
+		if (sg_is_last(sg))
+			single_desc->cmd |= ADM_CMD_LC;
+	} else {
+		if (box_desc && sg_is_last(sg))
+			box_desc->cmd |= ADM_CMD_LC;
+	}
+
+	return desc;
+}
+
+/**
+ * adm_process_non_fc_descriptors - Process descriptors for non-fc xfers
+ * @achan: ADM channel
+ * @desc: Descriptor memory pointer
+ * @sg: Scatterlist entry
+ * @direction: DMA transfer direction
+ */
+static void *adm_process_non_fc_descriptors(struct adm_chan *achan, void *desc,
+					    struct scatterlist *sg,
+					    enum dma_transfer_direction direction)
+{
+	struct adm_desc_hw_single *single_desc;
+	u32 remainder = sg_dma_len(sg);
+	u32 mem_addr = sg_dma_address(sg);
+	u32 *incr_addr = &mem_addr;
+	u32 *src, *dst;
+
+	if (direction == DMA_DEV_TO_MEM) {
+		src = &achan->slave.src_addr;
+		dst = &mem_addr;
+	} else {
+		src = &mem_addr;
+		dst = &achan->slave.dst_addr;
+	}
+
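+	/* split the sg entry into single descriptors of at most ADM_MAX_XFER */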
+	do {
+		single_desc = desc;
+		single_desc->cmd = ADM_CMD_TYPE_SINGLE;
+		single_desc->src_addr = *src;
+		single_desc->dst_addr = *dst;
+		single_desc->len = (remainder > ADM_MAX_XFER) ?
+				ADM_MAX_XFER : remainder;
+
+		remainder -= single_desc->len;
+		*incr_addr += single_desc->len;
+		desc += sizeof(*single_desc);
+	} while (remainder);
+
+	/* set last command if this is the end of the whole transaction */
+	if (sg_is_last(sg))
+		single_desc->cmd |= ADM_CMD_LC;
+
+	return desc;
+}
+
+/**
+ * adm_prep_slave_sg - Prep slave sg transaction
+ * @chan: dma channel
+ * @sgl: scatter gather list
+ * @sg_len: length of sg
+ * @direction: DMA transfer direction
+ * @flags: DMA flags
+ * @context: transfer context (unused)
+ */
+static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
+							 struct scatterlist *sgl,
+							 unsigned int sg_len,
+							 enum dma_transfer_direction direction,
+							 unsigned long flags,
+							 void *context)
+{
+	struct adm_chan *achan = to_adm_chan(chan);
+	struct adm_device *adev = achan->adev;
+	struct adm_async_desc *async_desc;
+	struct scatterlist *sg;
+	dma_addr_t cple_addr;
+	u32 i, burst;
+	u32 single_count = 0, box_count = 0, crci = 0;
+	u32 *cple;
+	int blk_size = 0;
+	void *desc;
+
+	if (!is_slave_direction(direction)) {
+		dev_err(adev->dev, "invalid dma direction\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/*
+	 * get burst value from slave configuration
+	 */
+	burst = (direction == DMA_MEM_TO_DEV) ?
+		achan->slave.dst_maxburst :
+		achan->slave.src_maxburst;
+
+	/* if using flow control, validate burst and crci values */
+	if (achan->slave.device_fc) {
+		blk_size = adm_get_blksize(burst);
+		if (blk_size < 0) {
+			dev_err(adev->dev, "invalid burst value: %d\n",
+				burst);
+			return ERR_PTR(-EINVAL);
+		}
+
+		crci = achan->slave.slave_id & 0xf;
+		if (!crci || achan->slave.slave_id > 0x1f) {
+			dev_err(adev->dev, "invalid crci value\n");
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
+	/* iterate through sgs and compute allocation size of structures */
+	for_each_sg(sgl, sg, sg_len, i) {
+		if (achan->slave.device_fc) {
+			box_count += DIV_ROUND_UP(sg_dma_len(sg) / burst,
+						  ADM_MAX_ROWS);
+			if (sg_dma_len(sg) % burst)
+				single_count++;
+		} else {
+			single_count += DIV_ROUND_UP(sg_dma_len(sg),
+						     ADM_MAX_XFER);
+		}
+	}
+
+	async_desc = kzalloc(sizeof(*async_desc), GFP_NOWAIT);
+	if (!async_desc)
+		return ERR_PTR(-ENOMEM);
+
+	async_desc->mux = achan->slave.slave_id & ADM_CRCI_MUX_SEL ?
+			  ADM_CRCI_CTL_MUX_SEL : 0;
+	async_desc->crci = crci;
+	async_desc->blk_size = blk_size;
+	async_desc->dma_len = single_count * sizeof(struct adm_desc_hw_single) +
+			      box_count * sizeof(struct adm_desc_hw_box) +
+			      sizeof(*cple) + 2 * ADM_DESC_ALIGN;
+
+	async_desc->cpl = kzalloc(async_desc->dma_len, GFP_NOWAIT);
+	if (!async_desc->cpl)
+		goto free;
+
+	async_desc->adev = adev;
+
+	/* both command list entry and descriptors must be 8 byte aligned */
+	cple = PTR_ALIGN(async_desc->cpl, ADM_DESC_ALIGN);
+	desc = PTR_ALIGN(cple + 1, ADM_DESC_ALIGN);
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		async_desc->length += sg_dma_len(sg);
+
+		if (achan->slave.device_fc)
+			desc = adm_process_fc_descriptors(achan, desc, sg, crci,
+							  burst, direction);
+		else
+			desc = adm_process_non_fc_descriptors(achan, desc, sg,
+							      direction);
+	}
+
+	async_desc->dma_addr = dma_map_single(adev->dev, async_desc->cpl,
+					      async_desc->dma_len,
+					      DMA_TO_DEVICE);
+	if (dma_mapping_error(adev->dev, async_desc->dma_addr))
+		goto free;
+
+	cple_addr = async_desc->dma_addr + ((void *)cple - async_desc->cpl);
+
+	/* init cmd list */
+	dma_sync_single_for_cpu(adev->dev, cple_addr, sizeof(*cple),
+				DMA_TO_DEVICE);
+	*cple = ADM_CPLE_LP;
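+	/* the hardware reads the command list address in units of 8 bytes */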
+	*cple |= (async_desc->dma_addr + ADM_DESC_ALIGN) >> 3;
+	dma_sync_single_for_device(adev->dev, cple_addr, sizeof(*cple),
+				   DMA_TO_DEVICE);
+
+	return vchan_tx_prep(&achan->vc, &async_desc->vd, flags);
+
+free:
+	kfree(async_desc);
+	return ERR_PTR(-ENOMEM);
+}
+
+/**
+ * adm_terminate_all - terminate all transactions on a channel
+ * @chan: adm dma channel
+ *
+ * Dequeues and frees all transactions, aborts current transaction
+ * No callbacks are done
+ */
+static int adm_terminate_all(struct dma_chan *chan)
+{
+	struct adm_chan *achan = to_adm_chan(chan);
+	struct adm_device *adev = achan->adev;
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&achan->vc.lock, flags);
+	vchan_get_all_descriptors(&achan->vc, &head);
+
+	/* send flush command to terminate current transaction */
+	writel_relaxed(0x0,
+		       adev->regs + ADM_CH_FLUSH_STATE0(achan->id, adev->ee));
+
+	spin_unlock_irqrestore(&achan->vc.lock, flags);
+
+	vchan_dma_desc_free_list(&achan->vc, &head);
+
+	return 0;
+}
+
+static int adm_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
+{
+	struct adm_chan *achan = to_adm_chan(chan);
+	unsigned long flag;
+
+	spin_lock_irqsave(&achan->vc.lock, flag);
+	memcpy(&achan->slave, cfg, sizeof(struct dma_slave_config));
+	spin_unlock_irqrestore(&achan->vc.lock, flag);
+
+	return 0;
+}
+
+/**
+ * adm_start_dma - start next transaction
+ * @achan: ADM dma channel
+ */
+static void adm_start_dma(struct adm_chan *achan)
+{
+	struct virt_dma_desc *vd = vchan_next_desc(&achan->vc);
+	struct adm_device *adev = achan->adev;
+	struct adm_async_desc *async_desc;
+
+	lockdep_assert_held(&achan->vc.lock);
+
+	if (!vd)
+		return;
+
+	list_del(&vd->node);
+
+	/* write next command list out to the CMD FIFO */
+	async_desc = container_of(vd, struct adm_async_desc, vd);
+	achan->curr_txd = async_desc;
+
+	/* reset channel error */
+	achan->error = 0;
+
+	if (!achan->initialized) {
+		/* enable interrupts */
+		writel(ADM_CH_CONF_SHADOW_EN |
+		       ADM_CH_CONF_PERM_MPU_CONF |
+		       ADM_CH_CONF_MPU_DISABLE |
+		       ADM_CH_CONF_SEC_DOMAIN(adev->ee),
+		       adev->regs + ADM_CH_CONF(achan->id));
+
+		writel(ADM_CH_RSLT_CONF_IRQ_EN | ADM_CH_RSLT_CONF_FLUSH_EN,
+		       adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee));
+
+		achan->initialized = 1;
+	}
+
+	/* set the crci block size if this transaction requires CRCI */
+	if (async_desc->crci) {
+		writel(async_desc->mux | async_desc->blk_size,
+		       adev->regs + ADM_CRCI_CTL(async_desc->crci, adev->ee));
+	}
+
+	/* make sure IRQ enable doesn't get reordered */
+	wmb();
+
+	/* writing the 8-byte-aligned command list address kicks off the transfer */
+	writel(ALIGN(async_desc->dma_addr, ADM_DESC_ALIGN) >> 3,
+	       adev->regs + ADM_CH_CMD_PTR(achan->id, adev->ee));
+}
+
+/**
+ * adm_dma_irq - irq handler for ADM controller
+ * @irq: IRQ of interrupt
+ * @data: callback data
+ *
+ * IRQ handler for the ADM controller
+ */
+static irqreturn_t adm_dma_irq(int irq, void *data)
+{
+	struct adm_device *adev = data;
+	u32 srcs, i;
+	struct adm_async_desc *async_desc;
+	unsigned long flags;
+
+	srcs = readl_relaxed(adev->regs +
+			ADM_SEC_DOMAIN_IRQ_STATUS(adev->ee));
+
+	for (i = 0; i < ADM_MAX_CHANNELS; i++) {
+		struct adm_chan *achan = &adev->channels[i];
+		u32 status, result;
+
+		if (srcs & BIT(i)) {
+			status = readl_relaxed(adev->regs +
+					       ADM_CH_STATUS_SD(i, adev->ee));
+
+			/* if no result present, skip */
+			if (!(status & ADM_CH_STATUS_VALID))
+				continue;
+
+			result = readl_relaxed(adev->regs +
+					       ADM_CH_RSLT(i, adev->ee));
+
+			/* no valid results, skip */
+			if (!(result & ADM_CH_RSLT_VALID))
+				continue;
+
+			/* flag error if transaction was flushed or failed */
+			if (result & (ADM_CH_RSLT_ERR | ADM_CH_RSLT_FLUSH))
+				achan->error = 1;
+
+			spin_lock_irqsave(&achan->vc.lock, flags);
+			async_desc = achan->curr_txd;
+
+			achan->curr_txd = NULL;
+
+			if (async_desc) {
+				vchan_cookie_complete(&async_desc->vd);
+
+				/* kick off next DMA */
+				adm_start_dma(achan);
+			}
+
+			spin_unlock_irqrestore(&achan->vc.lock, flags);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * adm_tx_status - returns status of transaction
+ * @chan: dma channel
+ * @cookie: transaction cookie
+ * @txstate: DMA transaction state
+ *
+ * Return status of dma transaction
+ */
+static enum dma_status adm_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+				     struct dma_tx_state *txstate)
+{
+	struct adm_chan *achan = to_adm_chan(chan);
+	struct virt_dma_desc *vd;
+	enum dma_status ret;
+	unsigned long flags;
+	size_t residue = 0;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_COMPLETE || !txstate)
+		return ret;
+
+	spin_lock_irqsave(&achan->vc.lock, flags);
+
+	vd = vchan_find_desc(&achan->vc, cookie);
+	if (vd)
+		residue = container_of(vd, struct adm_async_desc, vd)->length;
+
+	spin_unlock_irqrestore(&achan->vc.lock, flags);
+
+	/*
+	 * residue is either the full length if it is in the issued list, or 0
+	 * if it is in progress. We have no reliable way of determining
+	 * anything in between.
+	 */
+	dma_set_residue(txstate, residue);
+
+	if (achan->error)
+		return DMA_ERROR;
+
+	return ret;
+}
+
+/**
+ * adm_issue_pending - starts pending transactions
+ * @chan: dma channel
+ *
+ * Issues all pending transactions and starts DMA
+ */
+static void adm_issue_pending(struct dma_chan *chan)
+{
+	struct adm_chan *achan = to_adm_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&achan->vc.lock, flags);
+
+	if (vchan_issue_pending(&achan->vc) && !achan->curr_txd)
+		adm_start_dma(achan);
+	spin_unlock_irqrestore(&achan->vc.lock, flags);
+}
+
+/**
+ * adm_dma_free_desc - free descriptor memory
+ * @vd: virtual descriptor
+ */
+static void adm_dma_free_desc(struct virt_dma_desc *vd)
+{
+	struct adm_async_desc *async_desc = container_of(vd,
+			struct adm_async_desc, vd);
+
+	dma_unmap_single(async_desc->adev->dev, async_desc->dma_addr,
+			 async_desc->dma_len, DMA_TO_DEVICE);
+	kfree(async_desc->cpl);
+	kfree(async_desc);
+}
+
+static void adm_channel_init(struct adm_device *adev, struct adm_chan *achan,
+			     u32 index)
+{
+	achan->id = index;
+	achan->adev = adev;
+
+	vchan_init(&achan->vc, &adev->common);
+	achan->vc.desc_free = adm_dma_free_desc;
+}
+
+static int adm_dma_probe(struct platform_device *pdev)
+{
+	struct adm_device *adev;
+	int ret;
+	u32 i;
+
+	adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
+	if (!adev)
+		return -ENOMEM;
+
+	adev->dev = &pdev->dev;
+
+	adev->regs = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(adev->regs))
+		return PTR_ERR(adev->regs);
+
+	adev->irq = platform_get_irq(pdev, 0);
+	if (adev->irq < 0)
+		return adev->irq;
+
+	ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &adev->ee);
+	if (ret) {
+		dev_err(adev->dev, "Execution environment unspecified\n");
+		return -ENODEV;
+	}
+
+	adev->core_clk = devm_clk_get(adev->dev, "core");
+	if (IS_ERR(adev->core_clk))
+		return PTR_ERR(adev->core_clk);
+
+	adev->iface_clk = devm_clk_get(adev->dev, "iface");
+	if (IS_ERR(adev->iface_clk))
+		return PTR_ERR(adev->iface_clk);
+
+	adev->clk_reset = devm_reset_control_get_exclusive(&pdev->dev, "clk");
+	if (IS_ERR(adev->clk_reset)) {
+		dev_err(adev->dev, "failed to get ADM0 reset\n");
+		return PTR_ERR(adev->clk_reset);
+	}
+
+	adev->c0_reset = devm_reset_control_get_exclusive(&pdev->dev, "c0");
+	if (IS_ERR(adev->c0_reset)) {
+		dev_err(adev->dev, "failed to get ADM0 C0 reset\n");
+		return PTR_ERR(adev->c0_reset);
+	}
+
+	adev->c1_reset = devm_reset_control_get_exclusive(&pdev->dev, "c1");
+	if (IS_ERR(adev->c1_reset)) {
+		dev_err(adev->dev, "failed to get ADM0 C1 reset\n");
+		return PTR_ERR(adev->c1_reset);
+	}
+
+	adev->c2_reset = devm_reset_control_get_exclusive(&pdev->dev, "c2");
+	if (IS_ERR(adev->c2_reset)) {
+		dev_err(adev->dev, "failed to get ADM0 C2 reset\n");
+		return PTR_ERR(adev->c2_reset);
+	}
+
+	ret = clk_prepare_enable(adev->core_clk);
+	if (ret) {
+		dev_err(adev->dev, "failed to prepare/enable core clock\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(adev->iface_clk);
+	if (ret) {
+		dev_err(adev->dev, "failed to prepare/enable iface clock\n");
+		goto err_disable_core_clk;
+	}
+
+	/* pulse all resets to put the hardware into a known initial state */
+	reset_control_assert(adev->clk_reset);
+	reset_control_assert(adev->c0_reset);
+	reset_control_assert(adev->c1_reset);
+	reset_control_assert(adev->c2_reset);
+
+	udelay(2);
+
+	reset_control_deassert(adev->clk_reset);
+	reset_control_deassert(adev->c0_reset);
+	reset_control_deassert(adev->c1_reset);
+	reset_control_deassert(adev->c2_reset);
+
+	adev->channels = devm_kcalloc(adev->dev, ADM_MAX_CHANNELS,
+				      sizeof(*adev->channels), GFP_KERNEL);
+
+	if (!adev->channels) {
+		ret = -ENOMEM;
+		goto err_disable_clks;
+	}
+
+	/* allocate and initialize channels */
+	INIT_LIST_HEAD(&adev->common.channels);
+
+	for (i = 0; i < ADM_MAX_CHANNELS; i++)
+		adm_channel_init(adev, &adev->channels[i], i);
+
+	/* reset CRCIs */
+	for (i = 0; i < 16; i++)
+		writel(ADM_CRCI_CTL_RST, adev->regs +
+		       ADM_CRCI_CTL(i, adev->ee));
+
+	/* configure client interfaces */
+	writel(ADM_CI_RANGE_START(0x40) | ADM_CI_RANGE_END(0xb0) |
+	       ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(0));
+	writel(ADM_CI_RANGE_START(0x2a) | ADM_CI_RANGE_END(0x2c) |
+	       ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(1));
+	writel(ADM_CI_RANGE_START(0x12) | ADM_CI_RANGE_END(0x28) |
+	       ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(2));
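+	/* enable low-power mode with an idle count of 0xf */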
+	writel(ADM_GP_CTL_LP_EN | ADM_GP_CTL_LP_CNT(0xf),
+	       adev->regs + ADM_GP_CTL);
+
+	ret = devm_request_irq(adev->dev, adev->irq, adm_dma_irq,
+			       0, "adm_dma", adev);
+	if (ret)
+		goto err_disable_clks;
+
+	platform_set_drvdata(pdev, adev);
+
+	adev->common.dev = adev->dev;
+	adev->common.dev->dma_parms = &adev->dma_parms;
+
+	/* set capabilities */
+	dma_cap_zero(adev->common.cap_mask);
+	dma_cap_set(DMA_SLAVE, adev->common.cap_mask);
+	dma_cap_set(DMA_PRIVATE, adev->common.cap_mask);
+
+	/* initialize dmaengine apis; directions and widths are bit masks */
+	adev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	adev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+	adev->common.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	adev->common.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	adev->common.device_free_chan_resources = adm_free_chan;
+	adev->common.device_prep_slave_sg = adm_prep_slave_sg;
+	adev->common.device_issue_pending = adm_issue_pending;
+	adev->common.device_tx_status = adm_tx_status;
+	adev->common.device_terminate_all = adm_terminate_all;
+	adev->common.device_config = adm_slave_config;
+
+	ret = dma_async_device_register(&adev->common);
+	if (ret) {
+		dev_err(adev->dev, "failed to register dma async device\n");
+		goto err_disable_clks;
+	}
+
+	ret = of_dma_controller_register(pdev->dev.of_node,
+					 of_dma_xlate_by_chan_id,
+					 &adev->common);
+	if (ret)
+		goto err_unregister_dma;
+
+	return 0;
+
+err_unregister_dma:
+	dma_async_device_unregister(&adev->common);
+err_disable_clks:
+	clk_disable_unprepare(adev->iface_clk);
+err_disable_core_clk:
+	clk_disable_unprepare(adev->core_clk);
+
+	return ret;
+}
+
+static int adm_dma_remove(struct platform_device *pdev)
+{
+	struct adm_device *adev = platform_get_drvdata(pdev);
+	struct adm_chan *achan;
+	u32 i;
+
+	of_dma_controller_free(pdev->dev.of_node);
+	dma_async_device_unregister(&adev->common);
+
+	for (i = 0; i < ADM_MAX_CHANNELS; i++) {
+		achan = &adev->channels[i];
+
+		/* mask IRQs for this channel/EE pair */
+		writel(0, adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee));
+
+		tasklet_kill(&adev->channels[i].vc.task);
+		adm_terminate_all(&adev->channels[i].vc.chan);
+	}
+
+	devm_free_irq(adev->dev, adev->irq, adev);
+
+	clk_disable_unprepare(adev->core_clk);
+	clk_disable_unprepare(adev->iface_clk);
+
+	return 0;
+}
+
+static const struct of_device_id adm_of_match[] = {
+	{ .compatible = "qcom,adm", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, adm_of_match);
+
+static struct platform_driver adm_dma_driver = {
+	.probe = adm_dma_probe,
+	.remove = adm_dma_remove,
+	.driver = {
+		.name = "adm-dma-engine",
+		.of_match_table = adm_of_match,
+	},
+};
+
+module_platform_driver(adm_dma_driver);
+
+MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
+MODULE_DESCRIPTION("QCOM ADM DMA engine driver");
+MODULE_LICENSE("GPL v2");