1 From 23dd43da9c885789b3d5aceed3e401345f8e8106 Mon Sep 17 00:00:00 2001
2 From: Andy Gross <agross@codeaurora.org>
3 Date: Sat, 29 Mar 2014 18:53:16 +0530
4 Subject: [PATCH 045/182] dmaengine: add Qualcomm BAM dma driver
6 Add the DMA engine driver for the QCOM Bus Access Manager (BAM) DMA controller
7 found on MSM 8x74 platforms.
9 Each BAM DMA device is associated with a specific on-chip peripheral. Each
10 channel provides a uni-directional data transfer engine that is capable of
11 transferring data between the peripheral and system memory (System mode), or
12 between two peripherals (BAM2BAM).
14 The initial release of this driver only supports slave transfers between
15 peripherals and system memory.
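A peripheral driver consumes a BAM channel through the standard dmaengine
slave API. A minimal sketch of such a client (illustrative only; the device
handle, scatterlist, "rx" channel name and rx_done callback below are
hypothetical, and error handling is omitted):

	struct dma_chan *chan;
	struct dma_async_tx_descriptor *txd;

	chan = dma_request_slave_channel(dev, "rx");
	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
				      DMA_PREP_INTERRUPT);
	txd->callback = rx_done;
	dmaengine_submit(txd);
	dma_async_issue_pending(chan);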
17 Signed-off-by: Andy Gross <agross@codeaurora.org>
18 Tested-by: Stanimir Varbanov <svarbanov@mm-sol.com>
19 Signed-off-by: Vinod Koul <vinod.koul@intel.com>
21 drivers/dma/Kconfig | 9 +
22 drivers/dma/Makefile | 2 +
23 drivers/dma/qcom_bam_dma.c | 1111 ++++++++++++++++++++++++++++++++++++++++++++
24 3 files changed, 1122 insertions(+)
25 create mode 100644 drivers/dma/qcom_bam_dma.c
27 --- a/drivers/dma/Kconfig
28 +++ b/drivers/dma/Kconfig
29 @@ -401,4 +401,13 @@ config DMATEST
30 config DMA_ENGINE_RAID
34 + tristate "QCOM BAM DMA support"
35 + depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
37 + select DMA_VIRTUAL_CHANNELS
39 + Enable support for the QCOM BAM DMA controller. This controller
40 + provides DMA capabilities for a variety of on-chip devices.
43 --- a/drivers/dma/Makefile
44 +++ b/drivers/dma/Makefile
45 @@ -44,3 +44,5 @@ obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
46 obj-$(CONFIG_TI_CPPI41) += cppi41.o
47 obj-$(CONFIG_K3_DMA) += k3dma.o
48 obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
49 +obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
50 +obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
52 +++ b/drivers/dma/qcom_bam_dma.c
55 + * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
57 + * This program is free software; you can redistribute it and/or modify
58 + * it under the terms of the GNU General Public License version 2 and
59 + * only version 2 as published by the Free Software Foundation.
61 + * This program is distributed in the hope that it will be useful,
62 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
63 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
64 + * GNU General Public License for more details.
68 + * QCOM BAM DMA engine driver
70 + * QCOM BAM DMA blocks are distributed amongst a number of the on-chip
71 + * peripherals on the MSM 8x74. The configuration of the channels is dependent
72 + * on the way they are hard wired to that specific peripheral. The peripheral
73 + * device tree entries specify the configuration of each channel.
75 + * The DMA controller requires the use of external memory for storage of the
76 + * hardware descriptors for each channel. The descriptor FIFO is accessed as a
77 + * circular buffer and operations are managed according to the offset within the
78 + * FIFO. After pipe/channel reset, all of the pipe registers and internal state
79 + * are back to defaults.
81 + * During DMA operations, we write descriptors to the FIFO, being careful to
82 + * handle wrapping and then write the last FIFO offset to that channel's
83 + * P_EVNT_REG register to kick off the transaction. The P_SW_OFSTS register
84 + * indicates the current FIFO offset that is being processed, so there is some
85 + * indication of where the hardware is currently working.
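+ *
+ * For example (see bam_start_dma() below): after N descriptors are queued,
+ * the software tail advances to (tail + N) % MAX_DESCRIPTORS and the value
+ * written to P_EVNT_REG is tail * sizeof(struct bam_desc_hw), i.e. the byte
+ * offset of the first free slot in the FIFO.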
88 +#include <linux/kernel.h>
89 +#include <linux/io.h>
90 +#include <linux/init.h>
91 +#include <linux/slab.h>
92 +#include <linux/module.h>
93 +#include <linux/interrupt.h>
94 +#include <linux/dma-mapping.h>
95 +#include <linux/scatterlist.h>
96 +#include <linux/device.h>
97 +#include <linux/platform_device.h>
98 +#include <linux/of.h>
99 +#include <linux/of_address.h>
100 +#include <linux/of_irq.h>
101 +#include <linux/of_dma.h>
102 +#include <linux/clk.h>
103 +#include <linux/dmaengine.h>
105 +#include "dmaengine.h"
106 +#include "virt-dma.h"
108 +struct bam_desc_hw {
109 + u32 addr; /* Buffer physical address */
110 + u16 size; /* Buffer size in bytes */
114 +#define DESC_FLAG_INT BIT(15)
115 +#define DESC_FLAG_EOT BIT(14)
116 +#define DESC_FLAG_EOB BIT(13)
118 +struct bam_async_desc {
119 + struct virt_dma_desc vd;
123 + struct bam_desc_hw *curr_desc;
125 + enum dma_transfer_direction dir;
127 + struct bam_desc_hw desc[0];
130 +#define BAM_CTRL 0x0000
131 +#define BAM_REVISION 0x0004
132 +#define BAM_SW_REVISION 0x0080
133 +#define BAM_NUM_PIPES 0x003C
134 +#define BAM_TIMER 0x0040
135 +#define BAM_TIMER_CTRL 0x0044
136 +#define BAM_DESC_CNT_TRSHLD 0x0008
137 +#define BAM_IRQ_SRCS 0x000C
138 +#define BAM_IRQ_SRCS_MSK 0x0010
139 +#define BAM_IRQ_SRCS_UNMASKED 0x0030
140 +#define BAM_IRQ_STTS 0x0014
141 +#define BAM_IRQ_CLR 0x0018
142 +#define BAM_IRQ_EN 0x001C
143 +#define BAM_CNFG_BITS 0x007C
144 +#define BAM_IRQ_SRCS_EE(ee) (0x0800 + ((ee) * 0x80))
145 +#define BAM_IRQ_SRCS_MSK_EE(ee) (0x0804 + ((ee) * 0x80))
146 +#define BAM_P_CTRL(pipe) (0x1000 + ((pipe) * 0x1000))
147 +#define BAM_P_RST(pipe) (0x1004 + ((pipe) * 0x1000))
148 +#define BAM_P_HALT(pipe) (0x1008 + ((pipe) * 0x1000))
149 +#define BAM_P_IRQ_STTS(pipe) (0x1010 + ((pipe) * 0x1000))
150 +#define BAM_P_IRQ_CLR(pipe) (0x1014 + ((pipe) * 0x1000))
151 +#define BAM_P_IRQ_EN(pipe) (0x1018 + ((pipe) * 0x1000))
152 +#define BAM_P_EVNT_DEST_ADDR(pipe) (0x182C + ((pipe) * 0x1000))
153 +#define BAM_P_EVNT_REG(pipe) (0x1818 + ((pipe) * 0x1000))
154 +#define BAM_P_SW_OFSTS(pipe) (0x1800 + ((pipe) * 0x1000))
155 +#define BAM_P_DATA_FIFO_ADDR(pipe) (0x1824 + ((pipe) * 0x1000))
156 +#define BAM_P_DESC_FIFO_ADDR(pipe) (0x181C + ((pipe) * 0x1000))
157 +#define BAM_P_EVNT_TRSHLD(pipe) (0x1828 + ((pipe) * 0x1000))
158 +#define BAM_P_FIFO_SIZES(pipe) (0x1820 + ((pipe) * 0x1000))
161 +#define BAM_SW_RST BIT(0)
162 +#define BAM_EN BIT(1)
163 +#define BAM_EN_ACCUM BIT(4)
164 +#define BAM_TESTBUS_SEL_SHIFT 5
165 +#define BAM_TESTBUS_SEL_MASK 0x3F
166 +#define BAM_DESC_CACHE_SEL_SHIFT 13
167 +#define BAM_DESC_CACHE_SEL_MASK 0x3
168 +#define BAM_CACHED_DESC_STORE BIT(15)
169 +#define IBC_DISABLE BIT(16)
172 +#define REVISION_SHIFT 0
173 +#define REVISION_MASK 0xFF
174 +#define NUM_EES_SHIFT 8
175 +#define NUM_EES_MASK 0xF
176 +#define CE_BUFFER_SIZE BIT(13)
177 +#define AXI_ACTIVE BIT(14)
178 +#define USE_VMIDMT BIT(15)
179 +#define SECURED BIT(16)
180 +#define BAM_HAS_NO_BYPASS BIT(17)
181 +#define HIGH_FREQUENCY_BAM BIT(18)
182 +#define INACTIV_TMRS_EXST BIT(19)
183 +#define NUM_INACTIV_TMRS BIT(20)
184 +#define DESC_CACHE_DEPTH_SHIFT 21
185 +#define DESC_CACHE_DEPTH_1 (0 << DESC_CACHE_DEPTH_SHIFT)
186 +#define DESC_CACHE_DEPTH_2 (1 << DESC_CACHE_DEPTH_SHIFT)
187 +#define DESC_CACHE_DEPTH_3 (2 << DESC_CACHE_DEPTH_SHIFT)
188 +#define DESC_CACHE_DEPTH_4 (3 << DESC_CACHE_DEPTH_SHIFT)
189 +#define CMD_DESC_EN BIT(23)
190 +#define INACTIV_TMR_BASE_SHIFT 24
191 +#define INACTIV_TMR_BASE_MASK 0xFF
194 +#define BAM_NUM_PIPES_SHIFT 0
195 +#define BAM_NUM_PIPES_MASK 0xFF
196 +#define PERIPH_NON_PIPE_GRP_SHIFT 16
197 +#define PERIPH_NON_PIP_GRP_MASK 0xFF
198 +#define BAM_NON_PIPE_GRP_SHIFT 24
199 +#define BAM_NON_PIPE_GRP_MASK 0xFF
202 +#define BAM_PIPE_CNFG BIT(2)
203 +#define BAM_FULL_PIPE BIT(11)
204 +#define BAM_NO_EXT_P_RST BIT(12)
205 +#define BAM_IBC_DISABLE BIT(13)
206 +#define BAM_SB_CLK_REQ BIT(14)
207 +#define BAM_PSM_CSW_REQ BIT(15)
208 +#define BAM_PSM_P_RES BIT(16)
209 +#define BAM_AU_P_RES BIT(17)
210 +#define BAM_SI_P_RES BIT(18)
211 +#define BAM_WB_P_RES BIT(19)
212 +#define BAM_WB_BLK_CSW BIT(20)
213 +#define BAM_WB_CSW_ACK_IDL BIT(21)
214 +#define BAM_WB_RETR_SVPNT BIT(22)
215 +#define BAM_WB_DSC_AVL_P_RST BIT(23)
216 +#define BAM_REG_P_EN BIT(24)
217 +#define BAM_PSM_P_HD_DATA BIT(25)
218 +#define BAM_AU_ACCUMED BIT(26)
219 +#define BAM_CMD_ENABLE BIT(27)
221 +#define BAM_CNFG_BITS_DEFAULT (BAM_PIPE_CNFG | \
222 + BAM_NO_EXT_P_RST | \
223 + BAM_IBC_DISABLE | \
225 + BAM_PSM_CSW_REQ | \
231 + BAM_WB_CSW_ACK_IDL | \
232 + BAM_WB_RETR_SVPNT | \
233 + BAM_WB_DSC_AVL_P_RST | \
235 + BAM_PSM_P_HD_DATA | \
241 +#define P_DIRECTION BIT(3)
242 +#define P_SYS_STRM BIT(4)
243 +#define P_SYS_MODE BIT(5)
244 +#define P_AUTO_EOB BIT(6)
245 +#define P_AUTO_EOB_SEL_SHIFT 7
246 +#define P_AUTO_EOB_SEL_512 (0 << P_AUTO_EOB_SEL_SHIFT)
247 +#define P_AUTO_EOB_SEL_256 (1 << P_AUTO_EOB_SEL_SHIFT)
248 +#define P_AUTO_EOB_SEL_128 (2 << P_AUTO_EOB_SEL_SHIFT)
249 +#define P_AUTO_EOB_SEL_64 (3 << P_AUTO_EOB_SEL_SHIFT)
250 +#define P_PREFETCH_LIMIT_SHIFT 9
251 +#define P_PREFETCH_LIMIT_32 (0 << P_PREFETCH_LIMIT_SHIFT)
252 +#define P_PREFETCH_LIMIT_16 (1 << P_PREFETCH_LIMIT_SHIFT)
253 +#define P_PREFETCH_LIMIT_4 (2 << P_PREFETCH_LIMIT_SHIFT)
254 +#define P_WRITE_NWD BIT(11)
255 +#define P_LOCK_GROUP_SHIFT 16
256 +#define P_LOCK_GROUP_MASK 0x1F
258 +/* BAM_DESC_CNT_TRSHLD */
259 +#define CNT_TRSHLD 0xffff
260 +#define DEFAULT_CNT_THRSHLD 0x4
263 +#define BAM_IRQ BIT(31)
264 +#define P_IRQ 0x7fffffff
266 +/* BAM_IRQ_SRCS_MSK */
267 +#define BAM_IRQ_MSK BAM_IRQ
268 +#define P_IRQ_MSK P_IRQ
271 +#define BAM_TIMER_IRQ BIT(4)
272 +#define BAM_EMPTY_IRQ BIT(3)
273 +#define BAM_ERROR_IRQ BIT(2)
274 +#define BAM_HRESP_ERR_IRQ BIT(1)
277 +#define BAM_TIMER_CLR BIT(4)
278 +#define BAM_EMPTY_CLR BIT(3)
279 +#define BAM_ERROR_CLR BIT(2)
280 +#define BAM_HRESP_ERR_CLR BIT(1)
283 +#define BAM_TIMER_EN BIT(4)
284 +#define BAM_EMPTY_EN BIT(3)
285 +#define BAM_ERROR_EN BIT(2)
286 +#define BAM_HRESP_ERR_EN BIT(1)
289 +#define P_PRCSD_DESC_EN BIT(0)
290 +#define P_TIMER_EN BIT(1)
291 +#define P_WAKE_EN BIT(2)
292 +#define P_OUT_OF_DESC_EN BIT(3)
293 +#define P_ERR_EN BIT(4)
294 +#define P_TRNSFR_END_EN BIT(5)
295 +#define P_DEFAULT_IRQS_EN (P_PRCSD_DESC_EN | P_ERR_EN | P_TRNSFR_END_EN)
297 +/* BAM_P_SW_OFSTS */
298 +#define P_SW_OFSTS_MASK 0xffff
300 +#define BAM_DESC_FIFO_SIZE SZ_32K
301 +#define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1)
302 +#define BAM_MAX_DATA_SIZE (SZ_32K - 8)
305 + struct virt_dma_chan vc;
307 + struct bam_device *bdev;
309 + /* configuration from device tree */
312 + struct bam_async_desc *curr_txd; /* current running dma */
314 + /* runtime configuration */
315 + struct dma_slave_config slave;
318 + struct bam_desc_hw *fifo_virt;
319 + dma_addr_t fifo_phys;
322 + unsigned short head; /* start of active descriptor entries */
323 + unsigned short tail; /* end of active descriptor entries */
325 + unsigned int initialized; /* is the channel hw initialized? */
326 + unsigned int paused; /* is the channel paused? */
327 + unsigned int reconfigure; /* new slave config? */
329 + struct list_head node;
332 +static inline struct bam_chan *to_bam_chan(struct dma_chan *common)
334 + return container_of(common, struct bam_chan, vc.chan);
338 + void __iomem *regs;
339 + struct device *dev;
340 + struct dma_device common;
341 + struct device_dma_parameters dma_parms;
342 + struct bam_chan *channels;
345 + /* execution environment ID, from DT */
348 + struct clk *bamclk;
351 + /* dma start transaction tasklet */
352 + struct tasklet_struct task;
356 + * bam_reset_channel - Reset individual BAM DMA channel
357 + * @bchan: bam channel
359 + * This function resets a specific BAM channel
361 +static void bam_reset_channel(struct bam_chan *bchan)
363 + struct bam_device *bdev = bchan->bdev;
365 + lockdep_assert_held(&bchan->vc.lock);
367 + /* reset channel */
368 + writel_relaxed(1, bdev->regs + BAM_P_RST(bchan->id));
369 + writel_relaxed(0, bdev->regs + BAM_P_RST(bchan->id));
371 + /* don't allow cpu to reorder BAM register accesses done after this */
374 + /* make sure hw is initialized when channel is used the first time */
375 + bchan->initialized = 0;
379 + * bam_chan_init_hw - Initialize channel hardware
380 + * @bchan: bam channel
382 + * This function resets and initializes the BAM channel
384 +static void bam_chan_init_hw(struct bam_chan *bchan,
385 + enum dma_transfer_direction dir)
387 + struct bam_device *bdev = bchan->bdev;
390 + /* Reset the channel to clear internal state of the FIFO */
391 + bam_reset_channel(bchan);
394 + * write out 8 byte aligned address. We have enough space for this
395 + * because we allocated 1 more descriptor (8 bytes) than we can use
397 + writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
398 + bdev->regs + BAM_P_DESC_FIFO_ADDR(bchan->id));
399 + writel_relaxed(BAM_DESC_FIFO_SIZE, bdev->regs +
400 + BAM_P_FIFO_SIZES(bchan->id));
402 + /* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */
403 + writel_relaxed(P_DEFAULT_IRQS_EN, bdev->regs + BAM_P_IRQ_EN(bchan->id));
405 + /* unmask the specific pipe and EE combo */
406 + val = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
407 + val |= BIT(bchan->id);
408 + writel_relaxed(val, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
410 + /* don't allow cpu to reorder the channel enable done below */
413 + /* set fixed direction and mode, then enable channel */
414 + val = P_EN | P_SYS_MODE;
415 + if (dir == DMA_DEV_TO_MEM)
416 + val |= P_DIRECTION;
418 + writel_relaxed(val, bdev->regs + BAM_P_CTRL(bchan->id));
420 + bchan->initialized = 1;
422 + /* init FIFO pointers */
428 + * bam_alloc_chan - Allocate channel resources for DMA channel.
429 + * @chan: specified channel
431 + * This function allocates the FIFO descriptor memory
433 +static int bam_alloc_chan(struct dma_chan *chan)
435 + struct bam_chan *bchan = to_bam_chan(chan);
436 + struct bam_device *bdev = bchan->bdev;
438 + if (bchan->fifo_virt)
441 + /* allocate FIFO descriptor space, but only if necessary */
442 + bchan->fifo_virt = dma_alloc_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
443 + &bchan->fifo_phys, GFP_KERNEL);
445 + if (!bchan->fifo_virt) {
446 + dev_err(bdev->dev, "Failed to allocate desc fifo\n");
454 + * bam_free_chan - Frees dma resources associated with specific channel
455 + * @chan: specified channel
457 + * Free the allocated fifo descriptor memory and channel resources
460 +static void bam_free_chan(struct dma_chan *chan)
462 + struct bam_chan *bchan = to_bam_chan(chan);
463 + struct bam_device *bdev = bchan->bdev;
465 + unsigned long flags;
467 + vchan_free_chan_resources(to_virt_chan(chan));
469 + if (bchan->curr_txd) {
470 + dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
474 + spin_lock_irqsave(&bchan->vc.lock, flags);
475 + bam_reset_channel(bchan);
476 + spin_unlock_irqrestore(&bchan->vc.lock, flags);
478 + dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
480 + bchan->fifo_virt = NULL;
482 + /* mask irq for pipe/channel */
483 + val = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
484 + val &= ~BIT(bchan->id);
485 + writel_relaxed(val, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
488 + writel_relaxed(0, bdev->regs + BAM_P_IRQ_EN(bchan->id));
492 + * bam_slave_config - set slave configuration for channel
493 + * @chan: dma channel
494 + * @cfg: slave configuration
496 + * Sets slave configuration for channel
499 +static void bam_slave_config(struct bam_chan *bchan,
500 + struct dma_slave_config *cfg)
502 + memcpy(&bchan->slave, cfg, sizeof(*cfg));
503 + bchan->reconfigure = 1;
507 + * bam_prep_slave_sg - Prep slave sg transaction
509 + * @chan: dma channel
510 + * @sgl: scatter gather list
511 + * @sg_len: length of sg
512 + * @direction: DMA transfer direction
513 + * @flags: DMA flags
514 + * @context: transfer context (unused)
516 +static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
517 + struct scatterlist *sgl, unsigned int sg_len,
518 + enum dma_transfer_direction direction, unsigned long flags,
521 + struct bam_chan *bchan = to_bam_chan(chan);
522 + struct bam_device *bdev = bchan->bdev;
523 + struct bam_async_desc *async_desc;
524 + struct scatterlist *sg;
526 + struct bam_desc_hw *desc;
527 + unsigned int num_alloc = 0;
530 + if (!is_slave_direction(direction)) {
531 + dev_err(bdev->dev, "invalid dma direction\n");
535 + /* calculate number of required entries */
536 + for_each_sg(sgl, sg, sg_len, i)
537 + num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_MAX_DATA_SIZE);
539 + /* allocate enough room to accommodate the number of entries */
540 + async_desc = kzalloc(sizeof(*async_desc) +
541 + (num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT);
546 + async_desc->num_desc = num_alloc;
547 + async_desc->curr_desc = async_desc->desc;
548 + async_desc->dir = direction;
550 + /* fill in temporary descriptors */
551 + desc = async_desc->desc;
552 + for_each_sg(sgl, sg, sg_len, i) {
553 + unsigned int remainder = sg_dma_len(sg);
554 + unsigned int curr_offset = 0;
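+ /* split entries larger than BAM_MAX_DATA_SIZE across multiple descriptors */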
557 + desc->addr = sg_dma_address(sg) + curr_offset;
559 + if (remainder > BAM_MAX_DATA_SIZE) {
560 + desc->size = BAM_MAX_DATA_SIZE;
561 + remainder -= BAM_MAX_DATA_SIZE;
562 + curr_offset += BAM_MAX_DATA_SIZE;
564 + desc->size = remainder;
568 + async_desc->length += desc->size;
570 + } while (remainder > 0);
573 + return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
581 + * bam_dma_terminate_all - terminate all transactions on a channel
582 + * @bchan: bam dma channel
584 + * Dequeues and frees all transactions
585 + * No callbacks are done
588 +static void bam_dma_terminate_all(struct bam_chan *bchan)
590 + unsigned long flag;
593 + /* remove all transactions, including active transaction */
594 + spin_lock_irqsave(&bchan->vc.lock, flag);
595 + if (bchan->curr_txd) {
596 + list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued);
597 + bchan->curr_txd = NULL;
600 + vchan_get_all_descriptors(&bchan->vc, &head);
601 + spin_unlock_irqrestore(&bchan->vc.lock, flag);
603 + vchan_dma_desc_free_list(&bchan->vc, &head);
607 + * bam_control - DMA device control
608 + * @chan: dma channel
609 + * @cmd: control cmd
610 + * @arg: cmd argument
612 + * Perform DMA control command
615 +static int bam_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
618 + struct bam_chan *bchan = to_bam_chan(chan);
619 + struct bam_device *bdev = bchan->bdev;
621 + unsigned long flag;
625 + spin_lock_irqsave(&bchan->vc.lock, flag);
626 + writel_relaxed(1, bdev->regs + BAM_P_HALT(bchan->id));
628 + spin_unlock_irqrestore(&bchan->vc.lock, flag);
632 + spin_lock_irqsave(&bchan->vc.lock, flag);
633 + writel_relaxed(0, bdev->regs + BAM_P_HALT(bchan->id));
635 + spin_unlock_irqrestore(&bchan->vc.lock, flag);
638 + case DMA_TERMINATE_ALL:
639 + bam_dma_terminate_all(bchan);
642 + case DMA_SLAVE_CONFIG:
643 + spin_lock_irqsave(&bchan->vc.lock, flag);
644 + bam_slave_config(bchan, (struct dma_slave_config *)arg);
645 + spin_unlock_irqrestore(&bchan->vc.lock, flag);
657 + * process_channel_irqs - processes the channel interrupts
658 + * @bdev: bam controller
660 + * This function processes the channel interrupts
663 +static u32 process_channel_irqs(struct bam_device *bdev)
665 + u32 i, srcs, pipe_stts;
666 + unsigned long flags;
667 + struct bam_async_desc *async_desc;
669 + srcs = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_EE(bdev->ee));
671 + /* return early if no pipe/channel interrupts are present */
672 + if (!(srcs & P_IRQ))
675 + for (i = 0; i < bdev->num_channels; i++) {
676 + struct bam_chan *bchan = &bdev->channels[i];
678 + if (!(srcs & BIT(i)))
681 + /* clear pipe irq */
682 + pipe_stts = readl_relaxed(bdev->regs +
683 + BAM_P_IRQ_STTS(i));
685 + writel_relaxed(pipe_stts, bdev->regs +
688 + spin_lock_irqsave(&bchan->vc.lock, flags);
689 + async_desc = bchan->curr_txd;
692 + async_desc->num_desc -= async_desc->xfer_len;
693 + async_desc->curr_desc += async_desc->xfer_len;
694 + bchan->curr_txd = NULL;
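+ /* manage FIFO: retire the descriptors the hardware just completed */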
697 + bchan->head += async_desc->xfer_len;
698 + bchan->head %= MAX_DESCRIPTORS;
701 + * if complete, process cookie. Otherwise
702 + * push back to front of desc_issued so that
703 + * it gets restarted by the tasklet
705 + if (!async_desc->num_desc)
706 + vchan_cookie_complete(&async_desc->vd);
708 + list_add(&async_desc->vd.node,
709 + &bchan->vc.desc_issued);
712 + spin_unlock_irqrestore(&bchan->vc.lock, flags);
719 + * bam_dma_irq - irq handler for bam controller
720 + * @irq: IRQ of interrupt
721 + * @data: callback data
723 + * IRQ handler for the bam controller
725 +static irqreturn_t bam_dma_irq(int irq, void *data)
727 + struct bam_device *bdev = data;
728 + u32 clr_mask = 0, srcs = 0;
730 + srcs |= process_channel_irqs(bdev);
732 + /* kick off tasklet to start next dma transfer */
734 + tasklet_schedule(&bdev->task);
736 + if (srcs & BAM_IRQ)
737 + clr_mask = readl_relaxed(bdev->regs + BAM_IRQ_STTS);
739 + /* don't allow reorder of the various accesses to the BAM registers */
742 + writel_relaxed(clr_mask, bdev->regs + BAM_IRQ_CLR);
744 + return IRQ_HANDLED;
748 + * bam_tx_status - returns status of transaction
749 + * @chan: dma channel
750 + * @cookie: transaction cookie
751 + * @txstate: DMA transaction state
753 + * Return status of dma transaction
755 +static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
756 + struct dma_tx_state *txstate)
758 + struct bam_chan *bchan = to_bam_chan(chan);
759 + struct virt_dma_desc *vd;
761 + size_t residue = 0;
763 + unsigned long flags;
765 + ret = dma_cookie_status(chan, cookie, txstate);
766 + if (ret == DMA_COMPLETE)
770 + return bchan->paused ? DMA_PAUSED : ret;
772 + spin_lock_irqsave(&bchan->vc.lock, flags);
773 + vd = vchan_find_desc(&bchan->vc, cookie);
775 + residue = container_of(vd, struct bam_async_desc, vd)->length;
776 + else if (bchan->curr_txd && bchan->curr_txd->vd.tx.cookie == cookie)
777 + for (i = 0; i < bchan->curr_txd->num_desc; i++)
778 + residue += bchan->curr_txd->curr_desc[i].size;
780 + spin_unlock_irqrestore(&bchan->vc.lock, flags);
782 + dma_set_residue(txstate, residue);
784 + if (ret == DMA_IN_PROGRESS && bchan->paused)
791 + * bam_apply_new_config - apply new slave configuration before the next transfer
792 + * @bchan: bam dma channel
793 + * @dir: DMA direction
795 +static void bam_apply_new_config(struct bam_chan *bchan,
796 + enum dma_transfer_direction dir)
798 + struct bam_device *bdev = bchan->bdev;
801 + if (dir == DMA_DEV_TO_MEM)
802 + maxburst = bchan->slave.src_maxburst;
804 + maxburst = bchan->slave.dst_maxburst;
806 + writel_relaxed(maxburst, bdev->regs + BAM_DESC_CNT_TRSHLD);
808 + bchan->reconfigure = 0;
812 + * bam_start_dma - start next transaction
813 + * @bchan: bam dma channel
815 +static void bam_start_dma(struct bam_chan *bchan)
817 + struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
818 + struct bam_device *bdev = bchan->bdev;
819 + struct bam_async_desc *async_desc;
820 + struct bam_desc_hw *desc;
821 + struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
822 + sizeof(struct bam_desc_hw));
824 + lockdep_assert_held(&bchan->vc.lock);
829 + list_del(&vd->node);
831 + async_desc = container_of(vd, struct bam_async_desc, vd);
832 + bchan->curr_txd = async_desc;
834 + /* on first use, initialize the channel hardware */
835 + if (!bchan->initialized)
836 + bam_chan_init_hw(bchan, async_desc->dir);
838 + /* apply new slave config changes, if necessary */
839 + if (bchan->reconfigure)
840 + bam_apply_new_config(bchan, async_desc->dir);
842 + desc = bchan->curr_txd->curr_desc;
844 + if (async_desc->num_desc > MAX_DESCRIPTORS)
845 + async_desc->xfer_len = MAX_DESCRIPTORS;
847 + async_desc->xfer_len = async_desc->num_desc;
849 + /* set INT on last descriptor */
850 + desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT;
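+ /* the descriptor FIFO is circular; split the copy if it wraps past the end */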
852 + if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
853 + u32 partial = MAX_DESCRIPTORS - bchan->tail;
855 + memcpy(&fifo[bchan->tail], desc,
856 + partial * sizeof(struct bam_desc_hw));
857 + memcpy(fifo, &desc[partial], (async_desc->xfer_len - partial) *
858 + sizeof(struct bam_desc_hw));
860 + memcpy(&fifo[bchan->tail], desc,
861 + async_desc->xfer_len * sizeof(struct bam_desc_hw));
864 + bchan->tail += async_desc->xfer_len;
865 + bchan->tail %= MAX_DESCRIPTORS;
867 + /* ensure descriptor writes and dma start not reordered */
869 + writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
870 + bdev->regs + BAM_P_EVNT_REG(bchan->id));
874 + * dma_tasklet - DMA IRQ tasklet
875 + * @data: tasklet argument (bam controller structure)
877 + * Kicks off the next DMA operation on each channel that is idle and has work pending
879 +static void dma_tasklet(unsigned long data)
881 + struct bam_device *bdev = (struct bam_device *)data;
882 + struct bam_chan *bchan;
883 + unsigned long flags;
886 + /* go through the channels and kick off transactions */
887 + for (i = 0; i < bdev->num_channels; i++) {
888 + bchan = &bdev->channels[i];
889 + spin_lock_irqsave(&bchan->vc.lock, flags);
891 + if (!list_empty(&bchan->vc.desc_issued) && !bchan->curr_txd)
892 + bam_start_dma(bchan);
893 + spin_unlock_irqrestore(&bchan->vc.lock, flags);
898 + * bam_issue_pending - starts pending transactions
899 + * @chan: dma channel
901 + * Starts any pending transactions immediately if the channel is idle
903 +static void bam_issue_pending(struct dma_chan *chan)
905 + struct bam_chan *bchan = to_bam_chan(chan);
906 + unsigned long flags;
908 + spin_lock_irqsave(&bchan->vc.lock, flags);
910 + /* if work pending and idle, start a transaction */
911 + if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd)
912 + bam_start_dma(bchan);
914 + spin_unlock_irqrestore(&bchan->vc.lock, flags);
918 + * bam_dma_free_desc - free descriptor memory
919 + * @vd: virtual descriptor
922 +static void bam_dma_free_desc(struct virt_dma_desc *vd)
924 + struct bam_async_desc *async_desc = container_of(vd,
925 + struct bam_async_desc, vd);
930 +static struct dma_chan *bam_dma_xlate(struct of_phandle_args *dma_spec,
933 + struct bam_device *bdev = container_of(of->of_dma_data,
934 + struct bam_device, common);
935 + unsigned int request;
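+ /* the single #dma-cells argument selects the BAM pipe (channel) number */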
937 + if (dma_spec->args_count != 1)
940 + request = dma_spec->args[0];
941 + if (request >= bdev->num_channels)
944 + return dma_get_slave_channel(&(bdev->channels[request].vc.chan));
949 + * @bdev: bam device
951 + * Initialization helper for global bam registers
953 +static int bam_init(struct bam_device *bdev)
957 + /* read revision and configuration information */
958 + val = readl_relaxed(bdev->regs + BAM_REVISION) >> NUM_EES_SHIFT;
959 + val &= NUM_EES_MASK;
961 + /* check that configured EE is within range */
962 + if (bdev->ee >= val)
965 + val = readl_relaxed(bdev->regs + BAM_NUM_PIPES);
966 + bdev->num_channels = val & BAM_NUM_PIPES_MASK;
968 + /* s/w reset bam */
969 + /* after reset all pipes are disabled and idle */
970 + val = readl_relaxed(bdev->regs + BAM_CTRL);
972 + writel_relaxed(val, bdev->regs + BAM_CTRL);
973 + val &= ~BAM_SW_RST;
974 + writel_relaxed(val, bdev->regs + BAM_CTRL);
976 + /* make sure previous stores are visible before enabling BAM */
981 + writel_relaxed(val, bdev->regs + BAM_CTRL);
983 + /* set descriptor threshold, start with 4 bytes */
984 + writel_relaxed(DEFAULT_CNT_THRSHLD, bdev->regs + BAM_DESC_CNT_TRSHLD);
986 + /* Enable default set of h/w workarounds, i.e. all except BAM_FULL_PIPE */
987 + writel_relaxed(BAM_CNFG_BITS_DEFAULT, bdev->regs + BAM_CNFG_BITS);
989 + /* enable irqs for errors */
990 + writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN,
991 + bdev->regs + BAM_IRQ_EN);
993 + /* unmask global bam interrupt */
994 + writel_relaxed(BAM_IRQ_MSK, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
999 +static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
1002 + bchan->id = index;
1003 + bchan->bdev = bdev;
1005 + vchan_init(&bchan->vc, &bdev->common);
1006 + bchan->vc.desc_free = bam_dma_free_desc;
1009 +static int bam_dma_probe(struct platform_device *pdev)
1011 + struct bam_device *bdev;
1012 + struct resource *iores;
1015 + bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
1019 + bdev->dev = &pdev->dev;
1021 + iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1022 + bdev->regs = devm_ioremap_resource(&pdev->dev, iores);
1023 + if (IS_ERR(bdev->regs))
1024 + return PTR_ERR(bdev->regs);
1026 + bdev->irq = platform_get_irq(pdev, 0);
1027 + if (bdev->irq < 0)
1030 + ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &bdev->ee);
1032 + dev_err(bdev->dev, "Execution environment unspecified\n");
1036 + bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
1037 + if (IS_ERR(bdev->bamclk))
1038 + return PTR_ERR(bdev->bamclk);
1040 + ret = clk_prepare_enable(bdev->bamclk);
1042 + dev_err(bdev->dev, "failed to prepare/enable clock\n");
1046 + ret = bam_init(bdev);
1048 + goto err_disable_clk;
1050 + tasklet_init(&bdev->task, dma_tasklet, (unsigned long)bdev);
1052 + bdev->channels = devm_kcalloc(bdev->dev, bdev->num_channels,
1053 + sizeof(*bdev->channels), GFP_KERNEL);
1055 + if (!bdev->channels) {
1057 + goto err_disable_clk;
1060 + /* allocate and initialize channels */
1061 + INIT_LIST_HEAD(&bdev->common.channels);
1063 + for (i = 0; i < bdev->num_channels; i++)
1064 + bam_channel_init(bdev, &bdev->channels[i], i);
1066 + ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq,
1067 + IRQF_TRIGGER_HIGH, "bam_dma", bdev);
1069 + goto err_disable_clk;
1071 + /* set max dma segment size */
1072 + bdev->common.dev = bdev->dev;
1073 + bdev->common.dev->dma_parms = &bdev->dma_parms;
1074 + ret = dma_set_max_seg_size(bdev->common.dev, BAM_MAX_DATA_SIZE);
1076 + dev_err(bdev->dev, "cannot set maximum segment size\n");
1077 + goto err_disable_clk;
1080 + platform_set_drvdata(pdev, bdev);
1082 + /* set capabilities */
1083 + dma_cap_zero(bdev->common.cap_mask);
1084 + dma_cap_set(DMA_SLAVE, bdev->common.cap_mask);
1086 + /* initialize dmaengine apis */
1087 + bdev->common.device_alloc_chan_resources = bam_alloc_chan;
1088 + bdev->common.device_free_chan_resources = bam_free_chan;
1089 + bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
1090 + bdev->common.device_control = bam_control;
1091 + bdev->common.device_issue_pending = bam_issue_pending;
1092 + bdev->common.device_tx_status = bam_tx_status;
1093 + bdev->common.dev = bdev->dev;
1095 + ret = dma_async_device_register(&bdev->common);
1097 + dev_err(bdev->dev, "failed to register dma async device\n");
1098 + goto err_disable_clk;
1101 + ret = of_dma_controller_register(pdev->dev.of_node, bam_dma_xlate,
1104 + goto err_unregister_dma;
1108 +err_unregister_dma:
1109 + dma_async_device_unregister(&bdev->common);
1111 + clk_disable_unprepare(bdev->bamclk);
1115 +static int bam_dma_remove(struct platform_device *pdev)
1117 + struct bam_device *bdev = platform_get_drvdata(pdev);
1120 + of_dma_controller_free(pdev->dev.of_node);
1121 + dma_async_device_unregister(&bdev->common);
1123 + /* mask all interrupts for this execution environment */
1124 + writel_relaxed(0, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
1126 + devm_free_irq(bdev->dev, bdev->irq, bdev);
1128 + for (i = 0; i < bdev->num_channels; i++) {
1129 + bam_dma_terminate_all(&bdev->channels[i]);
1130 + tasklet_kill(&bdev->channels[i].vc.task);
1132 + dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
1133 + bdev->channels[i].fifo_virt,
1134 + bdev->channels[i].fifo_phys);
1137 + tasklet_kill(&bdev->task);
1139 + clk_disable_unprepare(bdev->bamclk);
1144 +static const struct of_device_id bam_of_match[] = {
1145 + { .compatible = "qcom,bam-v1.4.0", },
1148 +MODULE_DEVICE_TABLE(of, bam_of_match);
1150 +static struct platform_driver bam_dma_driver = {
1151 + .probe = bam_dma_probe,
1152 + .remove = bam_dma_remove,
1154 + .name = "bam-dma-engine",
1155 + .owner = THIS_MODULE,
1156 + .of_match_table = bam_of_match,
1160 +module_platform_driver(bam_dma_driver);
1162 +MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
1163 +MODULE_DESCRIPTION("QCOM BAM DMA engine driver");
1164 +MODULE_LICENSE("GPL v2");