1 From 731adfb43892a1d7fe00e2036200f33a9b61a589 Mon Sep 17 00:00:00 2001
2 From: Biwen Li <biwen.li@nxp.com>
3 Date: Tue, 30 Oct 2018 18:26:02 +0800
4 Subject: [PATCH 19/40] dma: support layerscape
5 This is an integrated patch of DMA support for Layerscape
7 Signed-off-by: Catalin Horghidan <catalin.horghidan@nxp.com>
8 Signed-off-by: Changming Huang <jerry.huang@nxp.com>
9 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
10 Signed-off-by: jiaheng.fan <jiaheng.fan@nxp.com>
11 Signed-off-by: Peng Ma <peng.ma@nxp.com>
12 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
13 Signed-off-by: Rajiv Vishwakarma <rajiv.vishwakarma@nxp.com>
14 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
15 Signed-off-by: Wen He <wen.he_1@nxp.com>
16 Signed-off-by: Yuan Yao <yao.yuan@nxp.com>
17 Signed-off-by: Biwen Li <biwen.li@nxp.com>
19 .../devicetree/bindings/dma/fsl-qdma.txt | 51 +
20 drivers/dma/Kconfig | 33 +-
21 drivers/dma/Makefile | 3 +
22 drivers/dma/caam_dma.c | 462 ++++++
23 drivers/dma/dpaa2-qdma/Kconfig | 8 +
24 drivers/dma/dpaa2-qdma/Makefile | 8 +
25 drivers/dma/dpaa2-qdma/dpaa2-qdma.c | 940 ++++++++++++
26 drivers/dma/dpaa2-qdma/dpaa2-qdma.h | 227 +++
27 drivers/dma/dpaa2-qdma/dpdmai.c | 515 +++++++
28 drivers/dma/dpaa2-qdma/fsl_dpdmai.h | 521 +++++++
29 drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h | 222 +++
30 drivers/dma/fsl-qdma.c | 1278 +++++++++++++++++
31 12 files changed, 4267 insertions(+), 1 deletion(-)
32 create mode 100644 Documentation/devicetree/bindings/dma/fsl-qdma.txt
33 create mode 100644 drivers/dma/caam_dma.c
34 create mode 100644 drivers/dma/dpaa2-qdma/Kconfig
35 create mode 100644 drivers/dma/dpaa2-qdma/Makefile
36 create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.c
37 create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.h
38 create mode 100644 drivers/dma/dpaa2-qdma/dpdmai.c
39 create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai.h
40 create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
41 create mode 100644 drivers/dma/fsl-qdma.c
44 +++ b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
46 +* Freescale queue Direct Memory Access (qDMA) Controller
48 +The qDMA supports channel virtualization by allowing DMA jobs to be enqueued into
49 +different command queues. A core can initiate a DMA transaction by preparing a command
50 +descriptor for each DMA job and enqueuing this job to a command queue.
55 + should be "fsl,ls1021a-qdma".
56 +- reg : Specifies base physical address(es) and size of the qDMA registers.
57 + The 1st region is qDMA control register's address and size.
58 + The 2nd region is status queue control register's address and size.
59 + The 3rd region is virtual block control register's address and size.
60 +- interrupts : A list of interrupt-specifiers, one for each entry in
62 +- interrupt-names : Should contain:
63 + "qdma-queue0" - the block0 interrupt
64 + "qdma-queue1" - the block1 interrupt
65 + "qdma-queue2" - the block2 interrupt
66 + "qdma-queue3" - the block3 interrupt
67 + "qdma-error" - the error interrupt
68 +- channels : Number of DMA channels supported
69 +- block-number : the virtual block number
70 +- block-offset : the offset of each virtual block
71 +- queues : the number of command queues per virtual block
72 +- status-sizes : status queue size per virtual block
73 +- queue-sizes : command queue size per virtual block; the number of sizes is based on the queues property
74 +- big-endian: If present, registers and hardware scatter/gather descriptors
75 + of the qDMA are implemented in big endian mode, otherwise in little
79 + qdma: qdma@8390000 {
80 + compatible = "fsl,ls1021a-qdma";
81 + reg = <0x0 0x8388000 0x0 0x1000>, /* Controller regs */
82 + <0x0 0x8389000 0x0 0x1000>, /* Status regs */
83 + <0x0 0x838a000 0x0 0x2000>; /* Block regs */
84 + interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
85 + <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
86 + <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
87 + interrupt-names = "qdma-error",
88 + "qdma-queue0", "qdma-queue1";
91 + block-offset = <0x1000>;
93 + status-sizes = <64>;
94 + queue-sizes = <64 64>;
97 --- a/drivers/dma/Kconfig
98 +++ b/drivers/dma/Kconfig
99 @@ -129,6 +129,24 @@ config COH901318
101 Enable support for ST-Ericsson COH 901 318 DMA.
103 +config CRYPTO_DEV_FSL_CAAM_DMA
104 + tristate "CAAM DMA engine support"
105 + depends on CRYPTO_DEV_FSL_CAAM_JR
109 + select ASYNC_TX_ENABLE_CHANNEL_SWITCH
111 + Selecting this will offload the DMA operations for users of
112 + the scatter gather memcopy API to the CAAM via job rings. The
113 + CAAM is a hardware module that provides hardware acceleration to
114 + cryptographic operations. It has a built-in DMA controller that can
115 + be programmed to read/write cryptographic data. This module defines
116 + a DMA driver that uses the DMA capabilities of the CAAM.
118 + To compile this as a module, choose M here: the module
119 + will be called caam_dma.
122 tristate "BCM2835 DMA engine support"
123 depends on ARCH_BCM2835
124 @@ -215,6 +233,20 @@ config FSL_EDMA
125 multiplexing capability for DMA request sources(slot).
126 This module can be found on Freescale Vybrid and LS-1 SoCs.
129 + tristate "NXP Layerscape qDMA engine support"
131 + select DMA_VIRTUAL_CHANNELS
132 + select DMA_ENGINE_RAID
133 + select ASYNC_TX_ENABLE_CHANNEL_SWITCH
135 + Support the NXP Layerscape qDMA engine with command queue and legacy mode.
136 + Channel virtualization is supported through enqueuing of DMA jobs to,
137 + or dequeuing DMA jobs from, different work queues.
138 + This module can be found on NXP Layerscape SoCs.
140 +source drivers/dma/dpaa2-qdma/Kconfig
143 tristate "Freescale RAID engine Support"
144 depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
145 @@ -600,7 +632,6 @@ config ZX_DMA
147 Support the DMA engine for ZTE ZX family platform devices.
151 source "drivers/dma/bestcomm/Kconfig"
153 --- a/drivers/dma/Makefile
154 +++ b/drivers/dma/Makefile
155 @@ -31,7 +31,9 @@ obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
156 obj-$(CONFIG_DW_DMAC_CORE) += dw/
157 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
158 obj-$(CONFIG_FSL_DMA) += fsldma.o
159 +obj-$(CONFIG_FSL_DPAA2_QDMA) += dpaa2-qdma/
160 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
161 +obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
162 obj-$(CONFIG_FSL_RAID) += fsl_raid.o
163 obj-$(CONFIG_HSU_DMA) += hsu/
164 obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
165 @@ -71,6 +73,7 @@ obj-$(CONFIG_TI_EDMA) += edma.o
166 obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
167 obj-$(CONFIG_ZX_DMA) += zx_dma.o
168 obj-$(CONFIG_ST_FDMA) += st_fdma.o
169 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_DMA) += caam_dma.o
174 +++ b/drivers/dma/caam_dma.c
177 + * caam support for SG DMA
179 + * Copyright 2016 Freescale Semiconductor, Inc
180 + * Copyright 2017 NXP
182 + * Redistribution and use in source and binary forms, with or without
183 + * modification, are permitted provided that the following conditions are met:
184 + * * Redistributions of source code must retain the above copyright
185 + * notice, this list of conditions and the following disclaimer.
186 + * * Redistributions in binary form must reproduce the above copyright
187 + * notice, this list of conditions and the following disclaimer in the
188 + * documentation and/or other materials provided with the distribution.
189 + * * Neither the names of the above-listed copyright holders nor the
190 + * names of any contributors may be used to endorse or promote products
191 + * derived from this software without specific prior written permission.
194 + * ALTERNATIVELY, this software may be distributed under the terms of the
195 + * GNU General Public License ("GPL") as published by the Free Software
196 + * Foundation, either version 2 of that License or (at your option) any
199 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
200 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
201 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
202 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
203 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
204 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
205 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
206 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
207 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
208 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
209 + * POSSIBILITY OF SUCH DAMAGE.
212 +#include <linux/dma-mapping.h>
213 +#include <linux/dmaengine.h>
214 +#include <linux/module.h>
215 +#include <linux/platform_device.h>
216 +#include <linux/slab.h>
218 +#include "dmaengine.h"
220 +#include "../crypto/caam/regs.h"
221 +#include "../crypto/caam/jr.h"
222 +#include "../crypto/caam/error.h"
223 +#include "../crypto/caam/desc_constr.h"
225 +#define DESC_DMA_MEMCPY_LEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / \
229 + * This is the maximum chunk size of a DMA transfer. If a buffer is larger than this
230 + * value it is internally broken into chunks of max CAAM_DMA_CHUNK_SIZE bytes
231 + * and for each chunk a DMA transfer request is issued.
232 + * This value is the largest number on 16 bits that is a multiple of 256 bytes
233 + * (the largest configurable CAAM DMA burst size).
235 +#define CAAM_DMA_CHUNK_SIZE 65280
237 +struct caam_dma_sh_desc {
238 + u32 desc[DESC_DMA_MEMCPY_LEN] ____cacheline_aligned;
239 + dma_addr_t desc_dma;
242 +/* caam dma extended descriptor */
243 +struct caam_dma_edesc {
244 + struct dma_async_tx_descriptor async_tx;
245 + struct list_head node;
246 + struct caam_dma_ctx *ctx;
247 + dma_addr_t src_dma;
248 + dma_addr_t dst_dma;
249 + unsigned int src_len;
250 + unsigned int dst_len;
251 + u32 jd[] ____cacheline_aligned;
255 + * caam_dma_ctx - per jr/channel context
256 + * @chan: dma channel used by async_tx API
257 + * @node: list_head used to attach to the global dma_ctx_list
258 + * @jrdev: Job Ring device
259 + * @pending_q: queue of pending (submitted, but not enqueued) jobs
260 + * @done_not_acked: jobs that have been completed by jr, but maybe not acked
261 + * @edesc_lock: protects extended descriptor
263 +struct caam_dma_ctx {
264 + struct dma_chan chan;
265 + struct list_head node;
266 + struct device *jrdev;
267 + struct list_head pending_q;
268 + struct list_head done_not_acked;
269 + spinlock_t edesc_lock;
272 +static struct dma_device *dma_dev;
273 +static struct caam_dma_sh_desc *dma_sh_desc;
274 +static LIST_HEAD(dma_ctx_list);
276 +static dma_cookie_t caam_dma_tx_submit(struct dma_async_tx_descriptor *tx)
278 + struct caam_dma_edesc *edesc = NULL;
279 + struct caam_dma_ctx *ctx = NULL;
280 + dma_cookie_t cookie;
282 + edesc = container_of(tx, struct caam_dma_edesc, async_tx);
283 + ctx = container_of(tx->chan, struct caam_dma_ctx, chan);
285 + spin_lock_bh(&ctx->edesc_lock);
287 + cookie = dma_cookie_assign(tx);
288 + list_add_tail(&edesc->node, &ctx->pending_q);
290 + spin_unlock_bh(&ctx->edesc_lock);
295 +static void caam_jr_chan_free_edesc(struct caam_dma_edesc *edesc)
297 + struct caam_dma_ctx *ctx = edesc->ctx;
298 + struct caam_dma_edesc *_edesc = NULL;
300 + spin_lock_bh(&ctx->edesc_lock);
302 + list_add_tail(&edesc->node, &ctx->done_not_acked);
303 + list_for_each_entry_safe(edesc, _edesc, &ctx->done_not_acked, node) {
304 + if (async_tx_test_ack(&edesc->async_tx)) {
305 + list_del(&edesc->node);
310 + spin_unlock_bh(&ctx->edesc_lock);
313 +static void caam_dma_done(struct device *dev, u32 *hwdesc, u32 err,
316 + struct caam_dma_edesc *edesc = context;
317 + struct caam_dma_ctx *ctx = edesc->ctx;
318 + dma_async_tx_callback callback;
319 + void *callback_param;
322 + caam_jr_strstatus(ctx->jrdev, err);
324 + dma_run_dependencies(&edesc->async_tx);
326 + spin_lock_bh(&ctx->edesc_lock);
327 + dma_cookie_complete(&edesc->async_tx);
328 + spin_unlock_bh(&ctx->edesc_lock);
330 + callback = edesc->async_tx.callback;
331 + callback_param = edesc->async_tx.callback_param;
333 + dma_descriptor_unmap(&edesc->async_tx);
335 + caam_jr_chan_free_edesc(edesc);
338 + callback(callback_param);
341 +static void caam_dma_memcpy_init_job_desc(struct caam_dma_edesc *edesc)
343 + u32 *jd = edesc->jd;
344 + u32 *sh_desc = dma_sh_desc->desc;
345 + dma_addr_t desc_dma = dma_sh_desc->desc_dma;
347 + /* init the job descriptor */
348 + init_job_desc_shared(jd, desc_dma, desc_len(sh_desc), HDR_REVERSE);
350 + /* set SEQIN PTR */
351 + append_seq_in_ptr(jd, edesc->src_dma, edesc->src_len, 0);
353 + /* set SEQOUT PTR */
354 + append_seq_out_ptr(jd, edesc->dst_dma, edesc->dst_len, 0);
356 + print_hex_dump_debug("caam dma desc@" __stringify(__LINE__) ": ",
357 + DUMP_PREFIX_ADDRESS, 16, 4, jd, desc_bytes(jd), 1);
360 +static struct dma_async_tx_descriptor *
361 +caam_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
362 + size_t len, unsigned long flags)
364 + struct caam_dma_edesc *edesc;
365 + struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
368 + edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | GFP_NOWAIT);
370 + return ERR_PTR(-ENOMEM);
372 + dma_async_tx_descriptor_init(&edesc->async_tx, chan);
373 + edesc->async_tx.tx_submit = caam_dma_tx_submit;
374 + edesc->async_tx.flags = flags;
375 + edesc->async_tx.cookie = -EBUSY;
377 + edesc->src_dma = src;
378 + edesc->src_len = len;
379 + edesc->dst_dma = dst;
380 + edesc->dst_len = len;
383 + caam_dma_memcpy_init_job_desc(edesc);
385 + return &edesc->async_tx;
388 +/* This function can be called in an interrupt context */
389 +static void caam_dma_issue_pending(struct dma_chan *chan)
391 + struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
393 + struct caam_dma_edesc *edesc, *_edesc;
395 + spin_lock_bh(&ctx->edesc_lock);
396 + list_for_each_entry_safe(edesc, _edesc, &ctx->pending_q, node) {
397 + if (caam_jr_enqueue(ctx->jrdev, edesc->jd,
398 + caam_dma_done, edesc) < 0)
400 + list_del(&edesc->node);
402 + spin_unlock_bh(&ctx->edesc_lock);
405 +static void caam_dma_free_chan_resources(struct dma_chan *chan)
407 + struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
409 + struct caam_dma_edesc *edesc, *_edesc;
411 + spin_lock_bh(&ctx->edesc_lock);
412 + list_for_each_entry_safe(edesc, _edesc, &ctx->pending_q, node) {
413 + list_del(&edesc->node);
416 + list_for_each_entry_safe(edesc, _edesc, &ctx->done_not_acked, node) {
417 + list_del(&edesc->node);
420 + spin_unlock_bh(&ctx->edesc_lock);
423 +static int caam_dma_jr_chan_bind(void)
425 + struct device *jrdev;
426 + struct caam_dma_ctx *ctx;
430 + for (i = 0; i < caam_jr_driver_probed(); i++) {
431 + jrdev = caam_jridx_alloc(i);
432 + if (IS_ERR(jrdev)) {
433 + pr_err("job ring device %d allocation failed\n", i);
437 + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
439 + caam_jr_free(jrdev);
443 + ctx->chan.device = dma_dev;
444 + ctx->chan.private = ctx;
446 + ctx->jrdev = jrdev;
448 + INIT_LIST_HEAD(&ctx->pending_q);
449 + INIT_LIST_HEAD(&ctx->done_not_acked);
450 + INIT_LIST_HEAD(&ctx->node);
451 + spin_lock_init(&ctx->edesc_lock);
453 + dma_cookie_init(&ctx->chan);
455 + /* add the context of this channel to the context list */
456 + list_add_tail(&ctx->node, &dma_ctx_list);
458 + /* add this channel to the device chan list */
459 + list_add_tail(&ctx->chan.device_node, &dma_dev->channels);
467 +static inline void caam_jr_dma_free(struct dma_chan *chan)
469 + struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
472 + list_del(&ctx->node);
473 + list_del(&chan->device_node);
474 + caam_jr_free(ctx->jrdev);
478 +static void set_caam_dma_desc(u32 *desc)
482 + /* dma shared descriptor */
483 + init_sh_desc(desc, HDR_SHARE_NEVER | (1 << HDR_START_IDX_SHIFT));
485 + /* REG1 = CAAM_DMA_CHUNK_SIZE */
486 + append_math_add_imm_u32(desc, REG1, ZERO, IMM, CAAM_DMA_CHUNK_SIZE);
488 + /* REG0 = SEQINLEN - CAAM_DMA_CHUNK_SIZE */
489 + append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, CAAM_DMA_CHUNK_SIZE);
495 + jmp_cmd = append_jump(desc, JUMP_TEST_INVALL | JUMP_COND_MATH_N |
498 + /* REG1 = SEQINLEN */
499 + append_math_sub(desc, REG1, SEQINLEN, ZERO, CAAM_CMD_SZ);
502 + set_jump_tgt_here(desc, jmp_cmd);
504 + /* VARSEQINLEN = REG1 */
505 + append_math_add(desc, VARSEQINLEN, REG1, ZERO, CAAM_CMD_SZ);
507 + /* VARSEQOUTLEN = REG1 */
508 + append_math_add(desc, VARSEQOUTLEN, REG1, ZERO, CAAM_CMD_SZ);
510 + /* do FIFO STORE */
511 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_METADATA | LDST_VLF);
514 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
515 + FIFOLD_TYPE_IFIFO | LDST_VLF);
519 + * jmp 0xF8 (after shared desc header)
521 + append_jump(desc, JUMP_TEST_INVALL | JUMP_COND_MATH_N |
522 + JUMP_COND_MATH_Z | 0xF8);
524 + print_hex_dump_debug("caam dma shdesc@" __stringify(__LINE__) ": ",
525 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
529 +static int __init caam_dma_probe(struct platform_device *pdev)
531 + struct device *dev = &pdev->dev;
532 + struct device *ctrldev = dev->parent;
533 + struct dma_chan *chan, *_chan;
538 + if (!caam_jr_driver_probed()) {
539 + dev_info(dev, "Defer probing after JR driver probing\n");
540 + return -EPROBE_DEFER;
543 + dma_dev = kzalloc(sizeof(*dma_dev), GFP_KERNEL);
547 + dma_sh_desc = kzalloc(sizeof(*dma_sh_desc), GFP_KERNEL | GFP_DMA);
551 + sh_desc = dma_sh_desc->desc;
552 + set_caam_dma_desc(sh_desc);
553 + dma_sh_desc->desc_dma = dma_map_single(ctrldev, sh_desc,
554 + desc_bytes(sh_desc),
556 + if (dma_mapping_error(ctrldev, dma_sh_desc->desc_dma)) {
557 + dev_err(dev, "unable to map dma descriptor\n");
561 + INIT_LIST_HEAD(&dma_dev->channels);
563 + bonds = caam_dma_jr_chan_bind();
569 + dma_dev->dev = dev;
570 + dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
571 + dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
572 + dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
573 + dma_dev->device_tx_status = dma_cookie_status;
574 + dma_dev->device_issue_pending = caam_dma_issue_pending;
575 + dma_dev->device_prep_dma_memcpy = caam_dma_prep_memcpy;
576 + dma_dev->device_free_chan_resources = caam_dma_free_chan_resources;
578 + err = dma_async_device_register(dma_dev);
580 + dev_err(dev, "Failed to register CAAM DMA engine\n");
584 + dev_info(dev, "caam dma support with %d job rings\n", bonds);
589 + list_for_each_entry_safe(chan, _chan, &dma_dev->channels, device_node)
590 + caam_jr_dma_free(chan);
592 + dma_unmap_single(ctrldev, dma_sh_desc->desc_dma, desc_bytes(sh_desc),
595 + kfree(dma_sh_desc);
601 +static int caam_dma_remove(struct platform_device *pdev)
603 + struct device *dev = &pdev->dev;
604 + struct device *ctrldev = dev->parent;
605 + struct caam_dma_ctx *ctx, *_ctx;
607 + dma_async_device_unregister(dma_dev);
609 + list_for_each_entry_safe(ctx, _ctx, &dma_ctx_list, node) {
610 + list_del(&ctx->node);
611 + caam_jr_free(ctx->jrdev);
615 + dma_unmap_single(ctrldev, dma_sh_desc->desc_dma,
616 + desc_bytes(dma_sh_desc->desc), DMA_TO_DEVICE);
618 + kfree(dma_sh_desc);
621 + dev_info(dev, "caam dma support disabled\n");
625 +static struct platform_driver caam_dma_driver = {
627 + .name = "caam-dma",
629 + .probe = caam_dma_probe,
630 + .remove = caam_dma_remove,
632 +module_platform_driver(caam_dma_driver);
634 +MODULE_LICENSE("Dual BSD/GPL");
635 +MODULE_DESCRIPTION("NXP CAAM support for DMA engine");
636 +MODULE_AUTHOR("NXP Semiconductors");
637 +MODULE_ALIAS("platform:caam-dma");
639 +++ b/drivers/dma/dpaa2-qdma/Kconfig
641 +menuconfig FSL_DPAA2_QDMA
642 + tristate "NXP DPAA2 QDMA"
643 + depends on FSL_MC_BUS && FSL_MC_DPIO
645 + select DMA_VIRTUAL_CHANNELS
647 + NXP Data Path Acceleration Architecture 2 QDMA driver,
648 + using the NXP MC bus driver.
650 +++ b/drivers/dma/dpaa2-qdma/Makefile
653 +# Makefile for the NXP DPAA2 CAAM controllers
655 +ccflags-y += -DVERSION=\"\"
657 +obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma.o
659 +fsl-dpaa2-qdma-objs := dpaa2-qdma.o dpdmai.o
661 +++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.c
664 + * drivers/dma/dpaa2-qdma/dpaa2-qdma.c
666 + * Copyright 2015-2017 NXP Semiconductor, Inc.
667 + * Author: Changming Huang <jerry.huang@nxp.com>
669 + * Driver for the NXP QDMA engine with QMan mode.
670 + * Channel virtualization is supported through enqueuing of DMA jobs to,
671 + * or dequeuing DMA jobs from different work queues with QMan portal.
672 + * This module can be found on NXP LS2 SoCs.
674 + * This program is free software; you can redistribute it and/or modify it
675 + * under the terms of the GNU General Public License as published by the
676 + * Free Software Foundation; either version 2 of the License, or (at your
677 + * option) any later version.
680 +#include <linux/init.h>
681 +#include <linux/module.h>
682 +#include <linux/interrupt.h>
683 +#include <linux/clk.h>
684 +#include <linux/dma-mapping.h>
685 +#include <linux/dmapool.h>
686 +#include <linux/slab.h>
687 +#include <linux/spinlock.h>
688 +#include <linux/of.h>
689 +#include <linux/of_device.h>
690 +#include <linux/of_address.h>
691 +#include <linux/of_irq.h>
692 +#include <linux/of_dma.h>
693 +#include <linux/types.h>
694 +#include <linux/delay.h>
695 +#include <linux/iommu.h>
697 +#include "../virt-dma.h"
699 +#include <linux/fsl/mc.h>
700 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
701 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
702 +#include "fsl_dpdmai_cmd.h"
703 +#include "fsl_dpdmai.h"
704 +#include "dpaa2-qdma.h"
706 +static bool smmu_disable = true;
708 +static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
710 + return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
713 +static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
715 + return container_of(vd, struct dpaa2_qdma_comp, vdesc);
718 +static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
723 +static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
725 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
726 + unsigned long flags;
729 + spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
730 + vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
731 + spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);
733 + vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);
737 + * Request a command descriptor for enqueue.
739 +static struct dpaa2_qdma_comp *
740 +dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
742 + struct dpaa2_qdma_comp *comp_temp = NULL;
743 + unsigned long flags;
745 + spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
746 + if (list_empty(&dpaa2_chan->comp_free)) {
747 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
748 + comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
751 + comp_temp->fd_virt_addr = dma_pool_alloc(dpaa2_chan->fd_pool,
752 + GFP_NOWAIT, &comp_temp->fd_bus_addr);
753 + if (!comp_temp->fd_virt_addr)
756 + comp_temp->fl_virt_addr =
757 + (void *)((struct dpaa2_fd *)
758 + comp_temp->fd_virt_addr + 1);
759 + comp_temp->fl_bus_addr = comp_temp->fd_bus_addr +
760 + sizeof(struct dpaa2_fd);
761 + comp_temp->desc_virt_addr =
762 + (void *)((struct dpaa2_fl_entry *)
763 + comp_temp->fl_virt_addr + 3);
764 + comp_temp->desc_bus_addr = comp_temp->fl_bus_addr +
765 + sizeof(struct dpaa2_fl_entry) * 3;
767 + comp_temp->qchan = dpaa2_chan;
768 + comp_temp->sg_blk_num = 0;
769 + INIT_LIST_HEAD(&comp_temp->sg_src_head);
770 + INIT_LIST_HEAD(&comp_temp->sg_dst_head);
773 + comp_temp = list_first_entry(&dpaa2_chan->comp_free,
774 + struct dpaa2_qdma_comp, list);
775 + list_del(&comp_temp->list);
776 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
778 + comp_temp->qchan = dpaa2_chan;
783 +static void dpaa2_qdma_populate_fd(uint32_t format,
784 + struct dpaa2_qdma_comp *dpaa2_comp)
786 + struct dpaa2_fd *fd;
788 + fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
789 + memset(fd, 0, sizeof(struct dpaa2_fd));
792 + dpaa2_fd_set_addr(fd, dpaa2_comp->fl_bus_addr);
793 + /* Bypass memory translation, Frame list format, short length disable */
794 + /* we need to disable BMT if fsl-mc use iova addr */
796 + dpaa2_fd_set_bpid(fd, QMAN_FD_BMT_ENABLE);
797 + dpaa2_fd_set_format(fd, QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE);
799 + dpaa2_fd_set_frc(fd, format | QDMA_SER_CTX);
802 +/* first frame list for descriptor buffer */
803 +static void dpaa2_qdma_populate_first_framel(
804 + struct dpaa2_fl_entry *f_list,
805 + struct dpaa2_qdma_comp *dpaa2_comp)
807 + struct dpaa2_qdma_sd_d *sdd;
809 + sdd = (struct dpaa2_qdma_sd_d *)dpaa2_comp->desc_virt_addr;
810 + memset(sdd, 0, 2 * (sizeof(*sdd)));
811 + /* source and destination descriptor */
812 + sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT); /* source descriptor CMD */
814 + sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT); /* dest descriptor CMD */
816 + memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
817 + /* first frame list to source descriptor */
819 + dpaa2_fl_set_addr(f_list, dpaa2_comp->desc_bus_addr);
820 + dpaa2_fl_set_len(f_list, 0x20);
821 + dpaa2_fl_set_format(f_list, QDMA_FL_FMT_SBF | QDMA_FL_SL_LONG);
824 + f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE); /* bypass memory translation */
827 +/* source and destination frame list */
828 +static void dpaa2_qdma_populate_frames(struct dpaa2_fl_entry *f_list,
829 + dma_addr_t dst, dma_addr_t src, size_t len, uint8_t fmt)
831 + /* source frame list to source buffer */
832 + memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
835 + dpaa2_fl_set_addr(f_list, src);
836 + dpaa2_fl_set_len(f_list, len);
837 + dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG)); /* single buffer frame or scatter gather frame */
839 + f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE); /* bypass memory translation */
842 + /* destination frame list to destination buffer */
843 + memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
845 + dpaa2_fl_set_addr(f_list, dst);
846 + dpaa2_fl_set_len(f_list, len);
847 + dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
848 + dpaa2_fl_set_final(f_list, QDMA_FL_F); /* single buffer frame or scatter gather frame */
850 + f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE); /* bypass memory translation */
853 +static struct dma_async_tx_descriptor *dpaa2_qdma_prep_memcpy(
854 + struct dma_chan *chan, dma_addr_t dst,
855 + dma_addr_t src, size_t len, unsigned long flags)
857 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
858 + struct dpaa2_qdma_comp *dpaa2_comp;
859 + struct dpaa2_fl_entry *f_list;
862 + dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
865 + format = QDMA_FD_LONG_FORMAT;
867 + format = QDMA_FD_SHORT_FORMAT;
869 + /* populate Frame descriptor */
870 + dpaa2_qdma_populate_fd(format, dpaa2_comp);
872 + f_list = (struct dpaa2_fl_entry *)dpaa2_comp->fl_virt_addr;
875 +	/* first frame list for descriptor buffer (long format) */
876 + dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp);
881 + dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);
883 + return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
886 +static struct qdma_sg_blk *dpaa2_qdma_get_sg_blk(
887 + struct dpaa2_qdma_comp *dpaa2_comp,
888 + struct dpaa2_qdma_chan *dpaa2_chan)
890 + struct qdma_sg_blk *sg_blk = NULL;
891 + dma_addr_t phy_sgb;
892 + unsigned long flags;
894 + spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
895 + if (list_empty(&dpaa2_chan->sgb_free)) {
896 + sg_blk = (struct qdma_sg_blk *)dma_pool_alloc(
897 + dpaa2_chan->sg_blk_pool,
898 + GFP_NOWAIT, &phy_sgb);
900 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
903 + sg_blk->blk_virt_addr = (void *)(sg_blk + 1);
904 + sg_blk->blk_bus_addr = phy_sgb + sizeof(*sg_blk);
906 + sg_blk = list_first_entry(&dpaa2_chan->sgb_free,
907 + struct qdma_sg_blk, list);
908 + list_del(&sg_blk->list);
910 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
915 +static uint32_t dpaa2_qdma_populate_sg(struct device *dev,
916 + struct dpaa2_qdma_chan *dpaa2_chan,
917 + struct dpaa2_qdma_comp *dpaa2_comp,
918 + struct scatterlist *dst_sg, u32 dst_nents,
919 + struct scatterlist *src_sg, u32 src_nents)
921 + struct dpaa2_qdma_sg *src_sge;
922 + struct dpaa2_qdma_sg *dst_sge;
923 + struct qdma_sg_blk *sg_blk;
924 + struct qdma_sg_blk *sg_blk_dst;
930 + uint32_t total_len = 0;
933 + num = min(dst_nents, src_nents);
934 + blocks = num / (NUM_SG_PER_BLK - 1);
935 + if (num % (NUM_SG_PER_BLK - 1))
937 + if (dpaa2_comp->sg_blk_num < blocks) {
938 + len = blocks - dpaa2_comp->sg_blk_num;
939 + for (i = 0; i < len; i++) {
940 + /* source sg blocks */
941 + sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan);
944 + list_add_tail(&sg_blk->list, &dpaa2_comp->sg_src_head);
945 + /* destination sg blocks */
946 + sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan);
949 + list_add_tail(&sg_blk->list, &dpaa2_comp->sg_dst_head);
952 + len = dpaa2_comp->sg_blk_num - blocks;
953 + for (i = 0; i < len; i++) {
954 + spin_lock(&dpaa2_chan->queue_lock);
955 + /* handle source sg blocks */
956 + sg_blk = list_first_entry(&dpaa2_comp->sg_src_head,
957 + struct qdma_sg_blk, list);
958 + list_del(&sg_blk->list);
959 + list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free);
960 + /* handle destination sg blocks */
961 + sg_blk = list_first_entry(&dpaa2_comp->sg_dst_head,
962 + struct qdma_sg_blk, list);
963 + list_del(&sg_blk->list);
964 + list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free);
965 + spin_unlock(&dpaa2_chan->queue_lock);
968 + dpaa2_comp->sg_blk_num = blocks;
970 + /* get the first source sg phy address */
971 + sg_blk = list_first_entry(&dpaa2_comp->sg_src_head,
972 + struct qdma_sg_blk, list);
973 + dpaa2_comp->sge_src_bus_addr = sg_blk->blk_bus_addr;
974 +	/* get the first destination sg phy address */
975 + sg_blk_dst = list_first_entry(&dpaa2_comp->sg_dst_head,
976 + struct qdma_sg_blk, list);
977 + dpaa2_comp->sge_dst_bus_addr = sg_blk_dst->blk_bus_addr;
979 + for (i = 0; i < blocks; i++) {
980 + src_sge = (struct dpaa2_qdma_sg *)sg_blk->blk_virt_addr;
981 + dst_sge = (struct dpaa2_qdma_sg *)sg_blk_dst->blk_virt_addr;
983 + for (j = 0; j < (NUM_SG_PER_BLK - 1); j++) {
984 + len = min(sg_dma_len(dst_sg), sg_dma_len(src_sg));
988 + src = sg_dma_address(src_sg);
989 + dst = sg_dma_address(dst_sg);
992 + src_sge->addr_lo = src;
993 + src_sge->addr_hi = (src >> 32);
994 + src_sge->data_len.data_len_sl0 = len;
995 + src_sge->ctrl.sl = QDMA_SG_SL_LONG;
996 + src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
997 + /* destination SG */
998 + dst_sge->addr_lo = dst;
999 + dst_sge->addr_hi = (dst >> 32);
1000 + dst_sge->data_len.data_len_sl0 = len;
1001 + dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
1002 + dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
1006 + src_sge->ctrl.f = QDMA_SG_F;
1007 + dst_sge->ctrl.f = QDMA_SG_F;
1010 + dst_sg = sg_next(dst_sg);
1011 + src_sg = sg_next(src_sg);
1014 + if (j == (NUM_SG_PER_BLK - 2)) {
1015 + /* for next blocks, extension */
1016 + sg_blk = list_next_entry(sg_blk, list);
1017 + sg_blk_dst = list_next_entry(sg_blk_dst, list);
1018 + src_sge->addr_lo = sg_blk->blk_bus_addr;
1019 + src_sge->addr_hi = sg_blk->blk_bus_addr >> 32;
1020 + src_sge->ctrl.sl = QDMA_SG_SL_LONG;
1021 + src_sge->ctrl.fmt = QDMA_SG_FMT_SGTE;
1022 + dst_sge->addr_lo = sg_blk_dst->blk_bus_addr;
1023 + dst_sge->addr_hi =
1024 + sg_blk_dst->blk_bus_addr >> 32;
1025 + dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
1026 + dst_sge->ctrl.fmt = QDMA_SG_FMT_SGTE;
1035 +static enum dma_status dpaa2_qdma_tx_status(struct dma_chan *chan,
1036 + dma_cookie_t cookie, struct dma_tx_state *txstate)
1038 + return dma_cookie_status(chan, cookie, txstate);
1041 +static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
1045 +static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
1047 + struct dpaa2_qdma_comp *dpaa2_comp;
1048 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
1049 + struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
1050 + struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
1051 + struct virt_dma_desc *vdesc;
1052 + struct dpaa2_fd *fd;
1054 + unsigned long flags;
1056 + spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
1057 + spin_lock(&dpaa2_chan->vchan.lock);
1058 + if (vchan_issue_pending(&dpaa2_chan->vchan)) {
1059 + vdesc = vchan_next_desc(&dpaa2_chan->vchan);
1062 + dpaa2_comp = to_fsl_qdma_comp(vdesc);
1064 + fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
1066 + list_del(&vdesc->node);
1067 + list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);
1069 +	/* TODO: priority hard-coded to zero */
1070 + err = dpaa2_io_service_enqueue_fq(NULL,
1071 + priv->tx_queue_attr[0].fqid, fd);
1073 + list_del(&dpaa2_comp->list);
1074 + list_add_tail(&dpaa2_comp->list,
1075 + &dpaa2_chan->comp_free);
1080 + spin_unlock(&dpaa2_chan->vchan.lock);
1081 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
1084 +static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
1086 + struct device *dev = &ls_dev->dev;
1087 + struct dpaa2_qdma_priv *priv;
1088 + struct dpaa2_qdma_priv_per_prio *ppriv;
1089 + uint8_t prio_def = DPDMAI_PRIO_NUM;
1093 + priv = dev_get_drvdata(dev);
1096 + priv->dpqdma_id = ls_dev->obj_desc.id;
1098 +	/* Get the handle for the DPDMAI this interface is associated with */
1099 + err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
1101 + dev_err(dev, "dpdmai_open() failed\n");
1104 + dev_info(dev, "Opened dpdmai object successfully\n");
1106 + err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
1107 + &priv->dpdmai_attr);
1109 + dev_err(dev, "dpdmai_get_attributes() failed\n");
1113 + if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
1114 + dev_err(dev, "DPDMAI major version mismatch\n"
1115 + "Found %u.%u, supported version is %u.%u\n",
1116 + priv->dpdmai_attr.version.major,
1117 + priv->dpdmai_attr.version.minor,
1118 + DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
1121 + if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
1122 + dev_err(dev, "DPDMAI minor version mismatch\n"
1123 + "Found %u.%u, supported version is %u.%u\n",
1124 + priv->dpdmai_attr.version.major,
1125 + priv->dpdmai_attr.version.minor,
1126 + DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
1129 + priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
1130 + ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
1132 + dev_err(dev, "kzalloc for ppriv failed\n");
1135 + priv->ppriv = ppriv;
1137 + for (i = 0; i < priv->num_pairs; i++) {
1138 + err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
1139 + i, &priv->rx_queue_attr[i]);
1141 + dev_err(dev, "dpdmai_get_rx_queue() failed\n");
1144 + ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
1146 + err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
1147 + i, &priv->tx_queue_attr[i]);
1149 + dev_err(dev, "dpdmai_get_tx_queue() failed\n");
1152 + ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
1154 + ppriv->priv = priv;
1161 +static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
1163 + struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
1164 + struct dpaa2_qdma_priv_per_prio, nctx);
1165 + struct dpaa2_qdma_priv *priv = ppriv->priv;
1166 + struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
1167 + struct dpaa2_qdma_chan *qchan;
1168 + const struct dpaa2_fd *fd;
1169 + const struct dpaa2_fd *fd_eq;
1170 + struct dpaa2_dq *dq;
1176 + uint32_t n_chans = priv->dpaa2_qdma->n_chans;
1179 + err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
1183 + while (!is_last) {
1185 + dq = dpaa2_io_store_next(ppriv->store, &is_last);
1186 + } while (!is_last && !dq);
1188 + dev_err(priv->dev, "FQID returned no valid frames!\n");
1192 + /* obtain FD and process the error */
1193 + fd = dpaa2_dq_fd(dq);
1195 + status = dpaa2_fd_get_ctrl(fd) & 0xff;
1197 + dev_err(priv->dev, "FD error occurred\n");
1199 + for (i = 0; i < n_chans; i++) {
1200 + qchan = &priv->dpaa2_qdma->chans[i];
1201 + spin_lock(&qchan->queue_lock);
1202 + if (list_empty(&qchan->comp_used)) {
1203 + spin_unlock(&qchan->queue_lock);
1206 + list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
1207 + &qchan->comp_used, list) {
1208 + fd_eq = (struct dpaa2_fd *)
1209 + dpaa2_comp->fd_virt_addr;
1211 + if (le64_to_cpu(fd_eq->simple.addr) ==
1212 + le64_to_cpu(fd->simple.addr)) {
1214 + list_del(&dpaa2_comp->list);
1215 + list_add_tail(&dpaa2_comp->list,
1216 + &qchan->comp_free);
1218 + spin_lock(&qchan->vchan.lock);
1219 + vchan_cookie_complete(
1220 + &dpaa2_comp->vdesc);
1221 + spin_unlock(&qchan->vchan.lock);
1226 + spin_unlock(&qchan->queue_lock);
1232 + dpaa2_io_service_rearm(NULL, ctx);
1235 +static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
1238 + struct device *dev = priv->dev;
1239 + struct dpaa2_qdma_priv_per_prio *ppriv;
1241 + num = priv->num_pairs;
1242 + ppriv = priv->ppriv;
1243 + for (i = 0; i < num; i++) {
1244 + ppriv->nctx.is_cdan = 0;
1245 + ppriv->nctx.desired_cpu = 1;
1246 + ppriv->nctx.id = ppriv->rsp_fqid;
1247 + ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
1248 + err = dpaa2_io_service_register(NULL, &ppriv->nctx);
1250 + dev_err(dev, "Notification register failed\n");
1254 + ppriv->store = dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE,
1256 + if (!ppriv->store) {
1257 + dev_err(dev, "dpaa2_io_store_create() failed\n");
1266 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
1269 + while (ppriv >= priv->ppriv) {
1270 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
1271 + dpaa2_io_store_destroy(ppriv->store);
1277 +static void __cold dpaa2_dpmai_store_free(struct dpaa2_qdma_priv *priv)
1279 + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
1282 + for (i = 0; i < priv->num_pairs; i++) {
1283 + dpaa2_io_store_destroy(ppriv->store);
1288 +static void __cold dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
1290 + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
1293 + for (i = 0; i < priv->num_pairs; i++) {
1294 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
1299 +static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
1302 + struct dpdmai_rx_queue_cfg rx_queue_cfg;
1303 + struct device *dev = priv->dev;
1304 + struct dpaa2_qdma_priv_per_prio *ppriv;
1305 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
1308 + num = priv->num_pairs;
1309 + ppriv = priv->ppriv;
1310 + for (i = 0; i < num; i++) {
1311 + rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
1312 + DPDMAI_QUEUE_OPT_DEST;
1313 + rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
1314 + rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
1315 + rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
1316 + rx_queue_cfg.dest_cfg.priority = ppriv->prio;
1317 + err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
1318 + rx_queue_cfg.dest_cfg.priority, &rx_queue_cfg);
1320 + dev_err(dev, "dpdmai_set_rx_queue() failed\n");
1330 +static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
1333 + struct device *dev = priv->dev;
1334 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
1335 + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
1338 + for (i = 0; i < priv->num_pairs; i++) {
1339 + ppriv->nctx.qman64 = 0;
1340 + ppriv->nctx.dpio_id = 0;
1344 + err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
1346 + dev_err(dev, "dpdmai_reset() failed\n");
1351 +static void __cold dpaa2_dpdmai_free_pool(struct dpaa2_qdma_chan *qchan,
1352 + struct list_head *head)
1354 + struct qdma_sg_blk *sgb_tmp, *_sgb_tmp;
1355 + /* free the QDMA SG pool block */
1356 + list_for_each_entry_safe(sgb_tmp, _sgb_tmp, head, list) {
1357 + sgb_tmp->blk_virt_addr = (void *)((struct qdma_sg_blk *)
1358 + sgb_tmp->blk_virt_addr - 1);
1359 + sgb_tmp->blk_bus_addr = sgb_tmp->blk_bus_addr
1360 + - sizeof(*sgb_tmp);
1361 + dma_pool_free(qchan->sg_blk_pool, sgb_tmp->blk_virt_addr,
1362 + sgb_tmp->blk_bus_addr);
1367 +static void __cold dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
1368 + struct list_head *head)
1370 + struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
1371 + /* free the QDMA comp resource */
1372 + list_for_each_entry_safe(comp_tmp, _comp_tmp,
1374 + dma_pool_free(qchan->fd_pool,
1375 + comp_tmp->fd_virt_addr,
1376 + comp_tmp->fd_bus_addr);
1377 + /* free the SG source block on comp */
1378 + dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_src_head);
1379 + /* free the SG destination block on comp */
1380 + dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_dst_head);
1381 + list_del(&comp_tmp->list);
1387 +static void __cold dpaa2_dpdmai_free_channels(
1388 + struct dpaa2_qdma_engine *dpaa2_qdma)
1390 + struct dpaa2_qdma_chan *qchan;
1393 + num = dpaa2_qdma->n_chans;
1394 + for (i = 0; i < num; i++) {
1395 + qchan = &dpaa2_qdma->chans[i];
1396 + dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
1397 + dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
1398 + dpaa2_dpdmai_free_pool(qchan, &qchan->sgb_free);
1399 + dma_pool_destroy(qchan->fd_pool);
1400 + dma_pool_destroy(qchan->sg_blk_pool);
1404 +static int dpaa2_dpdmai_alloc_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
1406 + struct dpaa2_qdma_chan *dpaa2_chan;
1407 + struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;
1410 + INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
1411 + for (i = 0; i < dpaa2_qdma->n_chans; i++) {
1412 + dpaa2_chan = &dpaa2_qdma->chans[i];
1413 + dpaa2_chan->qdma = dpaa2_qdma;
1414 + dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
1415 + vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
1417 + dpaa2_chan->fd_pool = dma_pool_create("fd_pool",
1418 + dev, FD_POOL_SIZE, 32, 0);
1419 + if (!dpaa2_chan->fd_pool)
1421 + dpaa2_chan->sg_blk_pool = dma_pool_create("sg_blk_pool",
1422 + dev, SG_POOL_SIZE, 32, 0);
1423 + if (!dpaa2_chan->sg_blk_pool)
1426 + spin_lock_init(&dpaa2_chan->queue_lock);
1427 + INIT_LIST_HEAD(&dpaa2_chan->comp_used);
1428 + INIT_LIST_HEAD(&dpaa2_chan->comp_free);
1429 + INIT_LIST_HEAD(&dpaa2_chan->sgb_free);
1434 +static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
1436 + struct dpaa2_qdma_priv *priv;
1437 + struct device *dev = &dpdmai_dev->dev;
1438 + struct dpaa2_qdma_engine *dpaa2_qdma;
1441 + priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1444 + dev_set_drvdata(dev, priv);
1445 + priv->dpdmai_dev = dpdmai_dev;
1447 + priv->iommu_domain = iommu_get_domain_for_dev(dev);
1448 + if (priv->iommu_domain)
1449 + smmu_disable = false;
1451 + /* obtain a MC portal */
1452 + err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
1454 + dev_err(dev, "MC portal allocation failed\n");
1455 + goto err_mcportal;
1458 + /* DPDMAI initialization */
1459 + err = dpaa2_qdma_setup(dpdmai_dev);
1461 + dev_err(dev, "dpaa2_dpdmai_setup() failed\n");
1462 + goto err_dpdmai_setup;
1466 + err = dpaa2_qdma_dpio_setup(priv);
1468 + dev_err(dev, "dpaa2_dpdmai_dpio_setup() failed\n");
1469 + goto err_dpio_setup;
1472 + /* DPDMAI binding to DPIO */
1473 + err = dpaa2_dpdmai_bind(priv);
1475 + dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
1479 + /* DPDMAI enable */
1480 + err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
1482 + dev_err(dev, "dpdmai_enable() faile\n");
1486 + dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
1487 + if (!dpaa2_qdma) {
1492 + priv->dpaa2_qdma = dpaa2_qdma;
1493 + dpaa2_qdma->priv = priv;
1495 + dpaa2_qdma->n_chans = NUM_CH;
1497 + err = dpaa2_dpdmai_alloc_channels(dpaa2_qdma);
1499 + dev_err(dev, "QDMA alloc channels faile\n");
1503 + dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
1504 + dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
1505 + dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);
1507 + dpaa2_qdma->dma_dev.dev = dev;
1508 + dpaa2_qdma->dma_dev.device_alloc_chan_resources
1509 + = dpaa2_qdma_alloc_chan_resources;
1510 + dpaa2_qdma->dma_dev.device_free_chan_resources
1511 + = dpaa2_qdma_free_chan_resources;
1512 + dpaa2_qdma->dma_dev.device_tx_status = dpaa2_qdma_tx_status;
1513 + dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
1514 + dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;
1516 + err = dma_async_device_register(&dpaa2_qdma->dma_dev);
1518 + dev_err(dev, "Can't register NXP QDMA engine.\n");
1525 + dpaa2_dpdmai_free_channels(dpaa2_qdma);
1526 + kfree(dpaa2_qdma);
1528 + dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
1530 + dpaa2_dpdmai_dpio_unbind(priv);
1532 + dpaa2_dpmai_store_free(priv);
1533 + dpaa2_dpdmai_dpio_free(priv);
1535 + dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
1537 + fsl_mc_portal_free(priv->mc_io);
1539 + kfree(priv->ppriv);
1541 + dev_set_drvdata(dev, NULL);
1545 +static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
1547 + struct device *dev;
1548 + struct dpaa2_qdma_priv *priv;
1549 + struct dpaa2_qdma_engine *dpaa2_qdma;
1551 + dev = &ls_dev->dev;
1552 + priv = dev_get_drvdata(dev);
1553 + dpaa2_qdma = priv->dpaa2_qdma;
1555 + dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
1556 + dpaa2_dpdmai_dpio_unbind(priv);
1557 + dpaa2_dpmai_store_free(priv);
1558 + dpaa2_dpdmai_dpio_free(priv);
1559 + dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
1560 + fsl_mc_portal_free(priv->mc_io);
1561 + dev_set_drvdata(dev, NULL);
1562 + dpaa2_dpdmai_free_channels(dpaa2_qdma);
1564 + dma_async_device_unregister(&dpaa2_qdma->dma_dev);
1566 + kfree(dpaa2_qdma);
1571 +static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
1573 + .vendor = FSL_MC_VENDOR_FREESCALE,
1574 + .obj_type = "dpdmai",
1579 +static struct fsl_mc_driver dpaa2_qdma_driver = {
1581 + .name = "dpaa2-qdma",
1582 + .owner = THIS_MODULE,
1584 + .probe = dpaa2_qdma_probe,
1585 + .remove = dpaa2_qdma_remove,
1586 + .match_id_table = dpaa2_qdma_id_table
1589 +static int __init dpaa2_qdma_driver_init(void)
1591 + return fsl_mc_driver_register(&(dpaa2_qdma_driver));
1593 +late_initcall(dpaa2_qdma_driver_init);
1595 +static void __exit fsl_qdma_exit(void)
1597 + fsl_mc_driver_unregister(&(dpaa2_qdma_driver));
1599 +module_exit(fsl_qdma_exit);
1601 +MODULE_DESCRIPTION("NXP DPAA2 qDMA driver");
1602 +MODULE_LICENSE("Dual BSD/GPL");
1604 +++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.h
1606 +/* Copyright 2015 NXP Semiconductor Inc.
1608 + * Redistribution and use in source and binary forms, with or without
1609 + * modification, are permitted provided that the following conditions are met:
1610 + * * Redistributions of source code must retain the above copyright
1611 + * notice, this list of conditions and the following disclaimer.
1612 + * * Redistributions in binary form must reproduce the above copyright
1613 + * notice, this list of conditions and the following disclaimer in the
1614 + * documentation and/or other materials provided with the distribution.
1615 + * * Neither the name of NXP Semiconductor nor the
1616 + * names of its contributors may be used to endorse or promote products
1617 + * derived from this software without specific prior written permission.
1620 + * ALTERNATIVELY, this software may be distributed under the terms of the
1621 + * GNU General Public License ("GPL") as published by the Free Software
1622 + * Foundation, either version 2 of that License or (at your option) any
1625 + * THIS SOFTWARE IS PROVIDED BY NXP Semiconductor ``AS IS'' AND ANY
1626 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1627 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1628 + * DISCLAIMED. IN NO EVENT SHALL NXP Semiconductor BE LIABLE FOR ANY
1629 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1630 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1631 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1632 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1633 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1634 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1637 +#ifndef __DPAA2_QDMA_H
1638 +#define __DPAA2_QDMA_H
1640 +#define LONG_FORMAT 1
1642 +#define DPAA2_QDMA_STORE_SIZE 16
1644 +#define NUM_SG_PER_BLK 16
1646 +#define QDMA_DMR_OFFSET 0x0
1647 +#define QDMA_DQ_EN (0 << 30)
1648 +#define QDMA_DQ_DIS (1 << 30)
1650 +#define QDMA_DSR_M_OFFSET 0x10004
1652 +struct dpaa2_qdma_sd_d {
1656 +	uint32_t ssd:12; /* source stride distance */
1657 +	uint32_t sss:12; /* source stride size */
1661 + uint32_t dsd:12; /* Destination stride distance */
1662 + uint32_t dss:12; /* Destination stride size */
1666 + uint32_t rbpcmd; /* Route-by-port command */
1668 +} __attribute__((__packed__));
1669 +/* Source descriptor command read transaction type for RBP=0:
1670 + coherent copy of cacheable memory */
1671 +#define QDMA_SD_CMD_RDTTYPE_COHERENT (0xb << 28)
1672 +/* Destination descriptor command write transaction type for RBP=0:
1673 + coherent copy of cacheable memory */
1674 +#define QDMA_DD_CMD_WRTTYPE_COHERENT (0x6 << 28)
1676 +#define QDMA_SG_FMT_SDB 0x0 /* single data buffer */
1677 +#define QDMA_SG_FMT_FDS 0x1 /* frame data section */
1678 +#define QDMA_SG_FMT_SGTE 0x2 /* SGT extension */
1679 +#define QDMA_SG_SL_SHORT 0x1 /* short length */
1680 +#define QDMA_SG_SL_LONG	0x0 /* long length */
1681 +#define QDMA_SG_F 0x1 /* last sg entry */
1682 +struct dpaa2_qdma_sg {
1683 + uint32_t addr_lo; /* address 0:31 */
1684 + uint32_t addr_hi:17; /* address 32:48 */
1687 + uint32_t data_len_sl0; /* SL=0, the long format */
1689 + uint32_t len:17; /* SL=1, the short format */
1690 + uint32_t reserve:3;
1693 + uint32_t size:10; /* buff size */
1695 + } data_len; /* AVAIL_LENGTH */
1700 + uint32_t offset:12;
1705 +} __attribute__((__packed__));
1707 +#define QMAN_FD_FMT_ENABLE (1) /* frame list table enable */
1708 +#define QMAN_FD_BMT_ENABLE (1 << 15) /* bypass memory translation */
1709 +#define QMAN_FD_BMT_DISABLE (0 << 15) /* bypass memory translation */
1710 +#define QMAN_FD_SL_DISABLE (0 << 14) /* short length disabled */
1711 +#define QMAN_FD_SL_ENABLE (1 << 14) /* short length enabled */
1713 +#define QDMA_SB_FRAME (0 << 28) /* single frame */
1714 +#define QDMA_SG_FRAME (2 << 28) /* scatter gather frames */
1715 +#define QDMA_FINAL_BIT_DISABLE (0 << 31) /* final bit disable */
1716 +#define QDMA_FINAL_BIT_ENABLE (1 << 31) /* final bit enable */
1718 +#define QDMA_FD_SHORT_FORMAT (1 << 11) /* short format */
1719 +#define QDMA_FD_LONG_FORMAT (0 << 11) /* long format */
1720 +#define QDMA_SER_DISABLE (0 << 8) /* no notification */
1721 +#define QDMA_SER_CTX (1 << 8) /* notification by FQD_CTX[fqid] */
1722 +#define QDMA_SER_DEST (2 << 8) /* notification by destination desc */
1723 +#define QDMA_SER_BOTH (3 << 8) /* source and dest notification */
1724 +#define QDMA_FD_SPF_ENALBE (1 << 30) /* source prefetch enable */
1726 +#define QMAN_FD_VA_ENABLE (1 << 14) /* Address used is virtual address */
1727 +#define QMAN_FD_VA_DISABLE (0 << 14)/* Address used is a real address */
1728 +#define QMAN_FD_CBMT_ENABLE (1 << 15) /* Flow Context: 49bit physical address */
1729 +#define QMAN_FD_CBMT_DISABLE (0 << 15) /* Flow Context: 64bit virtual address */
1730 +#define QMAN_FD_SC_DISABLE (0 << 27) /* stashing control */
1732 +#define QDMA_FL_FMT_SBF (0x0) /* Single buffer frame */
1733 +#define QDMA_FL_FMT_SGE 0x2 /* Scatter gather frame */
1734 +#define QDMA_FL_BMT_ENABLE (0x1 << 15)/* enable bypass memory translation */
1735 +#define QDMA_FL_BMT_DISABLE 0x0 /* disable bypass memory translation */
1736 +#define QDMA_FL_SL_LONG (0x0 << 2)/* long length */
1737 +#define QDMA_FL_SL_SHORT 0x1 /* short length */
1738 +#define QDMA_FL_F (0x1)/* last frame list bit */
1739 +/*Description of Frame list table structure*/
1741 +struct dpaa2_qdma_chan {
1742 + struct virt_dma_chan vchan;
1743 + struct virt_dma_desc vdesc;
1744 + enum dma_status status;
1745 + struct dpaa2_qdma_engine *qdma;
1747 + struct mutex dpaa2_queue_mutex;
1748 + spinlock_t queue_lock;
1749 + struct dma_pool *fd_pool;
1750 + struct dma_pool *sg_blk_pool;
1752 + struct list_head comp_used;
1753 + struct list_head comp_free;
1755 + struct list_head sgb_free;
1758 +struct qdma_sg_blk {
1759 + dma_addr_t blk_bus_addr;
1760 + void *blk_virt_addr;
1761 + struct list_head list;
1764 +struct dpaa2_qdma_comp {
1765 + dma_addr_t fd_bus_addr;
1766 + dma_addr_t fl_bus_addr;
1767 + dma_addr_t desc_bus_addr;
1768 + dma_addr_t sge_src_bus_addr;
1769 + dma_addr_t sge_dst_bus_addr;
1770 + void *fd_virt_addr;
1771 + void *fl_virt_addr;
1772 + void *desc_virt_addr;
1773 + void *sg_src_virt_addr;
1774 + void *sg_dst_virt_addr;
1775 + struct qdma_sg_blk *sg_blk;
1776 + uint32_t sg_blk_num;
1777 + struct list_head sg_src_head;
1778 + struct list_head sg_dst_head;
1779 + struct dpaa2_qdma_chan *qchan;
1780 + struct virt_dma_desc vdesc;
1781 + struct list_head list;
1784 +struct dpaa2_qdma_engine {
1785 + struct dma_device dma_dev;
1787 + struct dpaa2_qdma_chan chans[NUM_CH];
1789 + struct dpaa2_qdma_priv *priv;
1793 + * dpaa2_qdma_priv - driver private data
1795 +struct dpaa2_qdma_priv {
1798 + struct iommu_domain *iommu_domain;
1799 + struct dpdmai_attr dpdmai_attr;
1800 + struct device *dev;
1801 + struct fsl_mc_io *mc_io;
1802 + struct fsl_mc_device *dpdmai_dev;
1804 + struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM];
1805 + struct dpdmai_tx_queue_attr tx_queue_attr[DPDMAI_PRIO_NUM];
1807 + uint8_t num_pairs;
1809 + struct dpaa2_qdma_engine *dpaa2_qdma;
1810 + struct dpaa2_qdma_priv_per_prio *ppriv;
1813 +struct dpaa2_qdma_priv_per_prio {
1818 + struct dpaa2_io_store *store;
1819 + struct dpaa2_io_notification_ctx nctx;
1821 + struct dpaa2_qdma_priv *priv;
1824 +/* FD pool size: one FD + 3 Frame list + 2 source/destination descriptor */
1825 +#define FD_POOL_SIZE (sizeof(struct dpaa2_fd) + \
1826 + sizeof(struct dpaa2_fl_entry) * 3 + \
1827 + sizeof(struct dpaa2_qdma_sd_d) * 2)
1829 +/* qdma_sg_blk + 16 SGs */
1830 +#define SG_POOL_SIZE (sizeof(struct qdma_sg_blk) +\
1831 + sizeof(struct dpaa2_qdma_sg) * NUM_SG_PER_BLK)
1832 +#endif /* __DPAA2_QDMA_H */
1834 +++ b/drivers/dma/dpaa2-qdma/dpdmai.c
1836 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
1838 + * Redistribution and use in source and binary forms, with or without
1839 + * modification, are permitted provided that the following conditions are met:
1840 + * * Redistributions of source code must retain the above copyright
1841 + * notice, this list of conditions and the following disclaimer.
1842 + * * Redistributions in binary form must reproduce the above copyright
1843 + * notice, this list of conditions and the following disclaimer in the
1844 + * documentation and/or other materials provided with the distribution.
1845 + * * Neither the name of the above-listed copyright holders nor the
1846 + * names of any contributors may be used to endorse or promote products
1847 + * derived from this software without specific prior written permission.
1850 + * ALTERNATIVELY, this software may be distributed under the terms of the
1851 + * GNU General Public License ("GPL") as published by the Free Software
1852 + * Foundation, either version 2 of that License or (at your option) any
1855 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
1856 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1857 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1858 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
1859 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1860 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
1861 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
1862 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
1863 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
1864 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1865 + * POSSIBILITY OF SUCH DAMAGE.
1867 +#include <linux/types.h>
1868 +#include <linux/io.h>
1869 +#include "fsl_dpdmai.h"
1870 +#include "fsl_dpdmai_cmd.h"
1871 +#include <linux/fsl/mc.h>
1873 +struct dpdmai_cmd_open {
1877 +struct dpdmai_rsp_get_attributes {
1879 + u8 num_of_priorities;
1886 +struct dpdmai_cmd_queue {
1899 +struct dpdmai_rsp_get_tx_queue {
1905 +int dpdmai_open(struct fsl_mc_io *mc_io,
1906 + uint32_t cmd_flags,
1910 + struct fsl_mc_command cmd = { 0 };
1911 + struct dpdmai_cmd_open *cmd_params;
1914 + /* prepare command */
1915 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
1919 + cmd_params = (struct dpdmai_cmd_open *)cmd.params;
1920 + cmd_params->dpdmai_id = cpu_to_le32(dpdmai_id);
1922 + /* send command to mc*/
1923 + err = mc_send_command(mc_io, &cmd);
1927 + /* retrieve response parameters */
1928 + *token = mc_cmd_hdr_read_token(&cmd);
1932 +int dpdmai_close(struct fsl_mc_io *mc_io,
1933 + uint32_t cmd_flags,
1936 + struct fsl_mc_command cmd = { 0 };
1938 + /* prepare command */
1939 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE,
1940 + cmd_flags, token);
1942 + /* send command to mc*/
1943 + return mc_send_command(mc_io, &cmd);
1946 +int dpdmai_create(struct fsl_mc_io *mc_io,
1947 + uint32_t cmd_flags,
1948 + const struct dpdmai_cfg *cfg,
1951 + struct fsl_mc_command cmd = { 0 };
1954 + /* prepare command */
1955 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
1958 + DPDMAI_CMD_CREATE(cmd, cfg);
1960 + /* send command to mc*/
1961 + err = mc_send_command(mc_io, &cmd);
1965 + /* retrieve response parameters */
1966 + *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
1971 +int dpdmai_destroy(struct fsl_mc_io *mc_io,
1972 + uint32_t cmd_flags,
1975 + struct fsl_mc_command cmd = { 0 };
1977 + /* prepare command */
1978 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
1982 + /* send command to mc*/
1983 + return mc_send_command(mc_io, &cmd);
1986 +int dpdmai_enable(struct fsl_mc_io *mc_io,
1987 + uint32_t cmd_flags,
1990 + struct fsl_mc_command cmd = { 0 };
1992 + /* prepare command */
1993 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE,
1997 + /* send command to mc*/
1998 + return mc_send_command(mc_io, &cmd);
2001 +int dpdmai_disable(struct fsl_mc_io *mc_io,
2002 + uint32_t cmd_flags,
2005 + struct fsl_mc_command cmd = { 0 };
2007 + /* prepare command */
2008 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE,
2012 + /* send command to mc*/
2013 + return mc_send_command(mc_io, &cmd);
2016 +int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
2017 + uint32_t cmd_flags,
2021 + struct fsl_mc_command cmd = { 0 };
2023 + /* prepare command */
2024 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED,
2028 + /* send command to mc*/
2029 + err = mc_send_command(mc_io, &cmd);
2033 + /* retrieve response parameters */
2034 + DPDMAI_RSP_IS_ENABLED(cmd, *en);
2039 +int dpdmai_reset(struct fsl_mc_io *mc_io,
2040 + uint32_t cmd_flags,
2043 + struct fsl_mc_command cmd = { 0 };
2045 + /* prepare command */
2046 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET,
2050 + /* send command to mc*/
2051 + return mc_send_command(mc_io, &cmd);
2054 +int dpdmai_get_irq(struct fsl_mc_io *mc_io,
2055 + uint32_t cmd_flags,
2057 + uint8_t irq_index,
2059 + struct dpdmai_irq_cfg *irq_cfg)
2061 + struct fsl_mc_command cmd = { 0 };
2064 + /* prepare command */
2065 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ,
2068 + DPDMAI_CMD_GET_IRQ(cmd, irq_index);
2070 + /* send command to mc*/
2071 + err = mc_send_command(mc_io, &cmd);
2075 + /* retrieve response parameters */
2076 + DPDMAI_RSP_GET_IRQ(cmd, *type, irq_cfg);
2081 +int dpdmai_set_irq(struct fsl_mc_io *mc_io,
2082 + uint32_t cmd_flags,
2084 + uint8_t irq_index,
2085 + struct dpdmai_irq_cfg *irq_cfg)
2087 + struct fsl_mc_command cmd = { 0 };
2089 + /* prepare command */
2090 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ,
2093 + DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
2095 + /* send command to mc*/
2096 + return mc_send_command(mc_io, &cmd);
2099 +int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
2100 + uint32_t cmd_flags,
2102 + uint8_t irq_index,
2105 + struct fsl_mc_command cmd = { 0 };
2108 + /* prepare command */
2109 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_ENABLE,
2112 + DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
2114 + /* send command to mc*/
2115 + err = mc_send_command(mc_io, &cmd);
2119 + /* retrieve response parameters */
2120 + DPDMAI_RSP_GET_IRQ_ENABLE(cmd, *en);
2125 +int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
2126 + uint32_t cmd_flags,
2128 + uint8_t irq_index,
2131 + struct fsl_mc_command cmd = { 0 };
2133 + /* prepare command */
2134 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_ENABLE,
2137 + DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
2139 + /* send command to mc*/
2140 + return mc_send_command(mc_io, &cmd);
2143 +int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
2144 + uint32_t cmd_flags,
2146 + uint8_t irq_index,
2149 + struct fsl_mc_command cmd = { 0 };
2152 + /* prepare command */
2153 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_MASK,
2156 + DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index);
2158 + /* send command to mc*/
2159 + err = mc_send_command(mc_io, &cmd);
2163 + /* retrieve response parameters */
2164 + DPDMAI_RSP_GET_IRQ_MASK(cmd, *mask);
2169 +int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
2170 + uint32_t cmd_flags,
2172 + uint8_t irq_index,
2175 + struct fsl_mc_command cmd = { 0 };
2177 + /* prepare command */
2178 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_MASK,
2181 + DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
2183 + /* send command to mc*/
2184 + return mc_send_command(mc_io, &cmd);
2187 +int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
2188 + uint32_t cmd_flags,
2190 + uint8_t irq_index,
2193 + struct fsl_mc_command cmd = { 0 };
2196 + /* prepare command */
2197 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_STATUS,
2200 + DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
2202 + /* send command to mc*/
2203 + err = mc_send_command(mc_io, &cmd);
2207 + /* retrieve response parameters */
2208 + DPDMAI_RSP_GET_IRQ_STATUS(cmd, *status);
2213 +int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
2214 + uint32_t cmd_flags,
2216 + uint8_t irq_index,
2219 + struct fsl_mc_command cmd = { 0 };
2221 + /* prepare command */
2222 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLEAR_IRQ_STATUS,
2225 + DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
2227 + /* send command to mc*/
2228 + return mc_send_command(mc_io, &cmd);
2231 +int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
2232 + uint32_t cmd_flags,
2234 + struct dpdmai_attr *attr)
2236 + struct fsl_mc_command cmd = { 0 };
2238 + struct dpdmai_rsp_get_attributes *rsp_params;
2240 + /* prepare command */
2241 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR,
2245 + /* send command to mc*/
2246 + err = mc_send_command(mc_io, &cmd);
2250 + /* retrieve response parameters */
2251 + rsp_params = (struct dpdmai_rsp_get_attributes *)cmd.params;
2252 + attr->id = le32_to_cpu(rsp_params->id);
2253 + attr->version.major = le16_to_cpu(rsp_params->major);
2254 + attr->version.minor = le16_to_cpu(rsp_params->minor);
2255 + attr->num_of_priorities = rsp_params->num_of_priorities;
2261 +int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
2262 + uint32_t cmd_flags,
2265 + const struct dpdmai_rx_queue_cfg *cfg)
2267 + struct fsl_mc_command cmd = { 0 };
2268 + struct dpdmai_cmd_queue *cmd_params;
2270 + /* prepare command */
2271 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE,
2275 + cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
2276 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
2277 + cmd_params->priority = cfg->dest_cfg.priority;
2278 + cmd_params->queue = priority;
2279 + cmd_params->dest_type = cfg->dest_cfg.dest_type;
2280 + cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
2281 + cmd_params->options = cpu_to_le32(cfg->options);
2284 + /* send command to mc*/
2285 + return mc_send_command(mc_io, &cmd);
2288 +int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
2289 + uint32_t cmd_flags,
2291 + uint8_t priority, struct dpdmai_rx_queue_attr *attr)
2293 + struct fsl_mc_command cmd = { 0 };
2294 + struct dpdmai_cmd_queue *cmd_params;
2297 + /* prepare command */
2298 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE,
2302 + cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
2303 + cmd_params->queue = priority;
2305 + /* send command to mc*/
2306 + err = mc_send_command(mc_io, &cmd);
2310 + /* retrieve response parameters */
2311 + attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
2312 + attr->dest_cfg.priority = cmd_params->priority;
2313 + attr->dest_cfg.dest_type = cmd_params->dest_type;
2314 + attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
2315 + attr->fqid = le32_to_cpu(cmd_params->fqid);
2320 +int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
2321 + uint32_t cmd_flags,
2324 + struct dpdmai_tx_queue_attr *attr)
2326 + struct fsl_mc_command cmd = { 0 };
2327 + struct dpdmai_cmd_queue *cmd_params;
2328 + struct dpdmai_rsp_get_tx_queue *rsp_params;
2331 + /* prepare command */
2332 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE,
2336 + cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
2337 + cmd_params->queue = priority;
2339 + /* send command to mc*/
2340 + err = mc_send_command(mc_io, &cmd);
2344 + /* retrieve response parameters */
2346 + rsp_params = (struct dpdmai_rsp_get_tx_queue *)cmd.params;
2347 + attr->fqid = le32_to_cpu(rsp_params->fqid);
2352 +++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai.h
2354 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
2356 + * Redistribution and use in source and binary forms, with or without
2357 + * modification, are permitted provided that the following conditions are met:
2358 + * * Redistributions of source code must retain the above copyright
2359 + * notice, this list of conditions and the following disclaimer.
2360 + * * Redistributions in binary form must reproduce the above copyright
2361 + * notice, this list of conditions and the following disclaimer in the
2362 + * documentation and/or other materials provided with the distribution.
2363 + * * Neither the name of the above-listed copyright holders nor the
2364 + * names of any contributors may be used to endorse or promote products
2365 + * derived from this software without specific prior written permission.
2368 + * ALTERNATIVELY, this software may be distributed under the terms of the
2369 + * GNU General Public License ("GPL") as published by the Free Software
2370 + * Foundation, either version 2 of that License or (at your option) any
2373 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
2374 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2375 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2376 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
2377 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2378 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2379 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2380 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
2381 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
2382 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
2383 + * POSSIBILITY OF SUCH DAMAGE.
2385 +#ifndef __FSL_DPDMAI_H
2386 +#define __FSL_DPDMAI_H
2390 +/* Data Path DMA Interface API
2391 + * Contains initialization APIs and runtime control APIs for DPDMAI
2394 +/* General DPDMAI macros */
2397 + * Maximum number of Tx/Rx priorities per DPDMAI object
2399 +#define DPDMAI_PRIO_NUM 2
2402 + * All queues considered; see dpdmai_set_rx_queue()
2404 +#define DPDMAI_ALL_QUEUES (uint8_t)(-1)
2407 + * dpdmai_open() - Open a control session for the specified object
2408 + * @mc_io: Pointer to MC portal's I/O object
2409 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2410 + * @dpdmai_id: DPDMAI unique ID
2411 + * @token: Returned token; use in subsequent API calls
2413 + * This function can be used to open a control session for an
2414 + * already created object; an object may have been declared in
2415 + * the DPL or by calling the dpdmai_create() function.
2416 + * This function returns a unique authentication token,
2417 + * associated with the specific object ID and the specific MC
2418 + * portal; this token must be used in all subsequent commands for
2419 + * this specific object.
2421 + * Return: '0' on Success; Error code otherwise.
2423 +int dpdmai_open(struct fsl_mc_io *mc_io,
2424 + uint32_t cmd_flags,
2429 + * dpdmai_close() - Close the control session of the object
2430 + * @mc_io: Pointer to MC portal's I/O object
2431 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2432 + * @token: Token of DPDMAI object
2434 + * After this function is called, no further operations are
2435 + * allowed on the object without opening a new control session.
2437 + * Return: '0' on Success; Error code otherwise.
2439 +int dpdmai_close(struct fsl_mc_io *mc_io,
2440 + uint32_t cmd_flags,
2444 + * struct dpdmai_cfg - Structure representing DPDMAI configuration
2445 + * @priorities: Priorities for the DMA hardware processing; valid priorities are
2446 + * configured with values 1-8; the entry following the last valid entry
2447 + * should be configured with 0
2449 +struct dpdmai_cfg {
2450 + uint8_t priorities[DPDMAI_PRIO_NUM];
2454 + * dpdmai_create() - Create the DPDMAI object
2455 + * @mc_io: Pointer to MC portal's I/O object
2456 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2457 + * @cfg: Configuration structure
2458 + * @token: Returned token; use in subsequent API calls
2460 + * Create the DPDMAI object, allocate required resources and
2461 + * perform required initialization.
2463 + * The object can be created either by declaring it in the
2464 + * DPL file, or by calling this function.
2466 + * This function returns a unique authentication token,
2467 + * associated with the specific object ID and the specific MC
2468 + * portal; this token must be used in all subsequent calls to
2469 + * this specific object. For objects that are created using the
2470 + * DPL file, call dpdmai_open() function to get an authentication
2473 + * Return: '0' on Success; Error code otherwise.
2475 +int dpdmai_create(struct fsl_mc_io *mc_io,
2476 + uint32_t cmd_flags,
2477 + const struct dpdmai_cfg *cfg,
2481 + * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
2482 + * @mc_io: Pointer to MC portal's I/O object
2483 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2484 + * @token: Token of DPDMAI object
2486 + * Return: '0' on Success; error code otherwise.
2488 +int dpdmai_destroy(struct fsl_mc_io *mc_io,
2489 + uint32_t cmd_flags,
2493 + * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
2494 + * @mc_io: Pointer to MC portal's I/O object
2495 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2496 + * @token: Token of DPDMAI object
2498 + * Return: '0' on Success; Error code otherwise.
2500 +int dpdmai_enable(struct fsl_mc_io *mc_io,
2501 + uint32_t cmd_flags,
2505 + * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames.
2506 + * @mc_io: Pointer to MC portal's I/O object
2507 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2508 + * @token: Token of DPDMAI object
2510 + * Return: '0' on Success; Error code otherwise.
2512 +int dpdmai_disable(struct fsl_mc_io *mc_io,
2513 + uint32_t cmd_flags,
2517 + * dpdmai_is_enabled() - Check if the DPDMAI is enabled.
2518 + * @mc_io: Pointer to MC portal's I/O object
2519 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2520 + * @token: Token of DPDMAI object
2521 + * @en: Returns '1' if object is enabled; '0' otherwise
2523 + * Return: '0' on Success; Error code otherwise.
2525 +int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
2526 + uint32_t cmd_flags,
2531 + * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state.
2532 + * @mc_io: Pointer to MC portal's I/O object
2533 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2534 + * @token: Token of DPDMAI object
2536 + * Return: '0' on Success; Error code otherwise.
2538 +int dpdmai_reset(struct fsl_mc_io *mc_io,
2539 + uint32_t cmd_flags,
2543 + * struct dpdmai_irq_cfg - IRQ configuration
2544 + * @addr: Address that must be written to signal a message-based interrupt
2545 + * @val: Value to write into irq_addr address
2546 + * @irq_num: A user defined number associated with this IRQ
2548 +struct dpdmai_irq_cfg {
2555 + * dpdmai_set_irq() - Set IRQ information for the DPDMAI to trigger an interrupt.
2556 + * @mc_io: Pointer to MC portal's I/O object
2557 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2558 + * @token: Token of DPDMAI object
2559 + * @irq_index: Identifies the interrupt index to configure
2560 + * @irq_cfg: IRQ configuration
2562 + * Return: '0' on Success; Error code otherwise.
2564 +int dpdmai_set_irq(struct fsl_mc_io *mc_io,
2565 + uint32_t cmd_flags,
2567 + uint8_t irq_index,
2568 + struct dpdmai_irq_cfg *irq_cfg);
2571 + * dpdmai_get_irq() - Get IRQ information from the DPDMAI
2573 + * @mc_io: Pointer to MC portal's I/O object
2574 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2575 + * @token: Token of DPDMAI object
2576 + * @irq_index: The interrupt index to configure
2577 + * @type: Interrupt type: 0 represents message interrupt
2578 + * type (both irq_addr and irq_val are valid)
2579 + * @irq_cfg: IRQ attributes
2581 + * Return: '0' on Success; Error code otherwise.
2583 +int dpdmai_get_irq(struct fsl_mc_io *mc_io,
2584 + uint32_t cmd_flags,
2586 + uint8_t irq_index,
2588 + struct dpdmai_irq_cfg *irq_cfg);
2591 + * dpdmai_set_irq_enable() - Set overall interrupt state.
2592 + * @mc_io: Pointer to MC portal's I/O object
2593 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2594 + * @token: Token of DPDMAI object
2595 + * @irq_index: The interrupt index to configure
2596 + * @en: Interrupt state - enable = 1, disable = 0
2598 + * Allows GPP software to control when interrupts are generated.
2599 + * Each interrupt can have up to 32 causes. The enable/disable controls the
2600 + * overall interrupt state. If the interrupt is disabled no causes will cause
2603 + * Return: '0' on Success; Error code otherwise.
2605 +int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
2606 + uint32_t cmd_flags,
2608 + uint8_t irq_index,
2612 + * dpdmai_get_irq_enable() - Get overall interrupt state
2613 + * @mc_io: Pointer to MC portal's I/O object
2614 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2615 + * @token: Token of DPDMAI object
2616 + * @irq_index: The interrupt index to configure
2617 + * @en: Returned Interrupt state - enable = 1, disable = 0
2619 + * Return: '0' on Success; Error code otherwise.
2621 +int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
2622 + uint32_t cmd_flags,
2624 + uint8_t irq_index,
2628 + * dpdmai_set_irq_mask() - Set interrupt mask.
2629 + * @mc_io: Pointer to MC portal's I/O object
2630 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2631 + * @token: Token of DPDMAI object
2632 + * @irq_index: The interrupt index to configure
2633 + * @mask: event mask to trigger interrupt;
2635 + * 0 = ignore event
2636 + * 1 = consider event for asserting IRQ
2638 + * Every interrupt can have up to 32 causes and the interrupt model supports
2639 + * masking/unmasking each cause independently
2641 + * Return: '0' on Success; Error code otherwise.
2643 +int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
2644 + uint32_t cmd_flags,
2646 + uint8_t irq_index,
2650 + * dpdmai_get_irq_mask() - Get interrupt mask.
2651 + * @mc_io: Pointer to MC portal's I/O object
2652 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2653 + * @token: Token of DPDMAI object
2654 + * @irq_index: The interrupt index to configure
2655 + * @mask: Returned event mask to trigger interrupt
2657 + * Every interrupt can have up to 32 causes and the interrupt model supports
2658 + * masking/unmasking each cause independently
2660 + * Return: '0' on Success; Error code otherwise.
2662 +int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
2663 + uint32_t cmd_flags,
2665 + uint8_t irq_index,
2669 + * dpdmai_get_irq_status() - Get the current status of any pending interrupts
2670 + * @mc_io: Pointer to MC portal's I/O object
2671 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2672 + * @token: Token of DPDMAI object
2673 + * @irq_index: The interrupt index to configure
2674 + * @status: Returned interrupts status - one bit per cause:
2675 + * 0 = no interrupt pending
2676 + * 1 = interrupt pending
2678 + * Return: '0' on Success; Error code otherwise.
2680 +int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
2681 + uint32_t cmd_flags,
2683 + uint8_t irq_index,
2684 + uint32_t *status);
2687 + * dpdmai_clear_irq_status() - Clear a pending interrupt's status
2688 + * @mc_io: Pointer to MC portal's I/O object
2689 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2690 + * @token: Token of DPDMAI object
2691 + * @irq_index: The interrupt index to configure
2692 + * @status: bits to clear (W1C) - one bit per cause:
2693 + * 0 = don't change
2694 + * 1 = clear status bit
2696 + * Return: '0' on Success; Error code otherwise.
2698 +int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
2699 + uint32_t cmd_flags,
2701 + uint8_t irq_index,
2705 + * struct dpdmai_attr - Structure representing DPDMAI attributes
2706 + * @id: DPDMAI object ID
2707 + * @version: DPDMAI version
2708 + * @num_of_priorities: number of priorities
2710 +struct dpdmai_attr {
2713 + * struct version - DPDMAI version
2714 + * @major: DPDMAI major version
2715 + * @minor: DPDMAI minor version
2721 + uint8_t num_of_priorities;
2725 + * dpdmai_get_attributes() - Retrieve DPDMAI attributes.
2726 + * @mc_io: Pointer to MC portal's I/O object
2727 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2728 + * @token: Token of DPDMAI object
2729 + * @attr: Returned object's attributes
2731 + * Return: '0' on Success; Error code otherwise.
2733 +int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
2734 + uint32_t cmd_flags,
2736 + struct dpdmai_attr *attr);
2739 + * enum dpdmai_dest - DPDMAI destination types
2740 + * @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode
2741 + * and does not generate FQDAN notifications; user is expected to dequeue
2742 + * from the queue based on polling or other user-defined method
2743 + * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
2744 + * notifications to the specified DPIO; user is expected to dequeue
2745 + * from the queue only after notification is received
2746 + * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate
2747 + * FQDAN notifications, but is connected to the specified DPCON object;
2748 + * user is expected to dequeue from the DPCON channel
2751 + DPDMAI_DEST_NONE = 0,
2752 + DPDMAI_DEST_DPIO = 1,
2753 + DPDMAI_DEST_DPCON = 2
2757 + * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters
2758 + * @dest_type: Destination type
2759 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
2760 + * @priority: Priority selection within the DPIO or DPCON channel; valid values
2761 + * are 0-1 or 0-7, depending on the number of priorities in that
2762 + * channel; not relevant for 'DPDMAI_DEST_NONE' option
2764 +struct dpdmai_dest_cfg {
2765 + enum dpdmai_dest dest_type;
2770 +/* DPDMAI queue modification options */
2773 + * Select to modify the user's context associated with the queue
2775 +#define DPDMAI_QUEUE_OPT_USER_CTX 0x00000001
2778 + * Select to modify the queue's destination
2780 +#define DPDMAI_QUEUE_OPT_DEST 0x00000002
2783 + * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration
2784 + * @options: Flags representing the suggested modifications to the queue;
2785 + * Use any combination of 'DPDMAI_QUEUE_OPT_<X>' flags
2786 + * @user_ctx: User context value provided in the frame descriptor of each
2788 + * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained in 'options'
2789 + * @dest_cfg: Queue destination parameters;
2790 + * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options'
2792 +struct dpdmai_rx_queue_cfg {
2794 + uint64_t user_ctx;
2795 + struct dpdmai_dest_cfg dest_cfg;
2800 + * dpdmai_set_rx_queue() - Set Rx queue configuration
2801 + * @mc_io: Pointer to MC portal's I/O object
2802 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2803 + * @token: Token of DPDMAI object
2804 + * @priority: Select the queue relative to number of
2805 + * priorities configured at DPDMAI creation; use
2806 + * DPDMAI_ALL_QUEUES to configure all Rx queues
2808 + * @cfg: Rx queue configuration
2810 + * Return: '0' on Success; Error code otherwise.
2812 +int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
2813 + uint32_t cmd_flags,
2816 + const struct dpdmai_rx_queue_cfg *cfg);
2819 + * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues
2820 + * @user_ctx: User context value provided in the frame descriptor of each
2822 + * @dest_cfg: Queue destination configuration
2823 + * @fqid: Virtual FQID value to be used for dequeue operations
2825 +struct dpdmai_rx_queue_attr {
2826 + uint64_t user_ctx;
2827 + struct dpdmai_dest_cfg dest_cfg;
2832 + * dpdmai_get_rx_queue() - Retrieve Rx queue attributes.
2833 + * @mc_io: Pointer to MC portal's I/O object
2834 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2835 + * @token: Token of DPDMAI object
2836 + * @priority: Select the queue relative to number of
2837 + * priorities configured at DPDMAI creation
2838 + * @attr: Returned Rx queue attributes
2840 + * Return: '0' on Success; Error code otherwise.
2842 +int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
2843 + uint32_t cmd_flags,
2846 + struct dpdmai_rx_queue_attr *attr);
2849 + * struct dpdmai_tx_queue_attr - Structure representing attributes of Tx queues
2850 + * @fqid: Virtual FQID to be used for sending frames to DMA hardware
2853 +struct dpdmai_tx_queue_attr {
2858 + * dpdmai_get_tx_queue() - Retrieve Tx queue attributes.
2859 + * @mc_io: Pointer to MC portal's I/O object
2860 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2861 + * @token: Token of DPDMAI object
2862 + * @priority: Select the queue relative to number of
2863 + * priorities configured at DPDMAI creation
2864 + * @attr: Returned Tx queue attributes
2866 + * Return: '0' on Success; Error code otherwise.
2868 +int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
2869 + uint32_t cmd_flags,
2872 + struct dpdmai_tx_queue_attr *attr);
2874 +#endif /* __FSL_DPDMAI_H */
2876 +++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
2878 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
2880 + * Redistribution and use in source and binary forms, with or without
2881 + * modification, are permitted provided that the following conditions are met:
2882 + * * Redistributions of source code must retain the above copyright
2883 + * notice, this list of conditions and the following disclaimer.
2884 + * * Redistributions in binary form must reproduce the above copyright
2885 + * notice, this list of conditions and the following disclaimer in the
2886 + * documentation and/or other materials provided with the distribution.
2887 + * * Neither the name of the above-listed copyright holders nor the
2888 + * names of any contributors may be used to endorse or promote products
2889 + * derived from this software without specific prior written permission.
2892 + * ALTERNATIVELY, this software may be distributed under the terms of the
2893 + * GNU General Public License ("GPL") as published by the Free Software
2894 + * Foundation, either version 2 of that License or (at your option) any
2897 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
2898 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2899 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2900 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
2901 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2902 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2903 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2904 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
2905 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
2906 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
2907 + * POSSIBILITY OF SUCH DAMAGE.
2909 +#ifndef _FSL_DPDMAI_CMD_H
2910 +#define _FSL_DPDMAI_CMD_H
2912 +/* DPDMAI Version */
2913 +#define DPDMAI_VER_MAJOR 2
2914 +#define DPDMAI_VER_MINOR 2
2916 +#define DPDMAI_CMD_BASE_VERSION 0
2917 +#define DPDMAI_CMD_ID_OFFSET 4
2920 +#define DPDMAI_CMDID_CLOSE ((0x800 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2921 +#define DPDMAI_CMDID_OPEN ((0x80E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2922 +#define DPDMAI_CMDID_CREATE ((0x90E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2923 +#define DPDMAI_CMDID_DESTROY ((0x900 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2925 +#define DPDMAI_CMDID_ENABLE ((0x002 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2926 +#define DPDMAI_CMDID_DISABLE ((0x003 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2927 +#define DPDMAI_CMDID_GET_ATTR ((0x004 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2928 +#define DPDMAI_CMDID_RESET ((0x005 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2929 +#define DPDMAI_CMDID_IS_ENABLED ((0x006 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2931 +#define DPDMAI_CMDID_SET_IRQ ((0x010 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2932 +#define DPDMAI_CMDID_GET_IRQ ((0x011 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2933 +#define DPDMAI_CMDID_SET_IRQ_ENABLE ((0x012 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2934 +#define DPDMAI_CMDID_GET_IRQ_ENABLE ((0x013 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2935 +#define DPDMAI_CMDID_SET_IRQ_MASK ((0x014 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2936 +#define DPDMAI_CMDID_GET_IRQ_MASK ((0x015 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2937 +#define DPDMAI_CMDID_GET_IRQ_STATUS ((0x016 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2938 +#define DPDMAI_CMDID_CLEAR_IRQ_STATUS ((0x017 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2940 +#define DPDMAI_CMDID_SET_RX_QUEUE ((0x1A0 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2941 +#define DPDMAI_CMDID_GET_RX_QUEUE ((0x1A1 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2942 +#define DPDMAI_CMDID_GET_TX_QUEUE ((0x1A2 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2945 +#define MC_CMD_HDR_TOKEN_O 32 /* Token field offset */
2946 +#define MC_CMD_HDR_TOKEN_S 16 /* Token field size */
2949 +#define MAKE_UMASK64(_width) \
2950 + ((uint64_t)((_width) < 64 ? ((uint64_t)1 << (_width)) - 1 : \
2953 +static inline uint64_t mc_enc(int lsoffset, int width, uint64_t val)
2955 + return (uint64_t)(((uint64_t)val & MAKE_UMASK64(width)) << lsoffset);
2958 +static inline uint64_t mc_dec(uint64_t val, int lsoffset, int width)
2960 + return (uint64_t)((val >> lsoffset) & MAKE_UMASK64(width));
2963 +#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
2964 + ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
2966 +#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \
2967 + (_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width)))
2969 +#define MC_CMD_HDR_READ_TOKEN(_hdr) \
2970 + ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S))
2972 +/* cmd, param, offset, width, type, arg_name */
2973 +#define DPDMAI_CMD_OPEN(cmd, dpdmai_id) \
2974 + MC_CMD_OP(cmd, 0, 0, 32, int, dpdmai_id)
2976 +/* cmd, param, offset, width, type, arg_name */
2977 +#define DPDMAI_CMD_CREATE(cmd, cfg) \
2979 + MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->priorities[0]);\
2980 + MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priorities[1]);\
2983 +/* cmd, param, offset, width, type, arg_name */
2984 +#define DPDMAI_RSP_IS_ENABLED(cmd, en) \
2985 + MC_RSP_OP(cmd, 0, 0, 1, int, en)
2987 +/* cmd, param, offset, width, type, arg_name */
2988 +#define DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
2990 + MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
2991 + MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
2992 + MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
2993 + MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
2996 +/* cmd, param, offset, width, type, arg_name */
2997 +#define DPDMAI_CMD_GET_IRQ(cmd, irq_index) \
2998 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
3000 +/* cmd, param, offset, width, type, arg_name */
3001 +#define DPDMAI_RSP_GET_IRQ(cmd, type, irq_cfg) \
3003 + MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
3004 + MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
3005 + MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
3006 + MC_RSP_OP(cmd, 2, 32, 32, int, type); \
3009 +/* cmd, param, offset, width, type, arg_name */
3010 +#define DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \
3012 + MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \
3013 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
3016 +/* cmd, param, offset, width, type, arg_name */
3017 +#define DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
3018 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
3020 +/* cmd, param, offset, width, type, arg_name */
3021 +#define DPDMAI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \
3022 + MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state)
3024 +/* cmd, param, offset, width, type, arg_name */
3025 +#define DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
3027 + MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
3028 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
3031 +/* cmd, param, offset, width, type, arg_name */
3032 +#define DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index) \
3033 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
3035 +/* cmd, param, offset, width, type, arg_name */
3036 +#define DPDMAI_RSP_GET_IRQ_MASK(cmd, mask) \
3037 + MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
3039 +/* cmd, param, offset, width, type, arg_name */
3040 +#define DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
3042 + MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
3043 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
3046 +/* cmd, param, offset, width, type, arg_name */
3047 +#define DPDMAI_RSP_GET_IRQ_STATUS(cmd, status) \
3048 + MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
3050 +/* cmd, param, offset, width, type, arg_name */
3051 +#define DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
3053 + MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
3054 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
3057 +/* cmd, param, offset, width, type, arg_name */
3058 +#define DPDMAI_RSP_GET_ATTR(cmd, attr) \
3060 + MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \
3061 + MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->num_of_priorities); \
3062 + MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
3063 + MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
3066 +/* cmd, param, offset, width, type, arg_name */
3067 +#define DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg) \
3069 + MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
3070 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \
3071 + MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority); \
3072 + MC_CMD_OP(cmd, 0, 48, 4, enum dpdmai_dest, cfg->dest_cfg.dest_type); \
3073 + MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
3074 + MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
3077 +/* cmd, param, offset, width, type, arg_name */
3078 +#define DPDMAI_CMD_GET_RX_QUEUE(cmd, priority) \
3079 + MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
3081 +/* cmd, param, offset, width, type, arg_name */
3082 +#define DPDMAI_RSP_GET_RX_QUEUE(cmd, attr) \
3084 + MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\
3085 + MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
3086 + MC_RSP_OP(cmd, 0, 48, 4, enum dpdmai_dest, attr->dest_cfg.dest_type);\
3087 + MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx);\
3088 + MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\
3091 +/* cmd, param, offset, width, type, arg_name */
3092 +#define DPDMAI_CMD_GET_TX_QUEUE(cmd, priority) \
3093 + MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
3095 +/* cmd, param, offset, width, type, arg_name */
3096 +#define DPDMAI_RSP_GET_TX_QUEUE(cmd, attr) \
3097 + MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->fqid)
3099 +#endif /* _FSL_DPDMAI_CMD_H */
3101 +++ b/drivers/dma/fsl-qdma.c
3104 + * Driver for NXP Layerscape Queue direct memory access controller (qDMA)
3106 + * Copyright 2017 NXP
3109 + * Jiaheng Fan <jiaheng.fan@nxp.com>
3110 + * Wen He <wen.he_1@nxp.com>
3112 + * SPDX-License-Identifier: GPL-2.0+
3115 +#include <linux/interrupt.h>
3116 +#include <linux/module.h>
3117 +#include <linux/delay.h>
3118 +#include <linux/of_irq.h>
3119 +#include <linux/of_address.h>
3120 +#include <linux/of_platform.h>
3121 +#include <linux/of_dma.h>
3122 +#include <linux/dma-mapping.h>
3123 +#include <linux/dmapool.h>
3124 +#include <linux/dmaengine.h>
3125 +#include <linux/slab.h>
3126 +#include <linux/spinlock.h>
3128 +#include "virt-dma.h"
3130 +#define FSL_QDMA_DMR 0x0
3131 +#define FSL_QDMA_DSR 0x4
3132 +#define FSL_QDMA_DEIER 0xe00
3133 +#define FSL_QDMA_DEDR 0xe04
3134 +#define FSL_QDMA_DECFDW0R 0xe10
3135 +#define FSL_QDMA_DECFDW1R 0xe14
3136 +#define FSL_QDMA_DECFDW2R 0xe18
3137 +#define FSL_QDMA_DECFDW3R 0xe1c
3138 +#define FSL_QDMA_DECFQIDR 0xe30
3139 +#define FSL_QDMA_DECBR 0xe34
3141 +#define FSL_QDMA_BCQMR(x) (0xc0 + 0x100 * (x))
3142 +#define FSL_QDMA_BCQSR(x) (0xc4 + 0x100 * (x))
3143 +#define FSL_QDMA_BCQEDPA_SADDR(x) (0xc8 + 0x100 * (x))
3144 +#define FSL_QDMA_BCQDPA_SADDR(x) (0xcc + 0x100 * (x))
3145 +#define FSL_QDMA_BCQEEPA_SADDR(x) (0xd0 + 0x100 * (x))
3146 +#define FSL_QDMA_BCQEPA_SADDR(x) (0xd4 + 0x100 * (x))
3147 +#define FSL_QDMA_BCQIER(x) (0xe0 + 0x100 * (x))
3148 +#define FSL_QDMA_BCQIDR(x) (0xe4 + 0x100 * (x))
3150 +#define FSL_QDMA_SQDPAR 0x80c
3151 +#define FSL_QDMA_SQEPAR 0x814
3152 +#define FSL_QDMA_BSQMR 0x800
3153 +#define FSL_QDMA_BSQSR 0x804
3154 +#define FSL_QDMA_BSQICR 0x828
3155 +#define FSL_QDMA_CQMR 0xa00
3156 +#define FSL_QDMA_CQDSCR1 0xa08
3157 +#define FSL_QDMA_CQDSCR2 0xa0c
3158 +#define FSL_QDMA_CQIER 0xa10
3159 +#define FSL_QDMA_CQEDR 0xa14
3160 +#define FSL_QDMA_SQCCMR 0xa20
3162 +#define FSL_QDMA_SQICR_ICEN
3164 +#define FSL_QDMA_CQIDR_CQT 0xff000000
3165 +#define FSL_QDMA_CQIDR_SQPE 0x800000
3166 +#define FSL_QDMA_CQIDR_SQT 0x8000
3168 +#define FSL_QDMA_BCQIER_CQTIE 0x8000
3169 +#define FSL_QDMA_BCQIER_CQPEIE 0x800000
3170 +#define FSL_QDMA_BSQICR_ICEN 0x80000000
3171 +#define FSL_QDMA_BSQICR_ICST(x) ((x) << 16)
3172 +#define FSL_QDMA_CQIER_MEIE 0x80000000
3173 +#define FSL_QDMA_CQIER_TEIE 0x1
3174 +#define FSL_QDMA_SQCCMR_ENTER_WM 0x200000
3176 +#define FSL_QDMA_QUEUE_MAX 8
3178 +#define FSL_QDMA_BCQMR_EN 0x80000000
3179 +#define FSL_QDMA_BCQMR_EI 0x40000000
3180 +#define FSL_QDMA_BCQMR_CD_THLD(x) ((x) << 20)
3181 +#define FSL_QDMA_BCQMR_CQ_SIZE(x) ((x) << 16)
3183 +#define FSL_QDMA_BCQSR_QF 0x10000
3184 +#define FSL_QDMA_BCQSR_XOFF 0x1
3186 +#define FSL_QDMA_BSQMR_EN 0x80000000
3187 +#define FSL_QDMA_BSQMR_DI 0x40000000
3188 +#define FSL_QDMA_BSQMR_CQ_SIZE(x) ((x) << 16)
3190 +#define FSL_QDMA_BSQSR_QE 0x20000
3192 +#define FSL_QDMA_DMR_DQD 0x40000000
3193 +#define FSL_QDMA_DSR_DB 0x80000000
3195 +#define FSL_QDMA_COMMAND_BUFFER_SIZE 64
3196 +#define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE 32
3197 +#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN 64
3198 +#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX 16384
3199 +#define FSL_QDMA_QUEUE_NUM_MAX 8
3201 +#define FSL_QDMA_CMD_RWTTYPE 0x4
3202 +#define FSL_QDMA_CMD_LWC 0x2
3204 +#define FSL_QDMA_CMD_RWTTYPE_OFFSET 28
3205 +#define FSL_QDMA_CMD_NS_OFFSET 27
3206 +#define FSL_QDMA_CMD_DQOS_OFFSET 24
3207 +#define FSL_QDMA_CMD_WTHROTL_OFFSET 20
3208 +#define FSL_QDMA_CMD_DSEN_OFFSET 19
3209 +#define FSL_QDMA_CMD_LWC_OFFSET 16
3211 +#define QDMA_CCDF_STATUS 20
3212 +#define QDMA_CCDF_OFFSET 20
3213 +#define QDMA_CCDF_MASK GENMASK(28, 20)
3214 +#define QDMA_CCDF_FOTMAT BIT(29)
3215 +#define QDMA_CCDF_SER BIT(30)
3217 +#define QDMA_SG_FIN BIT(30)
3218 +#define QDMA_SG_EXT BIT(31)
3219 +#define QDMA_SG_LEN_MASK GENMASK(29, 0)
3221 +#define QDMA_BIG_ENDIAN 0x00000001
3222 +#define COMP_TIMEOUT 1000
3223 +#define COMMAND_QUEUE_OVERFLLOW 10
3225 +#define QDMA_IN(fsl_qdma_engine, addr) \
3226 + (((fsl_qdma_engine)->big_endian & QDMA_BIG_ENDIAN) ? \
3227 + ioread32be(addr) : ioread32(addr))
3228 +#define QDMA_OUT(fsl_qdma_engine, addr, val) \
3229 + (((fsl_qdma_engine)->big_endian & QDMA_BIG_ENDIAN) ? \
3230 + iowrite32be(val, addr) : iowrite32(val, addr))
3232 +#define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x) \
3233 + (((fsl_qdma_engine)->block_offset) * (x))
3235 +static DEFINE_PER_CPU(u64, pre_addr);
3236 +static DEFINE_PER_CPU(u64, pre_queue);
3238 +/* qDMA Command Descriptor Formats */
3240 +struct fsl_qdma_format {
3241 + __le32 status; /* ser, status */
3242 + __le32 cfg; /* format, offset */
3245 + __le32 addr_lo; /* low 32-bits of 40-bit address */
3246 + u8 addr_hi; /* high 8-bits of 40-bit address */
3247 + u8 __reserved1[2];
3248 + u8 cfg8b_w1; /* dd, queue */
3255 +qdma_ccdf_addr_get64(const struct fsl_qdma_format *ccdf)
3257 + return le64_to_cpu(ccdf->data) & 0xffffffffffLLU;
3261 +qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
3263 + ccdf->addr_hi = upper_32_bits(addr);
3264 + ccdf->addr_lo = cpu_to_le32(lower_32_bits(addr));
3268 +qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
3270 + return ccdf->cfg8b_w1 & 0xff;
3274 +qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
3276 + return (le32_to_cpu(ccdf->cfg) & QDMA_CCDF_MASK) >> QDMA_CCDF_OFFSET;
3280 +qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
3282 + ccdf->cfg = cpu_to_le32(QDMA_CCDF_FOTMAT | offset);
3286 +qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
3288 + return (le32_to_cpu(ccdf->status) & QDMA_CCDF_MASK) >> QDMA_CCDF_STATUS;
3292 +qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
3294 + ccdf->status = cpu_to_le32(QDMA_CCDF_SER | status);
3297 +static inline void qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
3299 + csgf->cfg = cpu_to_le32(len & QDMA_SG_LEN_MASK);
3302 +static inline void qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
3304 + csgf->cfg = cpu_to_le32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
3307 +static inline void qdma_csgf_set_e(struct fsl_qdma_format *csgf, int len)
3309 + csgf->cfg = cpu_to_le32(QDMA_SG_EXT | (len & QDMA_SG_LEN_MASK));
3312 +/* qDMA Source Descriptor Format */
3313 +struct fsl_qdma_sdf {
3315 + __le32 cfg; /* rev4, bit[0-11] - ssd, bit[12-23] - sss */
3320 +/* qDMA Destination Descriptor Format */
3321 +struct fsl_qdma_ddf {
3323 + __le32 cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
3328 +struct fsl_qdma_chan {
3329 + struct virt_dma_chan vchan;
3330 + struct virt_dma_desc vdesc;
3331 + enum dma_status status;
3332 + struct fsl_qdma_engine *qdma;
3333 + struct fsl_qdma_queue *queue;
3336 +struct fsl_qdma_queue {
3337 + struct fsl_qdma_format *virt_head;
3338 + struct fsl_qdma_format *virt_tail;
3339 + struct list_head comp_used;
3340 + struct list_head comp_free;
3341 + struct dma_pool *comp_pool;
3342 + struct dma_pool *desc_pool;
3343 + spinlock_t queue_lock;
3344 + dma_addr_t bus_addr;
3347 + struct fsl_qdma_format *cq;
3348 + void __iomem *block_base;
3351 +struct fsl_qdma_comp {
3352 + dma_addr_t bus_addr;
3353 + dma_addr_t desc_bus_addr;
3355 + void *desc_virt_addr;
3356 + struct fsl_qdma_chan *qchan;
3357 + struct virt_dma_desc vdesc;
3358 + struct list_head list;
3361 +struct fsl_qdma_engine {
3362 + struct dma_device dma_dev;
3363 + void __iomem *ctrl_base;
3364 + void __iomem *status_base;
3365 + void __iomem *block_base;
3368 + struct mutex fsl_qdma_mutex;
3372 + struct fsl_qdma_queue *queue;
3373 + struct fsl_qdma_queue **status;
3374 + struct fsl_qdma_chan *chans;
3378 + int desc_allocated;
3382 +static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
3384 + return QDMA_IN(qdma, addr);
3387 +static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
3388 + void __iomem *addr)
3390 + QDMA_OUT(qdma, addr, val);
3393 +static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
3395 + return container_of(chan, struct fsl_qdma_chan, vchan.chan);
3398 +static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
3400 + return container_of(vd, struct fsl_qdma_comp, vdesc);
3403 +static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
3405 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
3406 + struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
3407 + struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
3408 + struct fsl_qdma_comp *comp_temp, *_comp_temp;
3409 + unsigned long flags;
3412 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
3413 + vchan_get_all_descriptors(&fsl_chan->vchan, &head);
3414 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
3416 + vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
3418 + if (!fsl_queue->comp_pool && !fsl_queue->comp_pool)
3421 + list_for_each_entry_safe(comp_temp, _comp_temp,
3422 + &fsl_queue->comp_used, list) {
3423 + dma_pool_free(fsl_queue->comp_pool,
3424 + comp_temp->virt_addr,
3425 + comp_temp->bus_addr);
3426 + dma_pool_free(fsl_queue->desc_pool,
3427 + comp_temp->desc_virt_addr,
3428 + comp_temp->desc_bus_addr);
3429 + list_del(&comp_temp->list);
3433 + list_for_each_entry_safe(comp_temp, _comp_temp,
3434 + &fsl_queue->comp_free, list) {
3435 + dma_pool_free(fsl_queue->comp_pool,
3436 + comp_temp->virt_addr,
3437 + comp_temp->bus_addr);
3438 + dma_pool_free(fsl_queue->desc_pool,
3439 + comp_temp->desc_virt_addr,
3440 + comp_temp->desc_bus_addr);
3441 + list_del(&comp_temp->list);
3445 + dma_pool_destroy(fsl_queue->comp_pool);
3446 + dma_pool_destroy(fsl_queue->desc_pool);
3448 + fsl_qdma->desc_allocated--;
3449 + fsl_queue->comp_pool = NULL;
3450 + fsl_queue->desc_pool = NULL;
3453 +static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
3454 + dma_addr_t dst, dma_addr_t src, u32 len)
3456 + struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
3457 + struct fsl_qdma_sdf *sdf;
3458 + struct fsl_qdma_ddf *ddf;
3460 + ccdf = (struct fsl_qdma_format *)fsl_comp->virt_addr;
3461 + csgf_desc = (struct fsl_qdma_format *)fsl_comp->virt_addr + 1;
3462 + csgf_src = (struct fsl_qdma_format *)fsl_comp->virt_addr + 2;
3463 + csgf_dest = (struct fsl_qdma_format *)fsl_comp->virt_addr + 3;
3464 + sdf = (struct fsl_qdma_sdf *)fsl_comp->desc_virt_addr;
3465 + ddf = (struct fsl_qdma_ddf *)fsl_comp->desc_virt_addr + 1;
3467 + memset(fsl_comp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
3468 + memset(fsl_comp->desc_virt_addr, 0, FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
3469 + /* Head Command Descriptor(Frame Descriptor) */
3470 + qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
3471 + qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
3472 + qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
3473 + /* Status notification is enqueued to status queue. */
3474 + /* Compound Command Descriptor(Frame List Table) */
3475 + qdma_desc_addr_set64(csgf_desc, fsl_comp->desc_bus_addr);
3476 + /* It must be 32 as Compound S/G Descriptor */
3477 + qdma_csgf_set_len(csgf_desc, 32);
3478 + qdma_desc_addr_set64(csgf_src, src);
3479 + qdma_csgf_set_len(csgf_src, len);
3480 + qdma_desc_addr_set64(csgf_dest, dst);
3481 + qdma_csgf_set_len(csgf_dest, len);
3482 + /* This entry is the last entry. */
3483 + qdma_csgf_set_f(csgf_dest, len);
3484 + /* Descriptor Buffer */
3485 + sdf->cmd = cpu_to_le32(
3486 + FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
3487 + ddf->cmd = cpu_to_le32(
3488 + FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
3489 + ddf->cmd |= cpu_to_le32(
3490 + FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
3494 + * Pre-request command descriptor and compound S/G for enqueue.
3496 +static int fsl_qdma_pre_request_enqueue_comp_desc(struct fsl_qdma_queue *queue)
3498 + struct fsl_qdma_comp *comp_temp;
3501 + for (i = 0; i < queue->n_cq + COMMAND_QUEUE_OVERFLLOW; i++) {
3502 + comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
3505 + comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
3507 + &comp_temp->bus_addr);
3509 + if (!comp_temp->virt_addr) {
3514 + list_add_tail(&comp_temp->list, &queue->comp_free);
3521 + * Pre-request source and destination descriptor for enqueue.
3523 +static int fsl_qdma_pre_request_enqueue_sd_desc(struct fsl_qdma_queue *queue)
3525 + struct fsl_qdma_comp *comp_temp, *_comp_temp;
3527 + list_for_each_entry_safe(comp_temp, _comp_temp,
3528 + &queue->comp_free, list) {
3529 + comp_temp->desc_virt_addr = dma_pool_alloc(queue->desc_pool,
3531 + &comp_temp->desc_bus_addr);
3532 + if (!comp_temp->desc_virt_addr)
3540 + * Request a command descriptor for enqueue.
3542 +static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc(
3543 + struct fsl_qdma_chan *fsl_chan)
3545 + struct fsl_qdma_comp *comp_temp;
3546 + struct fsl_qdma_queue *queue = fsl_chan->queue;
3547 + unsigned long flags;
3548 + int timeout = COMP_TIMEOUT;
3551 + spin_lock_irqsave(&queue->queue_lock, flags);
3552 + if (!list_empty(&queue->comp_free)) {
3553 + comp_temp = list_first_entry(&queue->comp_free,
3554 + struct fsl_qdma_comp,
3556 + list_del(&comp_temp->list);
3558 + spin_unlock_irqrestore(&queue->queue_lock, flags);
3559 + comp_temp->qchan = fsl_chan;
3562 + spin_unlock_irqrestore(&queue->queue_lock, flags);
3570 +static struct fsl_qdma_queue *fsl_qdma_alloc_queue_resources(
3571 + struct platform_device *pdev,
3572 + struct fsl_qdma_engine *fsl_qdma)
3574 + struct fsl_qdma_queue *queue_head, *queue_temp;
3575 + int ret, len, i, j;
3576 + unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
3580 + queue_num = fsl_qdma->n_queues;
3581 + block_number = fsl_qdma->block_number;
3583 + if (queue_num > FSL_QDMA_QUEUE_MAX)
3584 + queue_num = FSL_QDMA_QUEUE_MAX;
3585 + len = sizeof(*queue_head) * queue_num * block_number;
3586 + queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
3590 + ret = device_property_read_u32_array(&pdev->dev, "queue-sizes",
3591 + queue_size, queue_num);
3593 + dev_err(&pdev->dev, "Can't get queue-sizes.\n");
3596 + for (j = 0; j < block_number; j++) {
3597 + for (i = 0; i < queue_num; i++) {
3598 + if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
3599 + queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
3600 + dev_err(&pdev->dev,
3601 + "Get wrong queue-sizes.\n");
3604 + queue_temp = queue_head + i + (j * queue_num);
3607 + dma_alloc_coherent(&pdev->dev,
3608 + sizeof(struct fsl_qdma_format) *
3610 + &queue_temp->bus_addr,
3612 + if (!queue_temp->cq)
3614 + queue_temp->block_base = fsl_qdma->block_base +
3615 + FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
3616 + queue_temp->n_cq = queue_size[i];
3617 + queue_temp->id = i;
3618 + queue_temp->virt_head = queue_temp->cq;
3619 + queue_temp->virt_tail = queue_temp->cq;
3621 + * List for queue command buffer
3623 + INIT_LIST_HEAD(&queue_temp->comp_used);
3624 + spin_lock_init(&queue_temp->queue_lock);
3627 + return queue_head;
3630 +static struct fsl_qdma_queue *fsl_qdma_prep_status_queue(
3631 + struct platform_device *pdev)
3633 + struct device_node *np = pdev->dev.of_node;
3634 + struct fsl_qdma_queue *status_head;
3635 + unsigned int status_size;
3638 + ret = of_property_read_u32(np, "status-sizes", &status_size);
3640 + dev_err(&pdev->dev, "Can't get status-sizes.\n");
3643 + if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
3644 + || status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
3645 + dev_err(&pdev->dev, "Get wrong status_size.\n");
3648 + status_head = devm_kzalloc(&pdev->dev, sizeof(*status_head),
3654 + * Buffer for queue command
3656 + status_head->cq = dma_alloc_coherent(&pdev->dev,
3657 + sizeof(struct fsl_qdma_format) *
3659 + &status_head->bus_addr,
3661 + if (!status_head->cq)
3663 + status_head->n_cq = status_size;
3664 + status_head->virt_head = status_head->cq;
3665 + status_head->virt_tail = status_head->cq;
3666 + status_head->comp_pool = NULL;
3668 + return status_head;
3671 +static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
3673 + void __iomem *ctrl = fsl_qdma->ctrl_base;
3674 + void __iomem *block;
3679 + /* Disable the command queue and wait for idle state. */
3680 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
3681 + reg |= FSL_QDMA_DMR_DQD;
3682 + qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
3683 + for (j = 0; j < fsl_qdma->block_number; j++) {
3684 + block = fsl_qdma->block_base +
3685 + FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
3686 + for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
3687 + qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
3690 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
3691 + if (!(reg & FSL_QDMA_DSR_DB))
3698 + for (j = 0; j < fsl_qdma->block_number; j++) {
3700 + block = fsl_qdma->block_base +
3701 + FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
3703 + /* Disable status queue. */
3704 + qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);
3707 + * Clear the command queue interrupt detect register for
3710 + qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
3716 +static int fsl_qdma_queue_transfer_complete(
3717 + struct fsl_qdma_engine *fsl_qdma,
3721 + struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
3722 + struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];
3723 + struct fsl_qdma_queue *temp_queue;
3724 + struct fsl_qdma_format *status_addr;
3725 + struct fsl_qdma_comp *fsl_comp = NULL;
3727 + bool duplicate, duplicate_handle;
3731 + duplicate_handle = 0;
3732 + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
3733 + if (reg & FSL_QDMA_BSQSR_QE)
3736 + status_addr = fsl_status->virt_head;
3738 + if (qdma_ccdf_get_queue(status_addr) ==
3739 + __this_cpu_read(pre_queue) &&
3740 + qdma_ccdf_addr_get64(status_addr) ==
3741 + __this_cpu_read(pre_addr))
3743 + i = qdma_ccdf_get_queue(status_addr) +
3744 + id * fsl_qdma->n_queues;
3745 + __this_cpu_write(pre_addr, qdma_ccdf_addr_get64(status_addr));
3746 + __this_cpu_write(pre_queue, qdma_ccdf_get_queue(status_addr));
3747 + temp_queue = fsl_queue + i;
3749 + spin_lock(&temp_queue->queue_lock);
3750 + if (list_empty(&temp_queue->comp_used)) {
3752 + duplicate_handle = 1;
3754 + spin_unlock(&temp_queue->queue_lock);
3758 + fsl_comp = list_first_entry(&temp_queue->comp_used,
3759 + struct fsl_qdma_comp,
3761 + if (fsl_comp->bus_addr + 16 !=
3762 + __this_cpu_read(pre_addr)) {
3764 + duplicate_handle = 1;
3766 + spin_unlock(&temp_queue->queue_lock);
3773 + if (duplicate_handle) {
3774 + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
3775 + reg |= FSL_QDMA_BSQMR_DI;
3776 + qdma_desc_addr_set64(status_addr, 0x0);
3777 + fsl_status->virt_head++;
3778 + if (fsl_status->virt_head == fsl_status->cq
3779 + + fsl_status->n_cq)
3780 + fsl_status->virt_head = fsl_status->cq;
3781 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
3782 + spin_unlock(&temp_queue->queue_lock);
3785 + list_del(&fsl_comp->list);
3787 + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
3788 + reg |= FSL_QDMA_BSQMR_DI;
3789 + qdma_desc_addr_set64(status_addr, 0x0);
3790 + fsl_status->virt_head++;
3791 + if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
3792 + fsl_status->virt_head = fsl_status->cq;
3793 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
3794 + spin_unlock(&temp_queue->queue_lock);
3796 + spin_lock(&fsl_comp->qchan->vchan.lock);
3797 + vchan_cookie_complete(&fsl_comp->vdesc);
3798 + fsl_comp->qchan->status = DMA_COMPLETE;
3799 + spin_unlock(&fsl_comp->qchan->vchan.lock);
3804 +static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
3806 + struct fsl_qdma_engine *fsl_qdma = dev_id;
3807 + unsigned int intr;
3808 + void __iomem *status = fsl_qdma->status_base;
3810 + intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
3813 + dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");
3815 + qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
3816 + return IRQ_HANDLED;
3819 +static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
3821 + struct fsl_qdma_engine *fsl_qdma = dev_id;
3822 + unsigned int intr, reg;
3823 + void __iomem *ctrl = fsl_qdma->ctrl_base;
3824 + void __iomem *block;
3827 + id = irq - fsl_qdma->irq_base;
3828 + if (id < 0 && id > fsl_qdma->block_number) {
3829 + dev_err(fsl_qdma->dma_dev.dev,
3830 + "irq %d is wrong irq_base is %d\n",
3831 + irq, fsl_qdma->irq_base);
3834 + block = fsl_qdma->block_base +
3835 + FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
3837 + intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));
3839 + if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
3840 + intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id);
3843 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
3844 + reg |= FSL_QDMA_DMR_DQD;
3845 + qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
3846 + qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
3847 + dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
3850 + qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
3852 + return IRQ_HANDLED;
3856 +fsl_qdma_irq_init(struct platform_device *pdev,
3857 + struct fsl_qdma_engine *fsl_qdma)
3859 + char irq_name[20];
3864 + fsl_qdma->error_irq = platform_get_irq_byname(pdev,
3866 + if (fsl_qdma->error_irq < 0) {
3867 + dev_err(&pdev->dev, "Can't get qdma controller irq.\n");
3868 + return fsl_qdma->error_irq;
3871 + ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
3872 + fsl_qdma_error_handler, 0, "qDMA error", fsl_qdma);
3874 + dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
3878 + for (i = 0; i < fsl_qdma->block_number; i++) {
3879 + sprintf(irq_name, "qdma-queue%d", i);
3880 + fsl_qdma->queue_irq[i] = platform_get_irq_byname(pdev,
3883 + if (fsl_qdma->queue_irq[i] < 0) {
3884 + dev_err(&pdev->dev,
3885 + "Can't get qdma queue %d irq.\n",
3887 + return fsl_qdma->queue_irq[i];
3890 + ret = devm_request_irq(&pdev->dev,
3891 + fsl_qdma->queue_irq[i],
3892 + fsl_qdma_queue_handler,
3897 + dev_err(&pdev->dev,
3898 + "Can't register qDMA queue IRQ.\n");
3902 + cpu = i % num_online_cpus();
3903 + ret = irq_set_affinity_hint(fsl_qdma->queue_irq[i],
3904 + get_cpu_mask(cpu));
3906 + dev_err(&pdev->dev,
3907 + "Can't set cpu %d affinity to IRQ %d.\n",
3909 + fsl_qdma->queue_irq[i]);
3918 +static void fsl_qdma_irq_exit(
3919 + struct platform_device *pdev, struct fsl_qdma_engine *fsl_qdma)
3921 + if (fsl_qdma->queue_irq[0] == fsl_qdma->error_irq) {
3922 + devm_free_irq(&pdev->dev, fsl_qdma->queue_irq[0], fsl_qdma);
3924 + devm_free_irq(&pdev->dev, fsl_qdma->queue_irq[0], fsl_qdma);
3925 + devm_free_irq(&pdev->dev, fsl_qdma->error_irq, fsl_qdma);
3929 +static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
3931 + struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
3932 + struct fsl_qdma_queue *temp;
3933 + void __iomem *ctrl = fsl_qdma->ctrl_base;
3934 + void __iomem *status = fsl_qdma->status_base;
3935 + void __iomem *block;
3939 + /* Try to halt the qDMA engine first. */
3940 + ret = fsl_qdma_halt(fsl_qdma);
3942 + dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");
3946 + for (i = 0; i < fsl_qdma->block_number; i++) {
3948 + * Clear the command queue interrupt detect register for
3952 + block = fsl_qdma->block_base +
3953 + FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, i);
3954 + qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
3957 + for (j = 0; j < fsl_qdma->block_number; j++) {
3958 + block = fsl_qdma->block_base +
3959 + FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
3960 + for (i = 0; i < fsl_qdma->n_queues; i++) {
3961 + temp = fsl_queue + i + (j * fsl_qdma->n_queues);
3963 + * Initialize Command Queue registers to
3964 + * point to the first
3965 + * command descriptor in memory.
3966 + * Dequeue Pointer Address Registers
3967 + * Enqueue Pointer Address Registers
3970 + qdma_writel(fsl_qdma, temp->bus_addr,
3971 + block + FSL_QDMA_BCQDPA_SADDR(i));
3972 + qdma_writel(fsl_qdma, temp->bus_addr,
3973 + block + FSL_QDMA_BCQEPA_SADDR(i));
3975 + /* Initialize the queue mode. */
3976 + reg = FSL_QDMA_BCQMR_EN;
3977 + reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
3978 + reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
3979 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
3983 + * Workaround for erratum: ERR010812.
3984 + * We must enable XOFF to avoid enqueue rejections.
3985 + * Setting SQCCMR ENTER_WM to 0x20.
3988 + qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
3989 + block + FSL_QDMA_SQCCMR);
3992 + * Initialize status queue registers to point to the first
3993 + * command descriptor in memory.
3994 + * Dequeue Pointer Address Registers
3995 + * Enqueue Pointer Address Registers
3998 + qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
3999 + block + FSL_QDMA_SQEPAR);
4000 + qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
4001 + block + FSL_QDMA_SQDPAR);
4002 + /* Initialize status queue interrupt. */
4003 + qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
4004 + block + FSL_QDMA_BCQIER(0));
4005 + qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN |
4006 + FSL_QDMA_BSQICR_ICST(5) | 0x8000,
4007 + block + FSL_QDMA_BSQICR);
4008 + qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE |
4009 + FSL_QDMA_CQIER_TEIE,
4010 + block + FSL_QDMA_CQIER);
4012 + /* Initialize the status queue mode. */
4013 + reg = FSL_QDMA_BSQMR_EN;
4014 + reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2(
4015 + fsl_qdma->status[j]->n_cq) - 6);
4017 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
4018 + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
4022 + /* Initialize controller interrupt register. */
4023 + qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
4024 + qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEIER);
4026 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
4027 + reg &= ~FSL_QDMA_DMR_DQD;
4028 + qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
4033 +static struct dma_async_tx_descriptor *
4034 +fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
4035 + dma_addr_t src, size_t len, unsigned long flags)
4037 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
4038 + struct fsl_qdma_comp *fsl_comp;
4040 + fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan);
4045 + fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
4047 + return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
4050 +static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
4052 + struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
4053 + struct fsl_qdma_comp *fsl_comp;
4054 + struct virt_dma_desc *vdesc;
4055 + void __iomem *block = fsl_queue->block_base;
4058 + reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
4059 + if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
4061 + vdesc = vchan_next_desc(&fsl_chan->vchan);
4064 + list_del(&vdesc->node);
4065 + fsl_comp = to_fsl_qdma_comp(vdesc);
4067 + memcpy(fsl_queue->virt_head++, fsl_comp->virt_addr, 16);
4068 + if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
4069 + fsl_queue->virt_head = fsl_queue->cq;
4071 + list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
4073 + reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
4074 + reg |= FSL_QDMA_BCQMR_EI;
4075 + qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
4076 + fsl_chan->status = DMA_IN_PROGRESS;
4079 +static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan,
4080 + dma_cookie_t cookie, struct dma_tx_state *txstate)
4082 + return dma_cookie_status(chan, cookie, txstate);
4085 +static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
4087 + struct fsl_qdma_comp *fsl_comp;
4088 + struct fsl_qdma_queue *fsl_queue;
4089 + unsigned long flags;
4091 + fsl_comp = to_fsl_qdma_comp(vdesc);
4092 + fsl_queue = fsl_comp->qchan->queue;
4094 + spin_lock_irqsave(&fsl_queue->queue_lock, flags);
4095 + list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
4096 + spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
4099 +static void fsl_qdma_issue_pending(struct dma_chan *chan)
4101 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
4102 + struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
4103 + unsigned long flags;
4105 + spin_lock_irqsave(&fsl_queue->queue_lock, flags);
4106 + spin_lock(&fsl_chan->vchan.lock);
4107 + if (vchan_issue_pending(&fsl_chan->vchan))
4108 + fsl_qdma_enqueue_desc(fsl_chan);
4109 + spin_unlock(&fsl_chan->vchan.lock);
4110 + spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
4113 +static void fsl_qdma_synchronize(struct dma_chan *chan)
4115 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
4117 + vchan_synchronize(&fsl_chan->vchan);
4120 +static int fsl_qdma_terminate_all(struct dma_chan *chan)
4122 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
4123 + unsigned long flags;
4126 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
4127 + vchan_get_all_descriptors(&fsl_chan->vchan, &head);
4128 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
4129 + vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
4133 +static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
4135 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
4136 + struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
4137 + struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
4140 + if (fsl_queue->comp_pool && fsl_queue->desc_pool)
4141 + return fsl_qdma->desc_allocated;
4143 + INIT_LIST_HEAD(&fsl_queue->comp_free);
4146 + * The dma pool for queue command buffer
4148 + fsl_queue->comp_pool =
4149 + dma_pool_create("comp_pool",
4150 + chan->device->dev,
4151 + FSL_QDMA_COMMAND_BUFFER_SIZE,
4153 + if (!fsl_queue->comp_pool)
4157 + * The dma pool for Descriptor(SD/DD) buffer
4159 + fsl_queue->desc_pool =
4160 + dma_pool_create("desc_pool",
4161 + chan->device->dev,
4162 + FSL_QDMA_DESCRIPTOR_BUFFER_SIZE,
4164 + if (!fsl_queue->desc_pool)
4165 + goto err_desc_pool;
4167 + ret = fsl_qdma_pre_request_enqueue_comp_desc(fsl_queue);
4169 + dev_err(chan->device->dev, "failed to alloc dma buffer for "
4170 + "comp S/G descriptor\n");
4174 + ret = fsl_qdma_pre_request_enqueue_sd_desc(fsl_queue);
4176 + dev_err(chan->device->dev, "failed to alloc dma buffer for "
4177 + "S/D descriptor\n");
4181 + fsl_qdma->desc_allocated++;
4182 + return fsl_qdma->desc_allocated;
4185 + dma_pool_destroy(fsl_queue->desc_pool);
4187 + dma_pool_destroy(fsl_queue->comp_pool);
4191 +static int fsl_qdma_probe(struct platform_device *pdev)
4193 + struct device_node *np = pdev->dev.of_node;
4194 + struct fsl_qdma_engine *fsl_qdma;
4195 + struct fsl_qdma_chan *fsl_chan;
4196 + struct resource *res;
4197 + unsigned int len, chans, queues;
4202 + ret = of_property_read_u32(np, "channels", &chans);
4204 + dev_err(&pdev->dev, "Can't get channels.\n");
4208 + ret = of_property_read_u32(np, "block-offset", &blk_off);
4210 + dev_err(&pdev->dev, "Can't get block-offset.\n");
4214 + ret = of_property_read_u32(np, "block-number", &blk_num);
4216 + dev_err(&pdev->dev, "Can't get block-number.\n");
4220 + blk_num = min_t(int, blk_num, num_online_cpus());
4222 + len = sizeof(*fsl_qdma);
4223 + fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
4227 + len = sizeof(*fsl_chan) * chans;
4228 + fsl_qdma->chans = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
4229 + if (!fsl_qdma->chans)
4232 + len = sizeof(struct fsl_qdma_queue *) * blk_num;
4233 + fsl_qdma->status = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
4234 + if (!fsl_qdma->status)
4237 + len = sizeof(int) * blk_num;
4238 + fsl_qdma->queue_irq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
4239 + if (!fsl_qdma->queue_irq)
4242 + ret = of_property_read_u32(np, "queues", &queues);
4244 + dev_err(&pdev->dev, "Can't get queues.\n");
4248 + fsl_qdma->desc_allocated = 0;
4249 + fsl_qdma->n_chans = chans;
4250 + fsl_qdma->n_queues = queues;
4251 + fsl_qdma->block_number = blk_num;
4252 + fsl_qdma->block_offset = blk_off;
4254 + mutex_init(&fsl_qdma->fsl_qdma_mutex);
4256 + for (i = 0; i < fsl_qdma->block_number; i++) {
4257 + fsl_qdma->status[i] = fsl_qdma_prep_status_queue(pdev);
4258 + if (!fsl_qdma->status[i])
4261 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4262 + fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
4263 + if (IS_ERR(fsl_qdma->ctrl_base))
4264 + return PTR_ERR(fsl_qdma->ctrl_base);
4266 + res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
4267 + fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
4268 + if (IS_ERR(fsl_qdma->status_base))
4269 + return PTR_ERR(fsl_qdma->status_base);
4271 + res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
4272 + fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
4273 + if (IS_ERR(fsl_qdma->block_base))
4274 + return PTR_ERR(fsl_qdma->block_base);
4275 + fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma);
4276 + if (!fsl_qdma->queue)
4279 + ret = fsl_qdma_irq_init(pdev, fsl_qdma);
4283 + fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
4284 + fsl_qdma->big_endian = of_property_read_bool(np, "big-endian");
4285 + INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
4287 + for (i = 0; i < fsl_qdma->n_chans; i++) {
4288 + struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
4290 + fsl_chan->qdma = fsl_qdma;
4291 + fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
4292 + fsl_qdma->block_number);
4293 + fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
4294 + vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
4297 + dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);
4299 + fsl_qdma->dma_dev.dev = &pdev->dev;
4300 + fsl_qdma->dma_dev.device_free_chan_resources
4301 + = fsl_qdma_free_chan_resources;
4302 + fsl_qdma->dma_dev.device_alloc_chan_resources
4303 + = fsl_qdma_alloc_chan_resources;
4304 + fsl_qdma->dma_dev.device_tx_status = fsl_qdma_tx_status;
4305 + fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
4306 + fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
4307 + fsl_qdma->dma_dev.device_synchronize = fsl_qdma_synchronize;
4308 + fsl_qdma->dma_dev.device_terminate_all = fsl_qdma_terminate_all;
4310 + dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
4312 + platform_set_drvdata(pdev, fsl_qdma);
4314 + ret = dma_async_device_register(&fsl_qdma->dma_dev);
4316 + dev_err(&pdev->dev,
4317 + "Can't register NXP Layerscape qDMA engine.\n");
4321 + ret = fsl_qdma_reg_init(fsl_qdma);
4323 + dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
4330 +static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
4332 + struct fsl_qdma_chan *chan, *_chan;
4334 + list_for_each_entry_safe(chan, _chan,
4335 + &dmadev->channels, vchan.chan.device_node) {
4336 + list_del(&chan->vchan.chan.device_node);
4337 + tasklet_kill(&chan->vchan.task);
4341 +static int fsl_qdma_remove(struct platform_device *pdev)
4343 + struct device_node *np = pdev->dev.of_node;
4344 + struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
4345 + struct fsl_qdma_queue *status;
4348 + fsl_qdma_irq_exit(pdev, fsl_qdma);
4349 + fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);
4350 + of_dma_controller_free(np);
4351 + dma_async_device_unregister(&fsl_qdma->dma_dev);
4353 + for (i = 0; i < fsl_qdma->block_number; i++) {
4354 + status = fsl_qdma->status[i];
4355 + dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
4356 + status->n_cq, status->cq, status->bus_addr);
4361 +static const struct of_device_id fsl_qdma_dt_ids[] = {
4362 + { .compatible = "fsl,ls1021a-qdma", },
4363 + { /* sentinel */ }
4365 +MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);
4367 +static struct platform_driver fsl_qdma_driver = {
4369 + .name = "fsl-qdma",
4370 + .of_match_table = fsl_qdma_dt_ids,
4372 + .probe = fsl_qdma_probe,
4373 + .remove = fsl_qdma_remove,
4376 +module_platform_driver(fsl_qdma_driver);
4378 +MODULE_ALIAS("platform:fsl-qdma");
4379 +MODULE_DESCRIPTION("NXP Layerscape qDMA engine driver");
4380 +MODULE_LICENSE("GPL v2");