1 From 5cb4bc977d933323429050033da9c701b24df43e Mon Sep 17 00:00:00 2001
2 From: Biwen Li <biwen.li@nxp.com>
3 Date: Wed, 17 Apr 2019 18:58:23 +0800
4 Subject: [PATCH] dma: support layerscape
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
9 This is an integrated DMA patch for NXP Layerscape platforms
11 Signed-off-by: Biwen Li <biwen.li@nxp.com>
12 Signed-off-by: Catalin Horghidan <catalin.horghidan@nxp.com>
13 Signed-off-by: Changming Huang <jerry.huang@nxp.com>
14 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
15 Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
16 Signed-off-by: jiaheng.fan <jiaheng.fan@nxp.com>
17 Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
18 Signed-off-by: Peng Ma <peng.ma@nxp.com>
19 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
20 Signed-off-by: Rajiv Vishwakarma <rajiv.vishwakarma@nxp.com>
21 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
22 Signed-off-by: Wen He <wen.he_1@nxp.com>
23 Signed-off-by: Yuan Yao <yao.yuan@nxp.com>
25 drivers/dma/Kconfig | 33 +-
26 drivers/dma/Makefile | 3 +
27 drivers/dma/caam_dma.c | 462 ++++++++
28 drivers/dma/dpaa2-qdma/Kconfig | 8 +
29 drivers/dma/dpaa2-qdma/Makefile | 8 +
30 drivers/dma/dpaa2-qdma/dpaa2-qdma.c | 781 ++++++++++++++
31 drivers/dma/dpaa2-qdma/dpaa2-qdma.h | 181 ++++
32 drivers/dma/dpaa2-qdma/dpdmai.c | 515 +++++++++
33 drivers/dma/dpaa2-qdma/fsl_dpdmai.h | 521 +++++++++
34 drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h | 222 ++++
35 drivers/dma/fsl-edma.c | 66 +-
36 drivers/dma/fsl-qdma.c | 1278 +++++++++++++++++++++++
37 12 files changed, 4073 insertions(+), 5 deletions(-)
38 create mode 100644 drivers/dma/caam_dma.c
39 create mode 100644 drivers/dma/dpaa2-qdma/Kconfig
40 create mode 100644 drivers/dma/dpaa2-qdma/Makefile
41 create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.c
42 create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.h
43 create mode 100644 drivers/dma/dpaa2-qdma/dpdmai.c
44 create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai.h
45 create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
46 create mode 100644 drivers/dma/fsl-qdma.c
48 --- a/drivers/dma/Kconfig
49 +++ b/drivers/dma/Kconfig
50 @@ -129,6 +129,24 @@ config COH901318
52 Enable support for ST-Ericsson COH 901 318 DMA.
54 +config CRYPTO_DEV_FSL_CAAM_DMA
55 + tristate "CAAM DMA engine support"
56 + depends on CRYPTO_DEV_FSL_CAAM_JR
60 + select ASYNC_TX_ENABLE_CHANNEL_SWITCH
62 + Selecting this will offload the DMA operations for users of
63 + the scatter gather memcopy API to the CAAM via job rings. The
64 + CAAM is a hardware module that provides hardware acceleration to
65 + cryptographic operations. It has a built-in DMA controller that can
66 + be programmed to read/write cryptographic data. This module defines
67 + a DMA driver that uses the DMA capabilities of the CAAM.
69 + To compile this as a module, choose M here: the module
70 + will be called caam_dma.
73 tristate "BCM2835 DMA engine support"
74 depends on ARCH_BCM2835
75 @@ -215,6 +233,20 @@ config FSL_EDMA
76 multiplexing capability for DMA request sources(slot).
77 This module can be found on Freescale Vybrid and LS-1 SoCs.
80 + tristate "NXP Layerscape qDMA engine support"
82 + select DMA_VIRTUAL_CHANNELS
83 + select DMA_ENGINE_RAID
84 + select ASYNC_TX_ENABLE_CHANNEL_SWITCH
86 + Support the NXP Layerscape qDMA engine with command queue and legacy mode.
87 + Channel virtualization is supported through enqueuing of DMA jobs to,
88 + or dequeuing DMA jobs from, different work queues.
89 + This module can be found on NXP Layerscape SoCs.
91 +source drivers/dma/dpaa2-qdma/Kconfig
94 tristate "Freescale RAID engine Support"
95 depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
96 @@ -600,7 +632,6 @@ config ZX_DMA
98 Support the DMA engine for ZTE ZX family platform devices.
102 source "drivers/dma/bestcomm/Kconfig"
104 --- a/drivers/dma/Makefile
105 +++ b/drivers/dma/Makefile
106 @@ -31,7 +31,9 @@ obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
107 obj-$(CONFIG_DW_DMAC_CORE) += dw/
108 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
109 obj-$(CONFIG_FSL_DMA) += fsldma.o
110 +obj-$(CONFIG_FSL_DPAA2_QDMA) += dpaa2-qdma/
111 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
112 +obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
113 obj-$(CONFIG_FSL_RAID) += fsl_raid.o
114 obj-$(CONFIG_HSU_DMA) += hsu/
115 obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
116 @@ -71,6 +73,7 @@ obj-$(CONFIG_TI_EDMA) += edma.o
117 obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
118 obj-$(CONFIG_ZX_DMA) += zx_dma.o
119 obj-$(CONFIG_ST_FDMA) += st_fdma.o
120 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_DMA) += caam_dma.o
125 +++ b/drivers/dma/caam_dma.c
128 + * caam support for SG DMA
130 + * Copyright 2016 Freescale Semiconductor, Inc
131 + * Copyright 2017 NXP
133 + * Redistribution and use in source and binary forms, with or without
134 + * modification, are permitted provided that the following conditions are met:
135 + * * Redistributions of source code must retain the above copyright
136 + * notice, this list of conditions and the following disclaimer.
137 + * * Redistributions in binary form must reproduce the above copyright
138 + * notice, this list of conditions and the following disclaimer in the
139 + * documentation and/or other materials provided with the distribution.
140 + * * Neither the names of the above-listed copyright holders nor the
141 + * names of any contributors may be used to endorse or promote products
142 + * derived from this software without specific prior written permission.
145 + * ALTERNATIVELY, this software may be distributed under the terms of the
146 + * GNU General Public License ("GPL") as published by the Free Software
147 + * Foundation, either version 2 of that License or (at your option) any
150 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
151 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
152 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
153 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
154 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
155 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
156 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
157 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
158 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
159 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
160 + * POSSIBILITY OF SUCH DAMAGE.
163 +#include <linux/dma-mapping.h>
164 +#include <linux/dmaengine.h>
165 +#include <linux/module.h>
166 +#include <linux/platform_device.h>
167 +#include <linux/slab.h>
169 +#include "dmaengine.h"
171 +#include "../crypto/caam/regs.h"
172 +#include "../crypto/caam/jr.h"
173 +#include "../crypto/caam/error.h"
174 +#include "../crypto/caam/desc_constr.h"
176 +#define DESC_DMA_MEMCPY_LEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / \
180 + * This is max chunk size of a DMA transfer. If a buffer is larger than this
181 + * value it is internally broken into chunks of max CAAM_DMA_CHUNK_SIZE bytes
182 + * and for each chunk a DMA transfer request is issued.
183 + * This value is the largest number on 16 bits that is a multiple of 256 bytes
184 + * (the largest configurable CAAM DMA burst size).
186 +#define CAAM_DMA_CHUNK_SIZE 65280
188 +struct caam_dma_sh_desc {
189 + u32 desc[DESC_DMA_MEMCPY_LEN] ____cacheline_aligned;
190 + dma_addr_t desc_dma;
193 +/* caam dma extended descriptor */
194 +struct caam_dma_edesc {
195 + struct dma_async_tx_descriptor async_tx;
196 + struct list_head node;
197 + struct caam_dma_ctx *ctx;
198 + dma_addr_t src_dma;
199 + dma_addr_t dst_dma;
200 + unsigned int src_len;
201 + unsigned int dst_len;
202 + u32 jd[] ____cacheline_aligned;
206 + * caam_dma_ctx - per jr/channel context
207 + * @chan: dma channel used by async_tx API
208 + * @node: list_head used to attach to the global dma_ctx_list
209 + * @jrdev: Job Ring device
210 + * @pending_q: queue of pending (submitted, but not enqueued) jobs
211 + * @done_not_acked: jobs that have been completed by jr, but maybe not acked
212 + * @edesc_lock: protects extended descriptor
214 +struct caam_dma_ctx {
215 + struct dma_chan chan;
216 + struct list_head node;
217 + struct device *jrdev;
218 + struct list_head pending_q;
219 + struct list_head done_not_acked;
220 + spinlock_t edesc_lock;
223 +static struct dma_device *dma_dev;
224 +static struct caam_dma_sh_desc *dma_sh_desc;
225 +static LIST_HEAD(dma_ctx_list);
227 +static dma_cookie_t caam_dma_tx_submit(struct dma_async_tx_descriptor *tx)
229 + struct caam_dma_edesc *edesc = NULL;
230 + struct caam_dma_ctx *ctx = NULL;
231 + dma_cookie_t cookie;
233 + edesc = container_of(tx, struct caam_dma_edesc, async_tx);
234 + ctx = container_of(tx->chan, struct caam_dma_ctx, chan);
236 + spin_lock_bh(&ctx->edesc_lock);
238 + cookie = dma_cookie_assign(tx);
239 + list_add_tail(&edesc->node, &ctx->pending_q);
241 + spin_unlock_bh(&ctx->edesc_lock);
246 +static void caam_jr_chan_free_edesc(struct caam_dma_edesc *edesc)
248 + struct caam_dma_ctx *ctx = edesc->ctx;
249 + struct caam_dma_edesc *_edesc = NULL;
251 + spin_lock_bh(&ctx->edesc_lock);
253 + list_add_tail(&edesc->node, &ctx->done_not_acked);
254 + list_for_each_entry_safe(edesc, _edesc, &ctx->done_not_acked, node) {
255 + if (async_tx_test_ack(&edesc->async_tx)) {
256 + list_del(&edesc->node);
261 + spin_unlock_bh(&ctx->edesc_lock);
264 +static void caam_dma_done(struct device *dev, u32 *hwdesc, u32 err,
267 + struct caam_dma_edesc *edesc = context;
268 + struct caam_dma_ctx *ctx = edesc->ctx;
269 + dma_async_tx_callback callback;
270 + void *callback_param;
273 + caam_jr_strstatus(ctx->jrdev, err);
275 + dma_run_dependencies(&edesc->async_tx);
277 + spin_lock_bh(&ctx->edesc_lock);
278 + dma_cookie_complete(&edesc->async_tx);
279 + spin_unlock_bh(&ctx->edesc_lock);
281 + callback = edesc->async_tx.callback;
282 + callback_param = edesc->async_tx.callback_param;
284 + dma_descriptor_unmap(&edesc->async_tx);
286 + caam_jr_chan_free_edesc(edesc);
289 + callback(callback_param);
292 +static void caam_dma_memcpy_init_job_desc(struct caam_dma_edesc *edesc)
294 + u32 *jd = edesc->jd;
295 + u32 *sh_desc = dma_sh_desc->desc;
296 + dma_addr_t desc_dma = dma_sh_desc->desc_dma;
298 + /* init the job descriptor */
299 + init_job_desc_shared(jd, desc_dma, desc_len(sh_desc), HDR_REVERSE);
301 + /* set SEQIN PTR */
302 + append_seq_in_ptr(jd, edesc->src_dma, edesc->src_len, 0);
304 + /* set SEQOUT PTR */
305 + append_seq_out_ptr(jd, edesc->dst_dma, edesc->dst_len, 0);
307 + print_hex_dump_debug("caam dma desc@" __stringify(__LINE__) ": ",
308 + DUMP_PREFIX_ADDRESS, 16, 4, jd, desc_bytes(jd), 1);
311 +static struct dma_async_tx_descriptor *
312 +caam_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
313 + size_t len, unsigned long flags)
315 + struct caam_dma_edesc *edesc;
316 + struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
319 + edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | GFP_NOWAIT);
321 + return ERR_PTR(-ENOMEM);
323 + dma_async_tx_descriptor_init(&edesc->async_tx, chan);
324 + edesc->async_tx.tx_submit = caam_dma_tx_submit;
325 + edesc->async_tx.flags = flags;
326 + edesc->async_tx.cookie = -EBUSY;
328 + edesc->src_dma = src;
329 + edesc->src_len = len;
330 + edesc->dst_dma = dst;
331 + edesc->dst_len = len;
334 + caam_dma_memcpy_init_job_desc(edesc);
336 + return &edesc->async_tx;
339 +/* This function can be called in an interrupt context */
340 +static void caam_dma_issue_pending(struct dma_chan *chan)
342 + struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
344 + struct caam_dma_edesc *edesc, *_edesc;
346 + spin_lock_bh(&ctx->edesc_lock);
347 + list_for_each_entry_safe(edesc, _edesc, &ctx->pending_q, node) {
348 + if (caam_jr_enqueue(ctx->jrdev, edesc->jd,
349 + caam_dma_done, edesc) < 0)
351 + list_del(&edesc->node);
353 + spin_unlock_bh(&ctx->edesc_lock);
356 +static void caam_dma_free_chan_resources(struct dma_chan *chan)
358 + struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
360 + struct caam_dma_edesc *edesc, *_edesc;
362 + spin_lock_bh(&ctx->edesc_lock);
363 + list_for_each_entry_safe(edesc, _edesc, &ctx->pending_q, node) {
364 + list_del(&edesc->node);
367 + list_for_each_entry_safe(edesc, _edesc, &ctx->done_not_acked, node) {
368 + list_del(&edesc->node);
371 + spin_unlock_bh(&ctx->edesc_lock);
374 +static int caam_dma_jr_chan_bind(void)
376 + struct device *jrdev;
377 + struct caam_dma_ctx *ctx;
381 + for (i = 0; i < caam_jr_driver_probed(); i++) {
382 + jrdev = caam_jridx_alloc(i);
383 + if (IS_ERR(jrdev)) {
384 + pr_err("job ring device %d allocation failed\n", i);
388 + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
390 + caam_jr_free(jrdev);
394 + ctx->chan.device = dma_dev;
395 + ctx->chan.private = ctx;
397 + ctx->jrdev = jrdev;
399 + INIT_LIST_HEAD(&ctx->pending_q);
400 + INIT_LIST_HEAD(&ctx->done_not_acked);
401 + INIT_LIST_HEAD(&ctx->node);
402 + spin_lock_init(&ctx->edesc_lock);
404 + dma_cookie_init(&ctx->chan);
406 + /* add the context of this channel to the context list */
407 + list_add_tail(&ctx->node, &dma_ctx_list);
409 + /* add this channel to the device chan list */
410 + list_add_tail(&ctx->chan.device_node, &dma_dev->channels);
418 +static inline void caam_jr_dma_free(struct dma_chan *chan)
420 + struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
423 + list_del(&ctx->node);
424 + list_del(&chan->device_node);
425 + caam_jr_free(ctx->jrdev);
429 +static void set_caam_dma_desc(u32 *desc)
433 + /* dma shared descriptor */
434 + init_sh_desc(desc, HDR_SHARE_NEVER | (1 << HDR_START_IDX_SHIFT));
436 + /* REG1 = CAAM_DMA_CHUNK_SIZE */
437 + append_math_add_imm_u32(desc, REG1, ZERO, IMM, CAAM_DMA_CHUNK_SIZE);
439 + /* REG0 = SEQINLEN - CAAM_DMA_CHUNK_SIZE */
440 + append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, CAAM_DMA_CHUNK_SIZE);
446 + jmp_cmd = append_jump(desc, JUMP_TEST_INVALL | JUMP_COND_MATH_N |
449 + /* REG1 = SEQINLEN */
450 + append_math_sub(desc, REG1, SEQINLEN, ZERO, CAAM_CMD_SZ);
453 + set_jump_tgt_here(desc, jmp_cmd);
455 + /* VARSEQINLEN = REG1 */
456 + append_math_add(desc, VARSEQINLEN, REG1, ZERO, CAAM_CMD_SZ);
458 + /* VARSEQOUTLEN = REG1 */
459 + append_math_add(desc, VARSEQOUTLEN, REG1, ZERO, CAAM_CMD_SZ);
461 + /* do FIFO STORE */
462 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_METADATA | LDST_VLF);
465 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
466 + FIFOLD_TYPE_IFIFO | LDST_VLF);
470 + * jmp 0xF8 (after shared desc header)
472 + append_jump(desc, JUMP_TEST_INVALL | JUMP_COND_MATH_N |
473 + JUMP_COND_MATH_Z | 0xF8);
475 + print_hex_dump_debug("caam dma shdesc@" __stringify(__LINE__) ": ",
476 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
480 +static int __init caam_dma_probe(struct platform_device *pdev)
482 + struct device *dev = &pdev->dev;
483 + struct device *ctrldev = dev->parent;
484 + struct dma_chan *chan, *_chan;
489 + if (!caam_jr_driver_probed()) {
490 + dev_info(dev, "Defer probing after JR driver probing\n");
491 + return -EPROBE_DEFER;
494 + dma_dev = kzalloc(sizeof(*dma_dev), GFP_KERNEL);
498 + dma_sh_desc = kzalloc(sizeof(*dma_sh_desc), GFP_KERNEL | GFP_DMA);
502 + sh_desc = dma_sh_desc->desc;
503 + set_caam_dma_desc(sh_desc);
504 + dma_sh_desc->desc_dma = dma_map_single(ctrldev, sh_desc,
505 + desc_bytes(sh_desc),
507 + if (dma_mapping_error(ctrldev, dma_sh_desc->desc_dma)) {
508 + dev_err(dev, "unable to map dma descriptor\n");
512 + INIT_LIST_HEAD(&dma_dev->channels);
514 + bonds = caam_dma_jr_chan_bind();
520 + dma_dev->dev = dev;
521 + dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
522 + dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
523 + dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
524 + dma_dev->device_tx_status = dma_cookie_status;
525 + dma_dev->device_issue_pending = caam_dma_issue_pending;
526 + dma_dev->device_prep_dma_memcpy = caam_dma_prep_memcpy;
527 + dma_dev->device_free_chan_resources = caam_dma_free_chan_resources;
529 + err = dma_async_device_register(dma_dev);
531 + dev_err(dev, "Failed to register CAAM DMA engine\n");
535 + dev_info(dev, "caam dma support with %d job rings\n", bonds);
540 + list_for_each_entry_safe(chan, _chan, &dma_dev->channels, device_node)
541 + caam_jr_dma_free(chan);
543 + dma_unmap_single(ctrldev, dma_sh_desc->desc_dma, desc_bytes(sh_desc),
546 + kfree(dma_sh_desc);
552 +static int caam_dma_remove(struct platform_device *pdev)
554 + struct device *dev = &pdev->dev;
555 + struct device *ctrldev = dev->parent;
556 + struct caam_dma_ctx *ctx, *_ctx;
558 + dma_async_device_unregister(dma_dev);
560 + list_for_each_entry_safe(ctx, _ctx, &dma_ctx_list, node) {
561 + list_del(&ctx->node);
562 + caam_jr_free(ctx->jrdev);
566 + dma_unmap_single(ctrldev, dma_sh_desc->desc_dma,
567 + desc_bytes(dma_sh_desc->desc), DMA_TO_DEVICE);
569 + kfree(dma_sh_desc);
572 + dev_info(dev, "caam dma support disabled\n");
576 +static struct platform_driver caam_dma_driver = {
578 + .name = "caam-dma",
580 + .probe = caam_dma_probe,
581 + .remove = caam_dma_remove,
583 +module_platform_driver(caam_dma_driver);
585 +MODULE_LICENSE("Dual BSD/GPL");
586 +MODULE_DESCRIPTION("NXP CAAM support for DMA engine");
587 +MODULE_AUTHOR("NXP Semiconductors");
588 +MODULE_ALIAS("platform:caam-dma");
590 +++ b/drivers/dma/dpaa2-qdma/Kconfig
592 +menuconfig FSL_DPAA2_QDMA
593 + tristate "NXP DPAA2 QDMA"
594 + depends on FSL_MC_BUS && FSL_MC_DPIO
596 + select DMA_VIRTUAL_CHANNELS
598 + NXP Data Path Acceleration Architecture 2 QDMA driver,
599 + using the NXP MC bus driver.
601 +++ b/drivers/dma/dpaa2-qdma/Makefile
604 +# Makefile for the NXP DPAA2 CAAM controllers
606 +ccflags-y += -DVERSION=\"\"
608 +obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma.o
610 +fsl-dpaa2-qdma-objs := dpaa2-qdma.o dpdmai.o
612 +++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.c
615 + * drivers/dma/dpaa2-qdma/dpaa2-qdma.c
617 + * Copyright 2015-2017 NXP Semiconductor, Inc.
618 + * Author: Changming Huang <jerry.huang@nxp.com>
620 + * Driver for the NXP QDMA engine with QMan mode.
621 + * Channel virtualization is supported through enqueuing of DMA jobs to,
622 + * or dequeuing DMA jobs from different work queues with QMan portal.
623 + * This module can be found on NXP LS2 SoCs.
625 + * This program is free software; you can redistribute it and/or modify it
626 + * under the terms of the GNU General Public License as published by the
627 + * Free Software Foundation; either version 2 of the License, or (at your
628 + * option) any later version.
631 +#include <linux/init.h>
632 +#include <linux/module.h>
633 +#include <linux/interrupt.h>
634 +#include <linux/clk.h>
635 +#include <linux/dma-mapping.h>
636 +#include <linux/dmapool.h>
637 +#include <linux/slab.h>
638 +#include <linux/spinlock.h>
639 +#include <linux/of.h>
640 +#include <linux/of_device.h>
641 +#include <linux/of_address.h>
642 +#include <linux/of_irq.h>
643 +#include <linux/of_dma.h>
644 +#include <linux/types.h>
645 +#include <linux/delay.h>
646 +#include <linux/iommu.h>
647 +#include <linux/sys_soc.h>
649 +#include "../virt-dma.h"
651 +#include <linux/fsl/mc.h>
652 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
653 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
654 +#include "fsl_dpdmai_cmd.h"
655 +#include "fsl_dpdmai.h"
656 +#include "dpaa2-qdma.h"
658 +static bool smmu_disable = true;
660 +static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
662 + return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
665 +static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
667 + return container_of(vd, struct dpaa2_qdma_comp, vdesc);
670 +static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
675 +static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
677 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
678 + unsigned long flags;
681 + spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
682 + vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
683 + spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);
685 + vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);
689 + * Request a command descriptor for enqueue.
691 +static struct dpaa2_qdma_comp *
692 +dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
694 + struct dpaa2_qdma_comp *comp_temp = NULL;
695 + unsigned long flags;
697 + spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
698 + if (list_empty(&dpaa2_chan->comp_free)) {
699 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
700 + comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
703 + comp_temp->fd_virt_addr = dma_pool_alloc(dpaa2_chan->fd_pool,
704 + GFP_NOWAIT, &comp_temp->fd_bus_addr);
705 + if (!comp_temp->fd_virt_addr)
708 + comp_temp->fl_virt_addr =
709 + (void *)((struct dpaa2_fd *)
710 + comp_temp->fd_virt_addr + 1);
711 + comp_temp->fl_bus_addr = comp_temp->fd_bus_addr +
712 + sizeof(struct dpaa2_fd);
713 + comp_temp->desc_virt_addr =
714 + (void *)((struct dpaa2_fl_entry *)
715 + comp_temp->fl_virt_addr + 3);
716 + comp_temp->desc_bus_addr = comp_temp->fl_bus_addr +
717 + sizeof(struct dpaa2_fl_entry) * 3;
719 + comp_temp->qchan = dpaa2_chan;
722 + comp_temp = list_first_entry(&dpaa2_chan->comp_free,
723 + struct dpaa2_qdma_comp, list);
724 + list_del(&comp_temp->list);
725 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
727 + comp_temp->qchan = dpaa2_chan;
732 +static void dpaa2_qdma_populate_fd(uint32_t format,
733 + struct dpaa2_qdma_comp *dpaa2_comp)
735 + struct dpaa2_fd *fd;
737 + fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
738 + memset(fd, 0, sizeof(struct dpaa2_fd));
741 + dpaa2_fd_set_addr(fd, dpaa2_comp->fl_bus_addr);
742 + /* Bypass memory translation, Frame list format, short length disable */
743 + /* we need to disable BMT if fsl-mc uses iova addr */
745 + dpaa2_fd_set_bpid(fd, QMAN_FD_BMT_ENABLE);
746 + dpaa2_fd_set_format(fd, QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE);
748 + dpaa2_fd_set_frc(fd, format | QDMA_SER_CTX);
751 +/* first frame list for descriptor buffer */
752 +static void dpaa2_qdma_populate_first_framel(
753 + struct dpaa2_fl_entry *f_list,
754 + struct dpaa2_qdma_comp *dpaa2_comp,
757 + struct dpaa2_qdma_sd_d *sdd;
759 + sdd = (struct dpaa2_qdma_sd_d *)dpaa2_comp->desc_virt_addr;
760 + memset(sdd, 0, 2 * (sizeof(*sdd)));
761 + /* source and destination descriptor */
762 + sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT); /* source descriptor CMD */
765 + /* dest descriptor CMD */
767 + sdd->cmd = cpu_to_le32(LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT);
769 + sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT);
771 + memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
772 + /* first frame list to source descriptor */
774 + dpaa2_fl_set_addr(f_list, dpaa2_comp->desc_bus_addr);
775 + dpaa2_fl_set_len(f_list, 0x20);
776 + dpaa2_fl_set_format(f_list, QDMA_FL_FMT_SBF | QDMA_FL_SL_LONG);
779 + f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE); /* bypass memory translation */
782 +/* source and destination frame list */
783 +static void dpaa2_qdma_populate_frames(struct dpaa2_fl_entry *f_list,
784 + dma_addr_t dst, dma_addr_t src, size_t len, uint8_t fmt)
786 + /* source frame list to source buffer */
787 + memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
790 + dpaa2_fl_set_addr(f_list, src);
791 + dpaa2_fl_set_len(f_list, len);
792 + dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG)); /* single buffer frame or scatter gather frame */
794 + f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE); /* bypass memory translation */
797 + /* destination frame list to destination buffer */
798 + memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
800 + dpaa2_fl_set_addr(f_list, dst);
801 + dpaa2_fl_set_len(f_list, len);
802 + dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
803 + dpaa2_fl_set_final(f_list, QDMA_FL_F); /* single buffer frame or scatter gather frame */
805 + f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE); /* bypass memory translation */
808 +static struct dma_async_tx_descriptor *dpaa2_qdma_prep_memcpy(
809 + struct dma_chan *chan, dma_addr_t dst,
810 + dma_addr_t src, size_t len, unsigned long flags)
812 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
813 + struct dpaa2_qdma_engine *dpaa2_qdma;
814 + struct dpaa2_qdma_comp *dpaa2_comp;
815 + struct dpaa2_fl_entry *f_list;
819 + dpaa2_qdma = dpaa2_chan->qdma;
820 + dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
821 + wrt_changed = dpaa2_qdma->qdma_wrtype_fixup;
824 + format = QDMA_FD_LONG_FORMAT;
826 + format = QDMA_FD_SHORT_FORMAT;
828 + /* populate Frame descriptor */
829 + dpaa2_qdma_populate_fd(format, dpaa2_comp);
831 + f_list = (struct dpaa2_fl_entry *)dpaa2_comp->fl_virt_addr;
834 + /* first frame list for descriptor buffer (long format) */
835 + dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp, wrt_changed);
840 + dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);
842 + return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
845 +static enum dma_status dpaa2_qdma_tx_status(struct dma_chan *chan,
846 + dma_cookie_t cookie, struct dma_tx_state *txstate)
848 + return dma_cookie_status(chan, cookie, txstate);
851 +static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
855 +static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
857 + struct dpaa2_qdma_comp *dpaa2_comp;
858 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
859 + struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
860 + struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
861 + struct virt_dma_desc *vdesc;
862 + struct dpaa2_fd *fd;
864 + unsigned long flags;
866 + spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
867 + spin_lock(&dpaa2_chan->vchan.lock);
868 + if (vchan_issue_pending(&dpaa2_chan->vchan)) {
869 + vdesc = vchan_next_desc(&dpaa2_chan->vchan);
872 + dpaa2_comp = to_fsl_qdma_comp(vdesc);
874 + fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
876 + list_del(&vdesc->node);
877 + list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);
879 + /* TODO: priority hard-coded to zero */
880 + err = dpaa2_io_service_enqueue_fq(NULL,
881 + priv->tx_queue_attr[0].fqid, fd);
883 + list_del(&dpaa2_comp->list);
884 + list_add_tail(&dpaa2_comp->list,
885 + &dpaa2_chan->comp_free);
890 + spin_unlock(&dpaa2_chan->vchan.lock);
891 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
894 +static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
896 + struct device *dev = &ls_dev->dev;
897 + struct dpaa2_qdma_priv *priv;
898 + struct dpaa2_qdma_priv_per_prio *ppriv;
899 + uint8_t prio_def = DPDMAI_PRIO_NUM;
903 + priv = dev_get_drvdata(dev);
906 + priv->dpqdma_id = ls_dev->obj_desc.id;
908 + /* Get the handle for the DPDMAI this interface is associated with */
909 + err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
911 + dev_err(dev, "dpdmai_open() failed\n");
914 + dev_info(dev, "Opened dpdmai object successfully\n");
916 + err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
917 + &priv->dpdmai_attr);
919 + dev_err(dev, "dpdmai_get_attributes() failed\n");
923 + if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
924 + dev_err(dev, "DPDMAI major version mismatch\n"
925 + "Found %u.%u, supported version is %u.%u\n",
926 + priv->dpdmai_attr.version.major,
927 + priv->dpdmai_attr.version.minor,
928 + DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
931 + if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
932 + dev_err(dev, "DPDMAI minor version mismatch\n"
933 + "Found %u.%u, supported version is %u.%u\n",
934 + priv->dpdmai_attr.version.major,
935 + priv->dpdmai_attr.version.minor,
936 + DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
939 + priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
940 + ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
942 + dev_err(dev, "kzalloc for ppriv failed\n");
945 + priv->ppriv = ppriv;
947 + for (i = 0; i < priv->num_pairs; i++) {
948 + err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
949 + i, &priv->rx_queue_attr[i]);
951 + dev_err(dev, "dpdmai_get_rx_queue() failed\n");
954 + ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
956 + err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
957 + i, &priv->tx_queue_attr[i]);
959 + dev_err(dev, "dpdmai_get_tx_queue() failed\n");
962 + ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
964 + ppriv->priv = priv;
971 +static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
973 + struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
974 + struct dpaa2_qdma_priv_per_prio, nctx);
975 + struct dpaa2_qdma_priv *priv = ppriv->priv;
976 + struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
977 + struct dpaa2_qdma_chan *qchan;
978 + const struct dpaa2_fd *fd;
979 + const struct dpaa2_fd *fd_eq;
980 + struct dpaa2_dq *dq;
986 + uint32_t n_chans = priv->dpaa2_qdma->n_chans;
989 + err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
995 + dq = dpaa2_io_store_next(ppriv->store, &is_last);
996 + } while (!is_last && !dq);
998 + dev_err(priv->dev, "FQID returned no valid frames!\n");
1002 + /* obtain FD and process the error */
1003 + fd = dpaa2_dq_fd(dq);
1005 + status = dpaa2_fd_get_ctrl(fd) & 0xff;
1007 + dev_err(priv->dev, "FD error occurred\n");
1009 + for (i = 0; i < n_chans; i++) {
1010 + qchan = &priv->dpaa2_qdma->chans[i];
1011 + spin_lock(&qchan->queue_lock);
1012 + if (list_empty(&qchan->comp_used)) {
1013 + spin_unlock(&qchan->queue_lock);
1016 + list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
1017 + &qchan->comp_used, list) {
1018 + fd_eq = (struct dpaa2_fd *)
1019 + dpaa2_comp->fd_virt_addr;
1021 + if (le64_to_cpu(fd_eq->simple.addr) ==
1022 + le64_to_cpu(fd->simple.addr)) {
1024 + list_del(&dpaa2_comp->list);
1025 + list_add_tail(&dpaa2_comp->list,
1026 + &qchan->comp_free);
1028 + spin_lock(&qchan->vchan.lock);
1029 + vchan_cookie_complete(
1030 + &dpaa2_comp->vdesc);
1031 + spin_unlock(&qchan->vchan.lock);
1036 + spin_unlock(&qchan->queue_lock);
1042 + dpaa2_io_service_rearm(NULL, ctx);
1045 +static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
1048 + struct device *dev = priv->dev;
1049 + struct dpaa2_qdma_priv_per_prio *ppriv;
1051 + num = priv->num_pairs;
1052 + ppriv = priv->ppriv;
1053 + for (i = 0; i < num; i++) {
1054 + ppriv->nctx.is_cdan = 0;
1055 + ppriv->nctx.desired_cpu = 1;
1056 + ppriv->nctx.id = ppriv->rsp_fqid;
1057 + ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
1058 + err = dpaa2_io_service_register(NULL, &ppriv->nctx, dev);
1060 + dev_err(dev, "Notification register failed\n");
1064 + ppriv->store = dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE,
1066 + if (!ppriv->store) {
1067 + dev_err(dev, "dpaa2_io_store_create() failed\n");
1076 + dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
1079 + while (ppriv >= priv->ppriv) {
1080 + dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
1081 + dpaa2_io_store_destroy(ppriv->store);
1087 +static void __cold dpaa2_dpmai_store_free(struct dpaa2_qdma_priv *priv)
1089 + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
1092 + for (i = 0; i < priv->num_pairs; i++) {
1093 + dpaa2_io_store_destroy(ppriv->store);
1098 +static void __cold dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
1100 + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
1101 + struct device *dev = priv->dev;
1104 + for (i = 0; i < priv->num_pairs; i++) {
1105 + dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
1110 +static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
1113 + struct dpdmai_rx_queue_cfg rx_queue_cfg;
1114 + struct device *dev = priv->dev;
1115 + struct dpaa2_qdma_priv_per_prio *ppriv;
1116 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
1119 + num = priv->num_pairs;
1120 + ppriv = priv->ppriv;
1121 + for (i = 0; i < num; i++) {
1122 + rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
1123 + DPDMAI_QUEUE_OPT_DEST;
1124 + rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
1125 + rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
1126 + rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
1127 + rx_queue_cfg.dest_cfg.priority = ppriv->prio;
1128 + err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
1129 + rx_queue_cfg.dest_cfg.priority, &rx_queue_cfg);
1131 + dev_err(dev, "dpdmai_set_rx_queue() failed\n");
1141 +static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
1144 + struct device *dev = priv->dev;
1145 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
1146 + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
1149 + for (i = 0; i < priv->num_pairs; i++) {
1150 + ppriv->nctx.qman64 = 0;
1151 + ppriv->nctx.dpio_id = 0;
1155 + err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
1157 + dev_err(dev, "dpdmai_reset() failed\n");
1162 +static void __cold dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
1163 + struct list_head *head)
1165 + struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
1166 + /* free the QDMA comp resource */
1167 + list_for_each_entry_safe(comp_tmp, _comp_tmp,
1169 + dma_pool_free(qchan->fd_pool,
1170 + comp_tmp->fd_virt_addr,
1171 + comp_tmp->fd_bus_addr);
1172 + list_del(&comp_tmp->list);
1178 +static void __cold dpaa2_dpdmai_free_channels(
1179 + struct dpaa2_qdma_engine *dpaa2_qdma)
1181 + struct dpaa2_qdma_chan *qchan;
1184 + num = dpaa2_qdma->n_chans;
1185 + for (i = 0; i < num; i++) {
1186 + qchan = &dpaa2_qdma->chans[i];
1187 + dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
1188 + dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
1189 + dma_pool_destroy(qchan->fd_pool);
1193 +static int dpaa2_dpdmai_alloc_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
1195 + struct dpaa2_qdma_chan *dpaa2_chan;
1196 + struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;
1199 + INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
1200 + for (i = 0; i < dpaa2_qdma->n_chans; i++) {
1201 + dpaa2_chan = &dpaa2_qdma->chans[i];
1202 + dpaa2_chan->qdma = dpaa2_qdma;
1203 + dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
1204 + vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
1206 + dpaa2_chan->fd_pool = dma_pool_create("fd_pool",
1207 + dev, FD_POOL_SIZE, 32, 0);
1208 + if (!dpaa2_chan->fd_pool)
1211 + spin_lock_init(&dpaa2_chan->queue_lock);
1212 + INIT_LIST_HEAD(&dpaa2_chan->comp_used);
1213 + INIT_LIST_HEAD(&dpaa2_chan->comp_free);
1218 +static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
1220 + struct dpaa2_qdma_priv *priv;
1221 + struct device *dev = &dpdmai_dev->dev;
1222 + struct dpaa2_qdma_engine *dpaa2_qdma;
1225 + priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1228 + dev_set_drvdata(dev, priv);
1229 + priv->dpdmai_dev = dpdmai_dev;
1231 + priv->iommu_domain = iommu_get_domain_for_dev(dev);
1232 + if (priv->iommu_domain)
1233 + smmu_disable = false;
1235 + /* obtain a MC portal */
1236 + err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
1238 + if (err == -ENXIO)
1239 + err = -EPROBE_DEFER;
1241 + dev_err(dev, "MC portal allocation failed\n");
1242 + goto err_mcportal;
1245 + /* DPDMAI initialization */
1246 + err = dpaa2_qdma_setup(dpdmai_dev);
1248 + dev_err(dev, "dpaa2_dpdmai_setup() failed\n");
1249 + goto err_dpdmai_setup;
1253 + err = dpaa2_qdma_dpio_setup(priv);
1255 + dev_err(dev, "dpaa2_dpdmai_dpio_setup() failed\n");
1256 + goto err_dpio_setup;
1259 + /* DPDMAI binding to DPIO */
1260 + err = dpaa2_dpdmai_bind(priv);
1262 + dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
1266 + /* DPDMAI enable */
1267 + err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
1269 + dev_err(dev, "dpdmai_enable() faile\n");
1273 + dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
1274 + if (!dpaa2_qdma) {
1279 + priv->dpaa2_qdma = dpaa2_qdma;
1280 + dpaa2_qdma->priv = priv;
1282 + dpaa2_qdma->n_chans = NUM_CH;
1284 + err = dpaa2_dpdmai_alloc_channels(dpaa2_qdma);
1286 + dev_err(dev, "QDMA alloc channels faile\n");
1290 + if (soc_device_match(soc_fixup_tuning))
1291 + dpaa2_qdma->qdma_wrtype_fixup = true;
1293 + dpaa2_qdma->qdma_wrtype_fixup = false;
1295 + dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
1296 + dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
1297 + dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);
1299 + dpaa2_qdma->dma_dev.dev = dev;
1300 + dpaa2_qdma->dma_dev.device_alloc_chan_resources
1301 + = dpaa2_qdma_alloc_chan_resources;
1302 + dpaa2_qdma->dma_dev.device_free_chan_resources
1303 + = dpaa2_qdma_free_chan_resources;
1304 + dpaa2_qdma->dma_dev.device_tx_status = dpaa2_qdma_tx_status;
1305 + dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
1306 + dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;
1308 + err = dma_async_device_register(&dpaa2_qdma->dma_dev);
1310 + dev_err(dev, "Can't register NXP QDMA engine.\n");
1317 + dpaa2_dpdmai_free_channels(dpaa2_qdma);
1318 + kfree(dpaa2_qdma);
1320 + dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
1322 + dpaa2_dpdmai_dpio_unbind(priv);
1324 + dpaa2_dpmai_store_free(priv);
1325 + dpaa2_dpdmai_dpio_free(priv);
1327 + dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
1329 + fsl_mc_portal_free(priv->mc_io);
1331 + kfree(priv->ppriv);
1333 + dev_set_drvdata(dev, NULL);
1337 +static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
1339 + struct device *dev;
1340 + struct dpaa2_qdma_priv *priv;
1341 + struct dpaa2_qdma_engine *dpaa2_qdma;
1343 + dev = &ls_dev->dev;
1344 + priv = dev_get_drvdata(dev);
1345 + dpaa2_qdma = priv->dpaa2_qdma;
1347 + dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
1348 + dpaa2_dpdmai_dpio_unbind(priv);
1349 + dpaa2_dpmai_store_free(priv);
1350 + dpaa2_dpdmai_dpio_free(priv);
1351 + dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
1352 + fsl_mc_portal_free(priv->mc_io);
1353 + dev_set_drvdata(dev, NULL);
1354 + dpaa2_dpdmai_free_channels(dpaa2_qdma);
1356 + dma_async_device_unregister(&dpaa2_qdma->dma_dev);
1358 + kfree(dpaa2_qdma);
1363 +static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
1365 + .vendor = FSL_MC_VENDOR_FREESCALE,
1366 + .obj_type = "dpdmai",
1371 +static struct fsl_mc_driver dpaa2_qdma_driver = {
1373 + .name = "dpaa2-qdma",
1374 + .owner = THIS_MODULE,
1376 + .probe = dpaa2_qdma_probe,
1377 + .remove = dpaa2_qdma_remove,
1378 + .match_id_table = dpaa2_qdma_id_table
1381 +static int __init dpaa2_qdma_driver_init(void)
1383 + return fsl_mc_driver_register(&(dpaa2_qdma_driver));
1385 +late_initcall(dpaa2_qdma_driver_init);
1387 +static void __exit fsl_qdma_exit(void)
1389 + fsl_mc_driver_unregister(&(dpaa2_qdma_driver));
1391 +module_exit(fsl_qdma_exit);
1393 +MODULE_DESCRIPTION("NXP DPAA2 qDMA driver");
1394 +MODULE_LICENSE("Dual BSD/GPL");
1396 +++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.h
1398 +/* Copyright 2015 NXP Semiconductor Inc.
1400 + * Redistribution and use in source and binary forms, with or without
1401 + * modification, are permitted provided that the following conditions are met:
1402 + * * Redistributions of source code must retain the above copyright
1403 + * notice, this list of conditions and the following disclaimer.
1404 + * * Redistributions in binary form must reproduce the above copyright
1405 + * notice, this list of conditions and the following disclaimer in the
1406 + * documentation and/or other materials provided with the distribution.
1407 + * * Neither the name of NXP Semiconductor nor the
1408 + * names of its contributors may be used to endorse or promote products
1409 + * derived from this software without specific prior written permission.
1412 + * ALTERNATIVELY, this software may be distributed under the terms of the
1413 + * GNU General Public License ("GPL") as published by the Free Software
1414 + * Foundation, either version 2 of that License or (at your option) any
1417 + * THIS SOFTWARE IS PROVIDED BY NXP Semiconductor ``AS IS'' AND ANY
1418 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1419 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1420 + * DISCLAIMED. IN NO EVENT SHALL NXP Semiconductor BE LIABLE FOR ANY
1421 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1422 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1423 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1424 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1425 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1426 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1429 +#ifndef __DPAA2_QDMA_H
1430 +#define __DPAA2_QDMA_H
1432 +#define LONG_FORMAT 1
1434 +#define DPAA2_QDMA_STORE_SIZE 16
1437 +#define QDMA_DMR_OFFSET 0x0
1438 +#define QDMA_DQ_EN (0 << 30)
1439 +#define QDMA_DQ_DIS (1 << 30)
1441 +#define QDMA_DSR_M_OFFSET 0x10004
1443 +struct dpaa2_qdma_sd_d {
1447 +	uint32_t ssd:12;	/* source stride distance */
1448 +	uint32_t sss:12;	/* source stride size */
1452 + uint32_t dsd:12; /* Destination stride distance */
1453 + uint32_t dss:12; /* Destination stride size */
1457 + uint32_t rbpcmd; /* Route-by-port command */
1459 +} __attribute__((__packed__));
1460 +/* Source descriptor command read transaction type for RBP=0:
1461 + coherent copy of cacheable memory */
1462 +#define QDMA_SD_CMD_RDTTYPE_COHERENT (0xb << 28)
1463 +/* Destination descriptor command write transaction type for RBP=0:
1464 + coherent copy of cacheable memory */
1465 +#define QDMA_DD_CMD_WRTTYPE_COHERENT (0x6 << 28)
1466 +#define LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT (0xb << 28)
1468 +#define QMAN_FD_FMT_ENABLE (1) /* frame list table enable */
1469 +#define QMAN_FD_BMT_ENABLE (1 << 15) /* bypass memory translation */
1470 +#define QMAN_FD_BMT_DISABLE (0 << 15) /* bypass memory translation */
1471 +#define QMAN_FD_SL_DISABLE	(0 << 14) /* short length disabled */
1472 +#define QMAN_FD_SL_ENABLE	(1 << 14) /* short length enabled */
1474 +#define QDMA_FINAL_BIT_DISABLE (0 << 31) /* final bit disable */
1475 +#define QDMA_FINAL_BIT_ENABLE (1 << 31) /* final bit enable */
1477 +#define QDMA_FD_SHORT_FORMAT (1 << 11) /* short format */
1478 +#define QDMA_FD_LONG_FORMAT (0 << 11) /* long format */
1479 +#define QDMA_SER_DISABLE (0 << 8) /* no notification */
1480 +#define QDMA_SER_CTX (1 << 8) /* notification by FQD_CTX[fqid] */
1481 +#define QDMA_SER_DEST (2 << 8) /* notification by destination desc */
1482 +#define QDMA_SER_BOTH		(3 << 8)  /* source and dest notification */
1483 +#define QDMA_FD_SPF_ENALBE (1 << 30) /* source prefetch enable */
1485 +#define QMAN_FD_VA_ENABLE (1 << 14) /* Address used is virtual address */
1486 +#define QMAN_FD_VA_DISABLE (0 << 14)/* Address used is a real address */
1487 +#define QMAN_FD_CBMT_ENABLE (1 << 15) /* Flow Context: 49bit physical address */
1488 +#define QMAN_FD_CBMT_DISABLE (0 << 15) /* Flow Context: 64bit virtual address */
1489 +#define QMAN_FD_SC_DISABLE (0 << 27) /* stashing control */
1491 +#define QDMA_FL_FMT_SBF (0x0) /* Single buffer frame */
1492 +#define QDMA_FL_FMT_SGE 0x2 /* Scatter gather frame */
1493 +#define QDMA_FL_BMT_ENABLE (0x1 << 15)/* enable bypass memory translation */
1494 +#define QDMA_FL_BMT_DISABLE	0x0 /* disable bypass memory translation */
1495 +#define QDMA_FL_SL_LONG (0x0 << 2)/* long length */
1496 +#define QDMA_FL_SL_SHORT 0x1 /* short length */
1497 +#define QDMA_FL_F (0x1)/* last frame list bit */
1498 +/*Description of Frame list table structure*/
1500 +struct dpaa2_qdma_chan {
1501 + struct virt_dma_chan vchan;
1502 + struct virt_dma_desc vdesc;
1503 + enum dma_status status;
1504 + struct dpaa2_qdma_engine *qdma;
1506 + struct mutex dpaa2_queue_mutex;
1507 + spinlock_t queue_lock;
1508 + struct dma_pool *fd_pool;
1510 + struct list_head comp_used;
1511 + struct list_head comp_free;
1515 +struct dpaa2_qdma_comp {
1516 + dma_addr_t fd_bus_addr;
1517 + dma_addr_t fl_bus_addr;
1518 + dma_addr_t desc_bus_addr;
1519 + void *fd_virt_addr;
1520 + void *fl_virt_addr;
1521 + void *desc_virt_addr;
1522 + struct dpaa2_qdma_chan *qchan;
1523 + struct virt_dma_desc vdesc;
1524 + struct list_head list;
1527 +struct dpaa2_qdma_engine {
1528 + struct dma_device dma_dev;
1530 + struct dpaa2_qdma_chan chans[NUM_CH];
1531 + bool qdma_wrtype_fixup;
1533 + struct dpaa2_qdma_priv *priv;
1537 + * dpaa2_qdma_priv - driver private data
1539 +struct dpaa2_qdma_priv {
1542 + struct iommu_domain *iommu_domain;
1543 + struct dpdmai_attr dpdmai_attr;
1544 + struct device *dev;
1545 + struct fsl_mc_io *mc_io;
1546 + struct fsl_mc_device *dpdmai_dev;
1548 + struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM];
1549 + struct dpdmai_tx_queue_attr tx_queue_attr[DPDMAI_PRIO_NUM];
1551 + uint8_t num_pairs;
1553 + struct dpaa2_qdma_engine *dpaa2_qdma;
1554 + struct dpaa2_qdma_priv_per_prio *ppriv;
1557 +struct dpaa2_qdma_priv_per_prio {
1562 + struct dpaa2_io_store *store;
1563 + struct dpaa2_io_notification_ctx nctx;
1565 + struct dpaa2_qdma_priv *priv;
1568 +static struct soc_device_attribute soc_fixup_tuning[] = {
1569 + { .family = "QorIQ LX2160A"},
1573 +/* FD pool size: one FD + 3 Frame list + 2 source/destination descriptor */
1574 +#define FD_POOL_SIZE (sizeof(struct dpaa2_fd) + \
1575 + sizeof(struct dpaa2_fl_entry) * 3 + \
1576 + sizeof(struct dpaa2_qdma_sd_d) * 2)
1578 +#endif /* __DPAA2_QDMA_H */
1580 +++ b/drivers/dma/dpaa2-qdma/dpdmai.c
1582 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
1584 + * Redistribution and use in source and binary forms, with or without
1585 + * modification, are permitted provided that the following conditions are met:
1586 + * * Redistributions of source code must retain the above copyright
1587 + * notice, this list of conditions and the following disclaimer.
1588 + * * Redistributions in binary form must reproduce the above copyright
1589 + * notice, this list of conditions and the following disclaimer in the
1590 + * documentation and/or other materials provided with the distribution.
1591 + * * Neither the name of the above-listed copyright holders nor the
1592 + * names of any contributors may be used to endorse or promote products
1593 + * derived from this software without specific prior written permission.
1596 + * ALTERNATIVELY, this software may be distributed under the terms of the
1597 + * GNU General Public License ("GPL") as published by the Free Software
1598 + * Foundation, either version 2 of that License or (at your option) any
1601 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
1602 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1603 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1604 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
1605 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1606 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
1607 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
1608 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
1609 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
1610 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1611 + * POSSIBILITY OF SUCH DAMAGE.
1613 +#include <linux/types.h>
1614 +#include <linux/io.h>
1615 +#include "fsl_dpdmai.h"
1616 +#include "fsl_dpdmai_cmd.h"
1617 +#include <linux/fsl/mc.h>
1619 +struct dpdmai_cmd_open {
1623 +struct dpdmai_rsp_get_attributes {
1625 + u8 num_of_priorities;
1632 +struct dpdmai_cmd_queue {
1645 +struct dpdmai_rsp_get_tx_queue {
1651 +int dpdmai_open(struct fsl_mc_io *mc_io,
1652 + uint32_t cmd_flags,
1656 + struct fsl_mc_command cmd = { 0 };
1657 + struct dpdmai_cmd_open *cmd_params;
1660 + /* prepare command */
1661 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
1665 + cmd_params = (struct dpdmai_cmd_open *)cmd.params;
1666 + cmd_params->dpdmai_id = cpu_to_le32(dpdmai_id);
1668 + /* send command to mc*/
1669 + err = mc_send_command(mc_io, &cmd);
1673 + /* retrieve response parameters */
1674 + *token = mc_cmd_hdr_read_token(&cmd);
1678 +int dpdmai_close(struct fsl_mc_io *mc_io,
1679 + uint32_t cmd_flags,
1682 + struct fsl_mc_command cmd = { 0 };
1684 + /* prepare command */
1685 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE,
1686 + cmd_flags, token);
1688 + /* send command to mc*/
1689 + return mc_send_command(mc_io, &cmd);
1692 +int dpdmai_create(struct fsl_mc_io *mc_io,
1693 + uint32_t cmd_flags,
1694 + const struct dpdmai_cfg *cfg,
1697 + struct fsl_mc_command cmd = { 0 };
1700 + /* prepare command */
1701 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
1704 + DPDMAI_CMD_CREATE(cmd, cfg);
1706 + /* send command to mc*/
1707 + err = mc_send_command(mc_io, &cmd);
1711 + /* retrieve response parameters */
1712 + *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
1717 +int dpdmai_destroy(struct fsl_mc_io *mc_io,
1718 + uint32_t cmd_flags,
1721 + struct fsl_mc_command cmd = { 0 };
1723 + /* prepare command */
1724 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
1728 + /* send command to mc*/
1729 + return mc_send_command(mc_io, &cmd);
1732 +int dpdmai_enable(struct fsl_mc_io *mc_io,
1733 + uint32_t cmd_flags,
1736 + struct fsl_mc_command cmd = { 0 };
1738 + /* prepare command */
1739 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE,
1743 + /* send command to mc*/
1744 + return mc_send_command(mc_io, &cmd);
1747 +int dpdmai_disable(struct fsl_mc_io *mc_io,
1748 + uint32_t cmd_flags,
1751 + struct fsl_mc_command cmd = { 0 };
1753 + /* prepare command */
1754 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE,
1758 + /* send command to mc*/
1759 + return mc_send_command(mc_io, &cmd);
1762 +int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
1763 + uint32_t cmd_flags,
1767 + struct fsl_mc_command cmd = { 0 };
1769 + /* prepare command */
1770 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED,
1774 + /* send command to mc*/
1775 + err = mc_send_command(mc_io, &cmd);
1779 + /* retrieve response parameters */
1780 + DPDMAI_RSP_IS_ENABLED(cmd, *en);
1785 +int dpdmai_reset(struct fsl_mc_io *mc_io,
1786 + uint32_t cmd_flags,
1789 + struct fsl_mc_command cmd = { 0 };
1791 + /* prepare command */
1792 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET,
1796 + /* send command to mc*/
1797 + return mc_send_command(mc_io, &cmd);
1800 +int dpdmai_get_irq(struct fsl_mc_io *mc_io,
1801 + uint32_t cmd_flags,
1803 + uint8_t irq_index,
1805 + struct dpdmai_irq_cfg *irq_cfg)
1807 + struct fsl_mc_command cmd = { 0 };
1810 + /* prepare command */
1811 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ,
1814 + DPDMAI_CMD_GET_IRQ(cmd, irq_index);
1816 + /* send command to mc*/
1817 + err = mc_send_command(mc_io, &cmd);
1821 + /* retrieve response parameters */
1822 + DPDMAI_RSP_GET_IRQ(cmd, *type, irq_cfg);
1827 +int dpdmai_set_irq(struct fsl_mc_io *mc_io,
1828 + uint32_t cmd_flags,
1830 + uint8_t irq_index,
1831 + struct dpdmai_irq_cfg *irq_cfg)
1833 + struct fsl_mc_command cmd = { 0 };
1835 + /* prepare command */
1836 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ,
1839 + DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
1841 + /* send command to mc*/
1842 + return mc_send_command(mc_io, &cmd);
1845 +int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
1846 + uint32_t cmd_flags,
1848 + uint8_t irq_index,
1851 + struct fsl_mc_command cmd = { 0 };
1854 + /* prepare command */
1855 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_ENABLE,
1858 + DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
1860 + /* send command to mc*/
1861 + err = mc_send_command(mc_io, &cmd);
1865 + /* retrieve response parameters */
1866 + DPDMAI_RSP_GET_IRQ_ENABLE(cmd, *en);
1871 +int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
1872 + uint32_t cmd_flags,
1874 + uint8_t irq_index,
1877 + struct fsl_mc_command cmd = { 0 };
1879 + /* prepare command */
1880 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_ENABLE,
1883 + DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
1885 + /* send command to mc*/
1886 + return mc_send_command(mc_io, &cmd);
1889 +int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
1890 + uint32_t cmd_flags,
1892 + uint8_t irq_index,
1895 + struct fsl_mc_command cmd = { 0 };
1898 + /* prepare command */
1899 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_MASK,
1902 + DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index);
1904 + /* send command to mc*/
1905 + err = mc_send_command(mc_io, &cmd);
1909 + /* retrieve response parameters */
1910 + DPDMAI_RSP_GET_IRQ_MASK(cmd, *mask);
1915 +int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
1916 + uint32_t cmd_flags,
1918 + uint8_t irq_index,
1921 + struct fsl_mc_command cmd = { 0 };
1923 + /* prepare command */
1924 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_MASK,
1927 + DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
1929 + /* send command to mc*/
1930 + return mc_send_command(mc_io, &cmd);
1933 +int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
1934 + uint32_t cmd_flags,
1936 + uint8_t irq_index,
1939 + struct fsl_mc_command cmd = { 0 };
1942 + /* prepare command */
1943 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_STATUS,
1946 + DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
1948 + /* send command to mc*/
1949 + err = mc_send_command(mc_io, &cmd);
1953 + /* retrieve response parameters */
1954 + DPDMAI_RSP_GET_IRQ_STATUS(cmd, *status);
1959 +int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
1960 + uint32_t cmd_flags,
1962 + uint8_t irq_index,
1965 + struct fsl_mc_command cmd = { 0 };
1967 + /* prepare command */
1968 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLEAR_IRQ_STATUS,
1971 + DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
1973 + /* send command to mc*/
1974 + return mc_send_command(mc_io, &cmd);
1977 +int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
1978 + uint32_t cmd_flags,
1980 + struct dpdmai_attr *attr)
1982 + struct fsl_mc_command cmd = { 0 };
1984 + struct dpdmai_rsp_get_attributes *rsp_params;
1986 + /* prepare command */
1987 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR,
1991 + /* send command to mc*/
1992 + err = mc_send_command(mc_io, &cmd);
1996 + /* retrieve response parameters */
1997 + rsp_params = (struct dpdmai_rsp_get_attributes *)cmd.params;
1998 + attr->id = le32_to_cpu(rsp_params->id);
1999 + attr->version.major = le16_to_cpu(rsp_params->major);
2000 + attr->version.minor = le16_to_cpu(rsp_params->minor);
2001 + attr->num_of_priorities = rsp_params->num_of_priorities;
2007 +int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
2008 + uint32_t cmd_flags,
2011 + const struct dpdmai_rx_queue_cfg *cfg)
2013 + struct fsl_mc_command cmd = { 0 };
2014 + struct dpdmai_cmd_queue *cmd_params;
2016 + /* prepare command */
2017 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE,
2021 + cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
2022 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
2023 + cmd_params->priority = cfg->dest_cfg.priority;
2024 + cmd_params->queue = priority;
2025 + cmd_params->dest_type = cfg->dest_cfg.dest_type;
2026 + cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
2027 + cmd_params->options = cpu_to_le32(cfg->options);
2030 + /* send command to mc*/
2031 + return mc_send_command(mc_io, &cmd);
2034 +int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
2035 + uint32_t cmd_flags,
2037 + uint8_t priority, struct dpdmai_rx_queue_attr *attr)
2039 + struct fsl_mc_command cmd = { 0 };
2040 + struct dpdmai_cmd_queue *cmd_params;
2043 + /* prepare command */
2044 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE,
2048 + cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
2049 + cmd_params->queue = priority;
2051 + /* send command to mc*/
2052 + err = mc_send_command(mc_io, &cmd);
2056 + /* retrieve response parameters */
2057 + attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
2058 + attr->dest_cfg.priority = cmd_params->priority;
2059 + attr->dest_cfg.dest_type = cmd_params->dest_type;
2060 + attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
2061 + attr->fqid = le32_to_cpu(cmd_params->fqid);
2066 +int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
2067 + uint32_t cmd_flags,
2070 + struct dpdmai_tx_queue_attr *attr)
2072 + struct fsl_mc_command cmd = { 0 };
2073 + struct dpdmai_cmd_queue *cmd_params;
2074 + struct dpdmai_rsp_get_tx_queue *rsp_params;
2077 + /* prepare command */
2078 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE,
2082 + cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
2083 + cmd_params->queue = priority;
2085 + /* send command to mc*/
2086 + err = mc_send_command(mc_io, &cmd);
2090 + /* retrieve response parameters */
2092 + rsp_params = (struct dpdmai_rsp_get_tx_queue *)cmd.params;
2093 + attr->fqid = le32_to_cpu(rsp_params->fqid);
2098 +++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai.h
2100 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
2102 + * Redistribution and use in source and binary forms, with or without
2103 + * modification, are permitted provided that the following conditions are met:
2104 + * * Redistributions of source code must retain the above copyright
2105 + * notice, this list of conditions and the following disclaimer.
2106 + * * Redistributions in binary form must reproduce the above copyright
2107 + * notice, this list of conditions and the following disclaimer in the
2108 + * documentation and/or other materials provided with the distribution.
2109 + * * Neither the name of the above-listed copyright holders nor the
2110 + * names of any contributors may be used to endorse or promote products
2111 + * derived from this software without specific prior written permission.
2114 + * ALTERNATIVELY, this software may be distributed under the terms of the
2115 + * GNU General Public License ("GPL") as published by the Free Software
2116 + * Foundation, either version 2 of that License or (at your option) any
2119 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
2120 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2121 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2122 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
2123 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2124 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2125 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2126 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
2127 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
2128 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
2129 + * POSSIBILITY OF SUCH DAMAGE.
2131 +#ifndef __FSL_DPDMAI_H
2132 +#define __FSL_DPDMAI_H
2136 +/* Data Path DMA Interface API
2137 + * Contains initialization APIs and runtime control APIs for DPDMAI
2140 +/* General DPDMAI macros */
2143 + * Maximum number of Tx/Rx priorities per DPDMAI object
2145 +#define DPDMAI_PRIO_NUM 2
2148 + * All queues considered; see dpdmai_set_rx_queue()
2150 +#define DPDMAI_ALL_QUEUES (uint8_t)(-1)
2153 + * dpdmai_open() - Open a control session for the specified object
2154 + * @mc_io: Pointer to MC portal's I/O object
2155 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2156 + * @dpdmai_id: DPDMAI unique ID
2157 + * @token: Returned token; use in subsequent API calls
2159 + * This function can be used to open a control session for an
2160 + * already created object; an object may have been declared in
2161 + * the DPL or by calling the dpdmai_create() function.
2162 + * This function returns a unique authentication token,
2163 + * associated with the specific object ID and the specific MC
2164 + * portal; this token must be used in all subsequent commands for
2165 + * this specific object.
2167 + * Return: '0' on Success; Error code otherwise.
2169 +int dpdmai_open(struct fsl_mc_io *mc_io,
2170 + uint32_t cmd_flags,
2175 + * dpdmai_close() - Close the control session of the object
2176 + * @mc_io: Pointer to MC portal's I/O object
2177 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2178 + * @token: Token of DPDMAI object
2180 + * After this function is called, no further operations are
2181 + * allowed on the object without opening a new control session.
2183 + * Return: '0' on Success; Error code otherwise.
2185 +int dpdmai_close(struct fsl_mc_io *mc_io,
2186 + uint32_t cmd_flags,
2190 + * struct dpdmai_cfg - Structure representing DPDMAI configuration
2191 + * @priorities: Priorities for the DMA hardware processing; valid priorities are
2192 + * configured with values 1-8; the entry following last valid entry
2193 + * should be configured with 0
2195 +struct dpdmai_cfg {
2196 + uint8_t priorities[DPDMAI_PRIO_NUM];
2200 + * dpdmai_create() - Create the DPDMAI object
2201 + * @mc_io: Pointer to MC portal's I/O object
2202 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2203 + * @cfg: Configuration structure
2204 + * @token: Returned token; use in subsequent API calls
2206 + * Create the DPDMAI object, allocate required resources and
2207 + * perform required initialization.
2209 + * The object can be created either by declaring it in the
2210 + * DPL file, or by calling this function.
2212 + * This function returns a unique authentication token,
2213 + * associated with the specific object ID and the specific MC
2214 + * portal; this token must be used in all subsequent calls to
2215 + * this specific object. For objects that are created using the
2216 + * DPL file, call dpdmai_open() function to get an authentication
2219 + * Return: '0' on Success; Error code otherwise.
2221 +int dpdmai_create(struct fsl_mc_io *mc_io,
2222 + uint32_t cmd_flags,
2223 + const struct dpdmai_cfg *cfg,
2227 + * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
2228 + * @mc_io: Pointer to MC portal's I/O object
2229 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2230 + * @token: Token of DPDMAI object
2232 + * Return: '0' on Success; error code otherwise.
2234 +int dpdmai_destroy(struct fsl_mc_io *mc_io,
2235 + uint32_t cmd_flags,
2239 + * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
2240 + * @mc_io: Pointer to MC portal's I/O object
2241 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2242 + * @token: Token of DPDMAI object
2244 + * Return: '0' on Success; Error code otherwise.
2246 +int dpdmai_enable(struct fsl_mc_io *mc_io,
2247 + uint32_t cmd_flags,
2251 + * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames.
2252 + * @mc_io: Pointer to MC portal's I/O object
2253 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2254 + * @token: Token of DPDMAI object
2256 + * Return: '0' on Success; Error code otherwise.
2258 +int dpdmai_disable(struct fsl_mc_io *mc_io,
2259 + uint32_t cmd_flags,
2263 + * dpdmai_is_enabled() - Check if the DPDMAI is enabled.
2264 + * @mc_io: Pointer to MC portal's I/O object
2265 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2266 + * @token: Token of DPDMAI object
2267 + * @en: Returns '1' if object is enabled; '0' otherwise
2269 + * Return: '0' on Success; Error code otherwise.
2271 +int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
2272 + uint32_t cmd_flags,
2277 + * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state.
2278 + * @mc_io: Pointer to MC portal's I/O object
2279 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2280 + * @token: Token of DPDMAI object
2282 + * Return: '0' on Success; Error code otherwise.
2284 +int dpdmai_reset(struct fsl_mc_io *mc_io,
2285 + uint32_t cmd_flags,
2289 + * struct dpdmai_irq_cfg - IRQ configuration
2290 + * @addr: Address that must be written to signal a message-based interrupt
2291 + * @val: Value to write into irq_addr address
2292 + * @irq_num: A user defined number associated with this IRQ
2294 +struct dpdmai_irq_cfg {
2301 + * dpdmai_set_irq() - Set IRQ information for the DPDMAI to trigger an interrupt.
2302 + * @mc_io: Pointer to MC portal's I/O object
2303 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2304 + * @token: Token of DPDMAI object
2305 + * @irq_index: Identifies the interrupt index to configure
2306 + * @irq_cfg: IRQ configuration
2308 + * Return: '0' on Success; Error code otherwise.
2310 +int dpdmai_set_irq(struct fsl_mc_io *mc_io,
2311 + uint32_t cmd_flags,
2313 + uint8_t irq_index,
2314 + struct dpdmai_irq_cfg *irq_cfg);
2317 + * dpdmai_get_irq() - Get IRQ information from the DPDMAI
2319 + * @mc_io: Pointer to MC portal's I/O object
2320 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2321 + * @token: Token of DPDMAI object
2322 + * @irq_index: The interrupt index to configure
2323 + * @type: Interrupt type: 0 represents message interrupt
2324 + * type (both irq_addr and irq_val are valid)
2325 + * @irq_cfg: IRQ attributes
2327 + * Return: '0' on Success; Error code otherwise.
2329 +int dpdmai_get_irq(struct fsl_mc_io *mc_io,
2330 + uint32_t cmd_flags,
2332 + uint8_t irq_index,
2334 + struct dpdmai_irq_cfg *irq_cfg);
2337 + * dpdmai_set_irq_enable() - Set overall interrupt state.
2338 + * @mc_io: Pointer to MC portal's I/O object
2339 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2340 + * @token: Token of DPDMAI object
2341 + * @irq_index: The interrupt index to configure
2342 + * @en: Interrupt state - enable = 1, disable = 0
2344 + * Allows GPP software to control when interrupts are generated.
2345 + * Each interrupt can have up to 32 causes. The enable/disable controls the
2346 + * overall interrupt state. If the interrupt is disabled no causes will cause
2349 + * Return: '0' on Success; Error code otherwise.
2351 +int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
2352 + uint32_t cmd_flags,
2354 + uint8_t irq_index,
2358 + * dpdmai_get_irq_enable() - Get overall interrupt state
2359 + * @mc_io: Pointer to MC portal's I/O object
2360 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2361 + * @token: Token of DPDMAI object
2362 + * @irq_index: The interrupt index to configure
2363 + * @en: Returned Interrupt state - enable = 1, disable = 0
2365 + * Return: '0' on Success; Error code otherwise.
2367 +int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
2368 + uint32_t cmd_flags,
2370 + uint8_t irq_index,
2374 + * dpdmai_set_irq_mask() - Set interrupt mask.
2375 + * @mc_io: Pointer to MC portal's I/O object
2376 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2377 + * @token: Token of DPDMAI object
2378 + * @irq_index: The interrupt index to configure
2379 + * @mask: event mask to trigger interrupt;
2381 + * 0 = ignore event
2382 + * 1 = consider event for asserting IRQ
2384 + * Every interrupt can have up to 32 causes and the interrupt model supports
2385 + * masking/unmasking each cause independently
2387 + * Return: '0' on Success; Error code otherwise.
2389 +int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
2390 + uint32_t cmd_flags,
2392 + uint8_t irq_index,
2396 + * dpdmai_get_irq_mask() - Get interrupt mask.
2397 + * @mc_io: Pointer to MC portal's I/O object
2398 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2399 + * @token: Token of DPDMAI object
2400 + * @irq_index: The interrupt index to configure
2401 + * @mask: Returned event mask to trigger interrupt
2403 + * Every interrupt can have up to 32 causes and the interrupt model supports
2404 + * masking/unmasking each cause independently
2406 + * Return: '0' on Success; Error code otherwise.
2408 +int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
2409 + uint32_t cmd_flags,
2411 + uint8_t irq_index,
2415 + * dpdmai_get_irq_status() - Get the current status of any pending interrupts
2416 + * @mc_io: Pointer to MC portal's I/O object
2417 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2418 + * @token: Token of DPDMAI object
2419 + * @irq_index: The interrupt index to configure
2420 + * @status: Returned interrupts status - one bit per cause:
2421 + * 0 = no interrupt pending
2422 + * 1 = interrupt pending
2424 + * Return: '0' on Success; Error code otherwise.
2426 +int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
2427 + uint32_t cmd_flags,
2429 + uint8_t irq_index,
2430 + uint32_t *status);
2433 + * dpdmai_clear_irq_status() - Clear a pending interrupt's status
2434 + * @mc_io: Pointer to MC portal's I/O object
2435 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2436 + * @token: Token of DPDMAI object
2437 + * @irq_index: The interrupt index to configure
2438 + * @status: bits to clear (W1C) - one bit per cause:
2439 + * 0 = don't change
2440 + * 1 = clear status bit
2442 + * Return: '0' on Success; Error code otherwise.
2444 +int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
2445 + uint32_t cmd_flags,
2447 + uint8_t irq_index,
2451 + * struct dpdmai_attr - Structure representing DPDMAI attributes
2452 + * @id: DPDMAI object ID
2453 + * @version: DPDMAI version
2454 + * @num_of_priorities: number of priorities
2456 +struct dpdmai_attr {
2459 + * struct version - DPDMAI version
2460 + * @major: DPDMAI major version
2461 + * @minor: DPDMAI minor version
2467 + uint8_t num_of_priorities;
2471 + * dpdmai_get_attributes() - Retrieve DPDMAI attributes.
2472 + * @mc_io: Pointer to MC portal's I/O object
2473 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2474 + * @token: Token of DPDMAI object
2475 + * @attr: Returned object's attributes
2477 + * Return: '0' on Success; Error code otherwise.
2479 +int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
2480 + uint32_t cmd_flags,
2482 + struct dpdmai_attr *attr);
2485 + * enum dpdmai_dest - DPDMAI destination types
2486 + * @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode
2487 + * and does not generate FQDAN notifications; user is expected to dequeue
2488 + * from the queue based on polling or other user-defined method
2489 + * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
2490 + * notifications to the specified DPIO; user is expected to dequeue
2491 + * from the queue only after notification is received
2492 + * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate
2493 + * FQDAN notifications, but is connected to the specified DPCON object;
2494 + * user is expected to dequeue from the DPCON channel
2497 + DPDMAI_DEST_NONE = 0,
2498 + DPDMAI_DEST_DPIO = 1,
2499 + DPDMAI_DEST_DPCON = 2
2503 + * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters
2504 + * @dest_type: Destination type
2505 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
2506 + * @priority: Priority selection within the DPIO or DPCON channel; valid values
2507 + * are 0-1 or 0-7, depending on the number of priorities in that
2508 + * channel; not relevant for 'DPDMAI_DEST_NONE' option
2510 +struct dpdmai_dest_cfg {
2511 + enum dpdmai_dest dest_type;
2516 +/* DPDMAI queue modification options */
2519 + * Select to modify the user's context associated with the queue
2521 +#define DPDMAI_QUEUE_OPT_USER_CTX 0x00000001
2524 + * Select to modify the queue's destination
2526 +#define DPDMAI_QUEUE_OPT_DEST 0x00000002
2529 + * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration
2530 + * @options: Flags representing the suggested modifications to the queue;
2531 + * Use any combination of 'DPDMAI_QUEUE_OPT_<X>' flags
2532 + * @user_ctx: User context value provided in the frame descriptor of each
2534 + * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained in 'options'
2535 + * @dest_cfg: Queue destination parameters;
2536 + * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options'
2538 +struct dpdmai_rx_queue_cfg {
2540 + uint64_t user_ctx;
2541 + struct dpdmai_dest_cfg dest_cfg;
2546 + * dpdmai_set_rx_queue() - Set Rx queue configuration
2547 + * @mc_io: Pointer to MC portal's I/O object
2548 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2549 + * @token: Token of DPDMAI object
2550 + * @priority: Select the queue relative to number of
2551 + * priorities configured at DPDMAI creation; use
2552 + * DPDMAI_ALL_QUEUES to configure all Rx queues
2554 + * @cfg: Rx queue configuration
2556 + * Return: '0' on Success; Error code otherwise.
2558 +int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
2559 + uint32_t cmd_flags,
2562 + const struct dpdmai_rx_queue_cfg *cfg);
2565 + * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues
2566 + * @user_ctx: User context value provided in the frame descriptor of each
2568 + * @dest_cfg: Queue destination configuration
2569 + * @fqid: Virtual FQID value to be used for dequeue operations
2571 +struct dpdmai_rx_queue_attr {
2572 + uint64_t user_ctx;
2573 + struct dpdmai_dest_cfg dest_cfg;
2578 + * dpdmai_get_rx_queue() - Retrieve Rx queue attributes.
2579 + * @mc_io: Pointer to MC portal's I/O object
2580 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2581 + * @token: Token of DPDMAI object
2582 + * @priority: Select the queue relative to number of
2583 + * priorities configured at DPDMAI creation
2584 + * @attr: Returned Rx queue attributes
2586 + * Return: '0' on Success; Error code otherwise.
2588 +int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
2589 + uint32_t cmd_flags,
2592 + struct dpdmai_rx_queue_attr *attr);
2595 + * struct dpdmai_tx_queue_attr - Structure representing attributes of Tx queues
2596 + * @fqid: Virtual FQID to be used for sending frames to DMA hardware
2599 +struct dpdmai_tx_queue_attr {
2604 + * dpdmai_get_tx_queue() - Retrieve Tx queue attributes.
2605 + * @mc_io: Pointer to MC portal's I/O object
2606 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2607 + * @token: Token of DPDMAI object
2608 + * @priority: Select the queue relative to number of
2609 + * priorities configured at DPDMAI creation
2610 + * @attr: Returned Tx queue attributes
2612 + * Return: '0' on Success; Error code otherwise.
2614 +int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
2615 + uint32_t cmd_flags,
2618 + struct dpdmai_tx_queue_attr *attr);
2620 +#endif /* __FSL_DPDMAI_H */
2622 +++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
2624 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
2626 + * Redistribution and use in source and binary forms, with or without
2627 + * modification, are permitted provided that the following conditions are met:
2628 + * * Redistributions of source code must retain the above copyright
2629 + * notice, this list of conditions and the following disclaimer.
2630 + * * Redistributions in binary form must reproduce the above copyright
2631 + * notice, this list of conditions and the following disclaimer in the
2632 + * documentation and/or other materials provided with the distribution.
2633 + * * Neither the name of the above-listed copyright holders nor the
2634 + * names of any contributors may be used to endorse or promote products
2635 + * derived from this software without specific prior written permission.
2638 + * ALTERNATIVELY, this software may be distributed under the terms of the
2639 + * GNU General Public License ("GPL") as published by the Free Software
2640 + * Foundation, either version 2 of that License or (at your option) any
2643 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
2644 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2645 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2646 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
2647 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2648 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2649 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2650 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
2651 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
2652 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
2653 + * POSSIBILITY OF SUCH DAMAGE.
2655 +#ifndef _FSL_DPDMAI_CMD_H
2656 +#define _FSL_DPDMAI_CMD_H
2658 +/* DPDMAI Version */
2659 +#define DPDMAI_VER_MAJOR 2
2660 +#define DPDMAI_VER_MINOR 2
2662 +#define DPDMAI_CMD_BASE_VERSION 0
2663 +#define DPDMAI_CMD_ID_OFFSET 4
2666 +#define DPDMAI_CMDID_CLOSE ((0x800 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2667 +#define DPDMAI_CMDID_OPEN ((0x80E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2668 +#define DPDMAI_CMDID_CREATE ((0x90E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2669 +#define DPDMAI_CMDID_DESTROY ((0x900 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2671 +#define DPDMAI_CMDID_ENABLE ((0x002 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2672 +#define DPDMAI_CMDID_DISABLE ((0x003 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2673 +#define DPDMAI_CMDID_GET_ATTR ((0x004 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2674 +#define DPDMAI_CMDID_RESET ((0x005 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2675 +#define DPDMAI_CMDID_IS_ENABLED ((0x006 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2677 +#define DPDMAI_CMDID_SET_IRQ ((0x010 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2678 +#define DPDMAI_CMDID_GET_IRQ ((0x011 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2679 +#define DPDMAI_CMDID_SET_IRQ_ENABLE ((0x012 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2680 +#define DPDMAI_CMDID_GET_IRQ_ENABLE ((0x013 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2681 +#define DPDMAI_CMDID_SET_IRQ_MASK ((0x014 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2682 +#define DPDMAI_CMDID_GET_IRQ_MASK ((0x015 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2683 +#define DPDMAI_CMDID_GET_IRQ_STATUS ((0x016 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2684 +#define DPDMAI_CMDID_CLEAR_IRQ_STATUS ((0x017 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2686 +#define DPDMAI_CMDID_SET_RX_QUEUE ((0x1A0 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2687 +#define DPDMAI_CMDID_GET_RX_QUEUE ((0x1A1 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2688 +#define DPDMAI_CMDID_GET_TX_QUEUE ((0x1A2 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2691 +#define MC_CMD_HDR_TOKEN_O 32 /* Token field offset */
2692 +#define MC_CMD_HDR_TOKEN_S 16 /* Token field size */
2695 +#define MAKE_UMASK64(_width) \
2696 + ((uint64_t)((_width) < 64 ? ((uint64_t)1 << (_width)) - 1 : \
2699 +static inline uint64_t mc_enc(int lsoffset, int width, uint64_t val)
2701 + return (uint64_t)(((uint64_t)val & MAKE_UMASK64(width)) << lsoffset);
2704 +static inline uint64_t mc_dec(uint64_t val, int lsoffset, int width)
2706 + return (uint64_t)((val >> lsoffset) & MAKE_UMASK64(width));
2709 +#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
2710 + ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
2712 +#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \
2713 + (_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width)))
2715 +#define MC_CMD_HDR_READ_TOKEN(_hdr) \
2716 + ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S))
2718 +/* cmd, param, offset, width, type, arg_name */
2719 +#define DPDMAI_CMD_OPEN(cmd, dpdmai_id) \
2720 + MC_CMD_OP(cmd, 0, 0, 32, int, dpdmai_id)
2722 +/* cmd, param, offset, width, type, arg_name */
2723 +#define DPDMAI_CMD_CREATE(cmd, cfg) \
2725 + MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->priorities[0]);\
2726 + MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priorities[1]);\
2729 +/* cmd, param, offset, width, type, arg_name */
2730 +#define DPDMAI_RSP_IS_ENABLED(cmd, en) \
2731 + MC_RSP_OP(cmd, 0, 0, 1, int, en)
2733 +/* cmd, param, offset, width, type, arg_name */
2734 +#define DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
2736 + MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
2737 + MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
2738 + MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
2739 + MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
2742 +/* cmd, param, offset, width, type, arg_name */
2743 +#define DPDMAI_CMD_GET_IRQ(cmd, irq_index) \
2744 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
2746 +/* cmd, param, offset, width, type, arg_name */
2747 +#define DPDMAI_RSP_GET_IRQ(cmd, type, irq_cfg) \
2749 + MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
2750 + MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
2751 + MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
2752 + MC_RSP_OP(cmd, 2, 32, 32, int, type); \
2755 +/* cmd, param, offset, width, type, arg_name */
2756 +#define DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \
2758 + MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \
2759 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
2762 +/* cmd, param, offset, width, type, arg_name */
2763 +#define DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
2764 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
2766 +/* cmd, param, offset, width, type, arg_name */
2767 +#define DPDMAI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \
2768 + MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state)
2770 +/* cmd, param, offset, width, type, arg_name */
2771 +#define DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
2773 + MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
2774 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
2777 +/* cmd, param, offset, width, type, arg_name */
2778 +#define DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index) \
2779 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
2781 +/* cmd, param, offset, width, type, arg_name */
2782 +#define DPDMAI_RSP_GET_IRQ_MASK(cmd, mask) \
2783 + MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
2785 +/* cmd, param, offset, width, type, arg_name */
2786 +#define DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
2788 + MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
2789 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
2792 +/* cmd, param, offset, width, type, arg_name */
2793 +#define DPDMAI_RSP_GET_IRQ_STATUS(cmd, status) \
2794 + MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
2796 +/* cmd, param, offset, width, type, arg_name */
2797 +#define DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
2799 + MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
2800 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
2803 +/* cmd, param, offset, width, type, arg_name */
2804 +#define DPDMAI_RSP_GET_ATTR(cmd, attr) \
2806 + MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \
2807 + MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->num_of_priorities); \
2808 + MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
2809 + MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
2812 +/* cmd, param, offset, width, type, arg_name */
2813 +#define DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg) \
2815 + MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
2816 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \
2817 + MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority); \
2818 + MC_CMD_OP(cmd, 0, 48, 4, enum dpdmai_dest, cfg->dest_cfg.dest_type); \
2819 + MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
2820 + MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
2823 +/* cmd, param, offset, width, type, arg_name */
2824 +#define DPDMAI_CMD_GET_RX_QUEUE(cmd, priority) \
2825 + MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
2827 +/* cmd, param, offset, width, type, arg_name */
2828 +#define DPDMAI_RSP_GET_RX_QUEUE(cmd, attr) \
2830 + MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\
2831 + MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
2832 + MC_RSP_OP(cmd, 0, 48, 4, enum dpdmai_dest, attr->dest_cfg.dest_type);\
2833 + MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx);\
2834 + MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\
2837 +/* cmd, param, offset, width, type, arg_name */
2838 +#define DPDMAI_CMD_GET_TX_QUEUE(cmd, priority) \
2839 + MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
2841 +/* cmd, param, offset, width, type, arg_name */
2842 +#define DPDMAI_RSP_GET_TX_QUEUE(cmd, attr) \
2843 + MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->fqid)
2845 +#endif /* _FSL_DPDMAI_CMD_H */
2846 --- a/drivers/dma/fsl-edma.c
2847 +++ b/drivers/dma/fsl-edma.c
2848 @@ -146,6 +146,8 @@ struct fsl_edma_slave_config {
2852 + dma_addr_t dma_dev_addr;
2853 + enum dma_data_direction dma_dir;
2856 struct fsl_edma_chan {
2857 @@ -342,6 +344,53 @@ static int fsl_edma_resume(struct dma_ch
2861 +static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
2863 + if (fsl_chan->fsc.dma_dir != DMA_NONE)
2864 + dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
2865 + fsl_chan->fsc.dma_dev_addr,
2866 + fsl_chan->fsc.burst, fsl_chan->fsc.dma_dir, 0);
2867 + fsl_chan->fsc.dma_dir = DMA_NONE;
2870 +static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
2871 + enum dma_transfer_direction dir)
2873 + struct device *dev = fsl_chan->vchan.chan.device->dev;
2874 + enum dma_data_direction dma_dir;
2877 + case DMA_MEM_TO_DEV:
2878 + dma_dir = DMA_FROM_DEVICE;
2880 + case DMA_DEV_TO_MEM:
2881 + dma_dir = DMA_TO_DEVICE;
2883 + case DMA_DEV_TO_DEV:
2884 + dma_dir = DMA_BIDIRECTIONAL;
2887 + dma_dir = DMA_NONE;
2891 + /* Already mapped for this config? */
2892 + if (fsl_chan->fsc.dma_dir == dma_dir)
2895 + fsl_edma_unprep_slave_dma(fsl_chan);
2896 + fsl_chan->fsc.dma_dev_addr = dma_map_resource(dev,
2897 + fsl_chan->fsc.dev_addr,
2898 + fsl_chan->fsc.burst,
2900 + if (dma_mapping_error(dev, fsl_chan->fsc.dma_dev_addr))
2903 + fsl_chan->fsc.dma_dir = dma_dir;
2908 static int fsl_edma_slave_config(struct dma_chan *chan,
2909 struct dma_slave_config *cfg)
2911 @@ -361,6 +410,7 @@ static int fsl_edma_slave_config(struct
2915 + fsl_edma_unprep_slave_dma(fsl_chan);
2919 @@ -553,6 +603,9 @@ static struct dma_async_tx_descriptor *f
2920 if (!is_slave_direction(fsl_chan->fsc.dir))
2923 + if (!fsl_edma_prep_slave_dma(fsl_chan, fsl_chan->fsc.dir))
2926 sg_len = buf_len / period_len;
2927 fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
2929 @@ -572,11 +625,11 @@ static struct dma_async_tx_descriptor *f
2931 if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
2932 src_addr = dma_buf_next;
2933 - dst_addr = fsl_chan->fsc.dev_addr;
2934 + dst_addr = fsl_chan->fsc.dma_dev_addr;
2935 soff = fsl_chan->fsc.addr_width;
2938 - src_addr = fsl_chan->fsc.dev_addr;
2939 + src_addr = fsl_chan->fsc.dma_dev_addr;
2940 dst_addr = dma_buf_next;
2942 doff = fsl_chan->fsc.addr_width;
2943 @@ -606,6 +659,9 @@ static struct dma_async_tx_descriptor *f
2944 if (!is_slave_direction(fsl_chan->fsc.dir))
2947 + if (!fsl_edma_prep_slave_dma(fsl_chan, fsl_chan->fsc.dir))
2950 fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
2953 @@ -618,11 +674,11 @@ static struct dma_async_tx_descriptor *f
2955 if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
2956 src_addr = sg_dma_address(sg);
2957 - dst_addr = fsl_chan->fsc.dev_addr;
2958 + dst_addr = fsl_chan->fsc.dma_dev_addr;
2959 soff = fsl_chan->fsc.addr_width;
2962 - src_addr = fsl_chan->fsc.dev_addr;
2963 + src_addr = fsl_chan->fsc.dma_dev_addr;
2964 dst_addr = sg_dma_address(sg);
2966 doff = fsl_chan->fsc.addr_width;
2967 @@ -809,6 +865,7 @@ static void fsl_edma_free_chan_resources
2968 fsl_edma_chan_mux(fsl_chan, 0, false);
2969 fsl_chan->edesc = NULL;
2970 vchan_get_all_descriptors(&fsl_chan->vchan, &head);
2971 + fsl_edma_unprep_slave_dma(fsl_chan);
2972 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
2974 vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
2975 @@ -944,6 +1001,7 @@ static int fsl_edma_probe(struct platfor
2976 fsl_chan->slave_id = 0;
2977 fsl_chan->idle = true;
2978 fsl_chan->vchan.desc_free = fsl_edma_free_desc;
2979 + fsl_chan->fsc.dma_dir = DMA_NONE;
2980 vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
2982 edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
2984 +++ b/drivers/dma/fsl-qdma.c
2987 + * Driver for NXP Layerscape Queue direct memory access controller (qDMA)
2989 + * Copyright 2017 NXP
2992 + * Jiaheng Fan <jiaheng.fan@nxp.com>
2993 + * Wen He <wen.he_1@nxp.com>
2995 + * SPDX-License-Identifier: GPL-2.0+
2998 +#include <linux/interrupt.h>
2999 +#include <linux/module.h>
3000 +#include <linux/delay.h>
3001 +#include <linux/of_irq.h>
3002 +#include <linux/of_address.h>
3003 +#include <linux/of_platform.h>
3004 +#include <linux/of_dma.h>
3005 +#include <linux/dma-mapping.h>
3006 +#include <linux/dmapool.h>
3007 +#include <linux/dmaengine.h>
3008 +#include <linux/slab.h>
3009 +#include <linux/spinlock.h>
3011 +#include "virt-dma.h"
3013 +#define FSL_QDMA_DMR 0x0
3014 +#define FSL_QDMA_DSR 0x4
3015 +#define FSL_QDMA_DEIER 0xe00
3016 +#define FSL_QDMA_DEDR 0xe04
3017 +#define FSL_QDMA_DECFDW0R 0xe10
3018 +#define FSL_QDMA_DECFDW1R 0xe14
3019 +#define FSL_QDMA_DECFDW2R 0xe18
3020 +#define FSL_QDMA_DECFDW3R 0xe1c
3021 +#define FSL_QDMA_DECFQIDR 0xe30
3022 +#define FSL_QDMA_DECBR 0xe34
3024 +#define FSL_QDMA_BCQMR(x) (0xc0 + 0x100 * (x))
3025 +#define FSL_QDMA_BCQSR(x) (0xc4 + 0x100 * (x))
3026 +#define FSL_QDMA_BCQEDPA_SADDR(x) (0xc8 + 0x100 * (x))
3027 +#define FSL_QDMA_BCQDPA_SADDR(x) (0xcc + 0x100 * (x))
3028 +#define FSL_QDMA_BCQEEPA_SADDR(x) (0xd0 + 0x100 * (x))
3029 +#define FSL_QDMA_BCQEPA_SADDR(x) (0xd4 + 0x100 * (x))
3030 +#define FSL_QDMA_BCQIER(x) (0xe0 + 0x100 * (x))
3031 +#define FSL_QDMA_BCQIDR(x) (0xe4 + 0x100 * (x))
3033 +#define FSL_QDMA_SQDPAR 0x80c
3034 +#define FSL_QDMA_SQEPAR 0x814
3035 +#define FSL_QDMA_BSQMR 0x800
3036 +#define FSL_QDMA_BSQSR 0x804
3037 +#define FSL_QDMA_BSQICR 0x828
3038 +#define FSL_QDMA_CQMR 0xa00
3039 +#define FSL_QDMA_CQDSCR1 0xa08
3040 +#define FSL_QDMA_CQDSCR2 0xa0c
3041 +#define FSL_QDMA_CQIER 0xa10
3042 +#define FSL_QDMA_CQEDR 0xa14
3043 +#define FSL_QDMA_SQCCMR 0xa20
3045 +#define FSL_QDMA_SQICR_ICEN
3047 +#define FSL_QDMA_CQIDR_CQT 0xff000000
3048 +#define FSL_QDMA_CQIDR_SQPE 0x800000
3049 +#define FSL_QDMA_CQIDR_SQT 0x8000
3051 +#define FSL_QDMA_BCQIER_CQTIE 0x8000
3052 +#define FSL_QDMA_BCQIER_CQPEIE 0x800000
3053 +#define FSL_QDMA_BSQICR_ICEN 0x80000000
3054 +#define FSL_QDMA_BSQICR_ICST(x) ((x) << 16)
3055 +#define FSL_QDMA_CQIER_MEIE 0x80000000
3056 +#define FSL_QDMA_CQIER_TEIE 0x1
3057 +#define FSL_QDMA_SQCCMR_ENTER_WM 0x200000
3059 +#define FSL_QDMA_QUEUE_MAX 8
3061 +#define FSL_QDMA_BCQMR_EN 0x80000000
3062 +#define FSL_QDMA_BCQMR_EI 0x40000000
3063 +#define FSL_QDMA_BCQMR_CD_THLD(x) ((x) << 20)
3064 +#define FSL_QDMA_BCQMR_CQ_SIZE(x) ((x) << 16)
3066 +#define FSL_QDMA_BCQSR_QF 0x10000
3067 +#define FSL_QDMA_BCQSR_XOFF 0x1
3069 +#define FSL_QDMA_BSQMR_EN 0x80000000
3070 +#define FSL_QDMA_BSQMR_DI 0x40000000
3071 +#define FSL_QDMA_BSQMR_CQ_SIZE(x) ((x) << 16)
3073 +#define FSL_QDMA_BSQSR_QE 0x20000
3075 +#define FSL_QDMA_DMR_DQD 0x40000000
3076 +#define FSL_QDMA_DSR_DB 0x80000000
3078 +#define FSL_QDMA_COMMAND_BUFFER_SIZE 64
3079 +#define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE 32
3080 +#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN 64
3081 +#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX 16384
3082 +#define FSL_QDMA_QUEUE_NUM_MAX 8
3084 +#define FSL_QDMA_CMD_RWTTYPE 0x4
3085 +#define FSL_QDMA_CMD_LWC 0x2
3087 +#define FSL_QDMA_CMD_RWTTYPE_OFFSET 28
3088 +#define FSL_QDMA_CMD_NS_OFFSET 27
3089 +#define FSL_QDMA_CMD_DQOS_OFFSET 24
3090 +#define FSL_QDMA_CMD_WTHROTL_OFFSET 20
3091 +#define FSL_QDMA_CMD_DSEN_OFFSET 19
3092 +#define FSL_QDMA_CMD_LWC_OFFSET 16
3094 +#define QDMA_CCDF_STATUS 20
3095 +#define QDMA_CCDF_OFFSET 20
3096 +#define QDMA_CCDF_MASK GENMASK(28, 20)
3097 +#define QDMA_CCDF_FOTMAT BIT(29)
3098 +#define QDMA_CCDF_SER BIT(30)
3100 +#define QDMA_SG_FIN BIT(30)
3101 +#define QDMA_SG_EXT BIT(31)
3102 +#define QDMA_SG_LEN_MASK GENMASK(29, 0)
3104 +#define QDMA_BIG_ENDIAN 0x00000001
3105 +#define COMP_TIMEOUT 1000
3106 +#define COMMAND_QUEUE_OVERFLLOW 10
3108 +#define QDMA_IN(fsl_qdma_engine, addr) \
3109 + (((fsl_qdma_engine)->big_endian & QDMA_BIG_ENDIAN) ? \
3110 + ioread32be(addr) : ioread32(addr))
3111 +#define QDMA_OUT(fsl_qdma_engine, addr, val) \
3112 + (((fsl_qdma_engine)->big_endian & QDMA_BIG_ENDIAN) ? \
3113 + iowrite32be(val, addr) : iowrite32(val, addr))
3115 +#define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x) \
3116 + (((fsl_qdma_engine)->block_offset) * (x))
3118 +static DEFINE_PER_CPU(u64, pre_addr);
3119 +static DEFINE_PER_CPU(u64, pre_queue);
3121 +/* qDMA Command Descriptor Formats */
3123 +struct fsl_qdma_format {
3124 + __le32 status; /* ser, status */
3125 + __le32 cfg; /* format, offset */
3128 + __le32 addr_lo; /* low 32-bits of 40-bit address */
3129 + u8 addr_hi; /* high 8-bits of 40-bit address */
3130 + u8 __reserved1[2];
3131 + u8 cfg8b_w1; /* dd, queue */
3138 +qdma_ccdf_addr_get64(const struct fsl_qdma_format *ccdf)
3140 + return le64_to_cpu(ccdf->data) & 0xffffffffffLLU;
3144 +qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
3146 + ccdf->addr_hi = upper_32_bits(addr);
3147 + ccdf->addr_lo = cpu_to_le32(lower_32_bits(addr));
3151 +qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
3153 + return ccdf->cfg8b_w1 & 0xff;
3157 +qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
3159 + return (le32_to_cpu(ccdf->cfg) & QDMA_CCDF_MASK) >> QDMA_CCDF_OFFSET;
3163 +qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
3165 + ccdf->cfg = cpu_to_le32(QDMA_CCDF_FOTMAT | offset);
3169 +qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
3171 + return (le32_to_cpu(ccdf->status) & QDMA_CCDF_MASK) >> QDMA_CCDF_STATUS;
3175 +qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
3177 + ccdf->status = cpu_to_le32(QDMA_CCDF_SER | status);
3180 +static inline void qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
3182 + csgf->cfg = cpu_to_le32(len & QDMA_SG_LEN_MASK);
3185 +static inline void qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
3187 + csgf->cfg = cpu_to_le32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
3190 +static inline void qdma_csgf_set_e(struct fsl_qdma_format *csgf, int len)
3192 + csgf->cfg = cpu_to_le32(QDMA_SG_EXT | (len & QDMA_SG_LEN_MASK));
3195 +/* qDMA Source Descriptor Format */
3196 +struct fsl_qdma_sdf {
3198 + __le32 cfg; /* rev4, bit[0-11] - ssd, bit[12-23] sss */
3203 +/* qDMA Destination Descriptor Format */
3204 +struct fsl_qdma_ddf {
3206 + __le32 cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
3211 +struct fsl_qdma_chan {
3212 + struct virt_dma_chan vchan;
3213 + struct virt_dma_desc vdesc;
3214 + enum dma_status status;
3215 + struct fsl_qdma_engine *qdma;
3216 + struct fsl_qdma_queue *queue;
3219 +struct fsl_qdma_queue {
3220 + struct fsl_qdma_format *virt_head;
3221 + struct fsl_qdma_format *virt_tail;
3222 + struct list_head comp_used;
3223 + struct list_head comp_free;
3224 + struct dma_pool *comp_pool;
3225 + struct dma_pool *desc_pool;
3226 + spinlock_t queue_lock;
3227 + dma_addr_t bus_addr;
3230 + struct fsl_qdma_format *cq;
3231 + void __iomem *block_base;
3234 +struct fsl_qdma_comp {
3235 + dma_addr_t bus_addr;
3236 + dma_addr_t desc_bus_addr;
3238 + void *desc_virt_addr;
3239 + struct fsl_qdma_chan *qchan;
3240 + struct virt_dma_desc vdesc;
3241 + struct list_head list;
3244 +struct fsl_qdma_engine {
3245 + struct dma_device dma_dev;
3246 + void __iomem *ctrl_base;
3247 + void __iomem *status_base;
3248 + void __iomem *block_base;
3251 + struct mutex fsl_qdma_mutex;
3255 + struct fsl_qdma_queue *queue;
3256 + struct fsl_qdma_queue **status;
3257 + struct fsl_qdma_chan *chans;
3261 + int desc_allocated;
3265 +static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
3267 + return QDMA_IN(qdma, addr);
3270 +static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
3271 + void __iomem *addr)
3273 + QDMA_OUT(qdma, addr, val);
3276 +static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
3278 + return container_of(chan, struct fsl_qdma_chan, vchan.chan);
3281 +static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
3283 + return container_of(vd, struct fsl_qdma_comp, vdesc);
3286 +static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
3288 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
3289 + struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
3290 + struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
3291 + struct fsl_qdma_comp *comp_temp, *_comp_temp;
3292 + unsigned long flags;
3295 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
3296 + vchan_get_all_descriptors(&fsl_chan->vchan, &head);
3297 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
3299 + vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
3301 +	if (!fsl_queue->comp_pool && !fsl_queue->desc_pool)
3304 + list_for_each_entry_safe(comp_temp, _comp_temp,
3305 + &fsl_queue->comp_used, list) {
3306 + dma_pool_free(fsl_queue->comp_pool,
3307 + comp_temp->virt_addr,
3308 + comp_temp->bus_addr);
3309 + dma_pool_free(fsl_queue->desc_pool,
3310 + comp_temp->desc_virt_addr,
3311 + comp_temp->desc_bus_addr);
3312 + list_del(&comp_temp->list);
3316 + list_for_each_entry_safe(comp_temp, _comp_temp,
3317 + &fsl_queue->comp_free, list) {
3318 + dma_pool_free(fsl_queue->comp_pool,
3319 + comp_temp->virt_addr,
3320 + comp_temp->bus_addr);
3321 + dma_pool_free(fsl_queue->desc_pool,
3322 + comp_temp->desc_virt_addr,
3323 + comp_temp->desc_bus_addr);
3324 + list_del(&comp_temp->list);
3328 + dma_pool_destroy(fsl_queue->comp_pool);
3329 + dma_pool_destroy(fsl_queue->desc_pool);
3331 + fsl_qdma->desc_allocated--;
3332 + fsl_queue->comp_pool = NULL;
3333 + fsl_queue->desc_pool = NULL;
3336 +static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
3337 + dma_addr_t dst, dma_addr_t src, u32 len)
3339 + struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
3340 + struct fsl_qdma_sdf *sdf;
3341 + struct fsl_qdma_ddf *ddf;
3343 + ccdf = (struct fsl_qdma_format *)fsl_comp->virt_addr;
3344 + csgf_desc = (struct fsl_qdma_format *)fsl_comp->virt_addr + 1;
3345 + csgf_src = (struct fsl_qdma_format *)fsl_comp->virt_addr + 2;
3346 + csgf_dest = (struct fsl_qdma_format *)fsl_comp->virt_addr + 3;
3347 + sdf = (struct fsl_qdma_sdf *)fsl_comp->desc_virt_addr;
3348 + ddf = (struct fsl_qdma_ddf *)fsl_comp->desc_virt_addr + 1;
3350 + memset(fsl_comp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
3351 + memset(fsl_comp->desc_virt_addr, 0, FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
3352 + /* Head Command Descriptor(Frame Descriptor) */
3353 + qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
3354 + qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
3355 + qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
3356 + /* Status notification is enqueued to status queue. */
3357 + /* Compound Command Descriptor(Frame List Table) */
3358 + qdma_desc_addr_set64(csgf_desc, fsl_comp->desc_bus_addr);
3359 + /* It must be 32 as Compound S/G Descriptor */
3360 + qdma_csgf_set_len(csgf_desc, 32);
3361 + qdma_desc_addr_set64(csgf_src, src);
3362 + qdma_csgf_set_len(csgf_src, len);
3363 + qdma_desc_addr_set64(csgf_dest, dst);
3364 + qdma_csgf_set_len(csgf_dest, len);
3365 + /* This entry is the last entry. */
3366 + qdma_csgf_set_f(csgf_dest, len);
3367 + /* Descriptor Buffer */
3368 + sdf->cmd = cpu_to_le32(
3369 + FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
3370 + ddf->cmd = cpu_to_le32(
3371 + FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
3372 + ddf->cmd |= cpu_to_le32(
3373 + FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
3377 + * Pre-request command descriptor and compound S/G for enqueue.
3379 +static int fsl_qdma_pre_request_enqueue_comp_desc(struct fsl_qdma_queue *queue)
3381 + struct fsl_qdma_comp *comp_temp;
3384 + for (i = 0; i < queue->n_cq + COMMAND_QUEUE_OVERFLLOW; i++) {
3385 + comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
3388 + comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
3390 + &comp_temp->bus_addr);
3392 + if (!comp_temp->virt_addr) {
3397 + list_add_tail(&comp_temp->list, &queue->comp_free);
3404 + * Pre-request source and destination descriptor for enqueue.
3406 +static int fsl_qdma_pre_request_enqueue_sd_desc(struct fsl_qdma_queue *queue)
3408 + struct fsl_qdma_comp *comp_temp, *_comp_temp;
3410 + list_for_each_entry_safe(comp_temp, _comp_temp,
3411 + &queue->comp_free, list) {
3412 + comp_temp->desc_virt_addr = dma_pool_alloc(queue->desc_pool,
3414 + &comp_temp->desc_bus_addr);
3415 + if (!comp_temp->desc_virt_addr)
3423 + * Request a command descriptor for enqueue.
3425 +static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc(
3426 + struct fsl_qdma_chan *fsl_chan)
3428 + struct fsl_qdma_comp *comp_temp;
3429 + struct fsl_qdma_queue *queue = fsl_chan->queue;
3430 + unsigned long flags;
3431 + int timeout = COMP_TIMEOUT;
3434 + spin_lock_irqsave(&queue->queue_lock, flags);
3435 + if (!list_empty(&queue->comp_free)) {
3436 + comp_temp = list_first_entry(&queue->comp_free,
3437 + struct fsl_qdma_comp,
3439 + list_del(&comp_temp->list);
3441 + spin_unlock_irqrestore(&queue->queue_lock, flags);
3442 + comp_temp->qchan = fsl_chan;
3445 + spin_unlock_irqrestore(&queue->queue_lock, flags);
3453 +static struct fsl_qdma_queue *fsl_qdma_alloc_queue_resources(
3454 + struct platform_device *pdev,
3455 + struct fsl_qdma_engine *fsl_qdma)
3457 + struct fsl_qdma_queue *queue_head, *queue_temp;
3458 + int ret, len, i, j;
3459 + unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
3463 + queue_num = fsl_qdma->n_queues;
3464 + block_number = fsl_qdma->block_number;
3466 + if (queue_num > FSL_QDMA_QUEUE_MAX)
3467 + queue_num = FSL_QDMA_QUEUE_MAX;
3468 + len = sizeof(*queue_head) * queue_num * block_number;
3469 + queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
3473 + ret = device_property_read_u32_array(&pdev->dev, "queue-sizes",
3474 + queue_size, queue_num);
3476 + dev_err(&pdev->dev, "Can't get queue-sizes.\n");
3479 + for (j = 0; j < block_number; j++) {
3480 + for (i = 0; i < queue_num; i++) {
3481 + if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
3482 + queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
3483 + dev_err(&pdev->dev,
3484 + "Get wrong queue-sizes.\n");
3487 + queue_temp = queue_head + i + (j * queue_num);
3490 + dma_alloc_coherent(&pdev->dev,
3491 + sizeof(struct fsl_qdma_format) *
3493 + &queue_temp->bus_addr,
3495 + if (!queue_temp->cq)
3497 + queue_temp->block_base = fsl_qdma->block_base +
3498 + FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
3499 + queue_temp->n_cq = queue_size[i];
3500 + queue_temp->id = i;
3501 + queue_temp->virt_head = queue_temp->cq;
3502 + queue_temp->virt_tail = queue_temp->cq;
3504 + * List for queue command buffer
3506 + INIT_LIST_HEAD(&queue_temp->comp_used);
3507 + spin_lock_init(&queue_temp->queue_lock);
3510 + return queue_head;
3513 +static struct fsl_qdma_queue *fsl_qdma_prep_status_queue(
3514 + struct platform_device *pdev)
3516 + struct device_node *np = pdev->dev.of_node;
3517 + struct fsl_qdma_queue *status_head;
3518 + unsigned int status_size;
3521 + ret = of_property_read_u32(np, "status-sizes", &status_size);
3523 + dev_err(&pdev->dev, "Can't get status-sizes.\n");
3526 + if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
3527 + || status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
3528 + dev_err(&pdev->dev, "Get wrong status_size.\n");
3531 + status_head = devm_kzalloc(&pdev->dev, sizeof(*status_head),
3537 + * Buffer for queue command
3539 + status_head->cq = dma_alloc_coherent(&pdev->dev,
3540 + sizeof(struct fsl_qdma_format) *
3542 + &status_head->bus_addr,
3544 + if (!status_head->cq)
3546 + status_head->n_cq = status_size;
3547 + status_head->virt_head = status_head->cq;
3548 + status_head->virt_tail = status_head->cq;
3549 + status_head->comp_pool = NULL;
3551 + return status_head;
3554 +static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
3556 + void __iomem *ctrl = fsl_qdma->ctrl_base;
3557 + void __iomem *block;
3562 + /* Disable the command queue and wait for idle state. */
3563 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
3564 + reg |= FSL_QDMA_DMR_DQD;
3565 + qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
3566 + for (j = 0; j < fsl_qdma->block_number; j++) {
3567 + block = fsl_qdma->block_base +
3568 + FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
3569 + for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
3570 + qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
3573 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
3574 + if (!(reg & FSL_QDMA_DSR_DB))
3581 + for (j = 0; j < fsl_qdma->block_number; j++) {
3583 + block = fsl_qdma->block_base +
3584 + FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
3586 + /* Disable status queue. */
3587 + qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);
3590 + * clear the command queue interrupt detect register for
3593 + qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
3599 +static int fsl_qdma_queue_transfer_complete(
3600 + struct fsl_qdma_engine *fsl_qdma,
3604 + struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
3605 + struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];
3606 + struct fsl_qdma_queue *temp_queue;
3607 + struct fsl_qdma_format *status_addr;
3608 + struct fsl_qdma_comp *fsl_comp = NULL;
3610 + bool duplicate, duplicate_handle;
3614 + duplicate_handle = 0;
3615 + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
3616 + if (reg & FSL_QDMA_BSQSR_QE)
3619 + status_addr = fsl_status->virt_head;
3621 + if (qdma_ccdf_get_queue(status_addr) ==
3622 + __this_cpu_read(pre_queue) &&
3623 + qdma_ccdf_addr_get64(status_addr) ==
3624 + __this_cpu_read(pre_addr))
3626 + i = qdma_ccdf_get_queue(status_addr) +
3627 + id * fsl_qdma->n_queues;
3628 + __this_cpu_write(pre_addr, qdma_ccdf_addr_get64(status_addr));
3629 + __this_cpu_write(pre_queue, qdma_ccdf_get_queue(status_addr));
3630 + temp_queue = fsl_queue + i;
3632 + spin_lock(&temp_queue->queue_lock);
3633 + if (list_empty(&temp_queue->comp_used)) {
3635 + duplicate_handle = 1;
3637 + spin_unlock(&temp_queue->queue_lock);
3641 + fsl_comp = list_first_entry(&temp_queue->comp_used,
3642 + struct fsl_qdma_comp,
3644 + if (fsl_comp->bus_addr + 16 !=
3645 + __this_cpu_read(pre_addr)) {
3647 + duplicate_handle = 1;
3649 + spin_unlock(&temp_queue->queue_lock);
3656 + if (duplicate_handle) {
3657 + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
3658 + reg |= FSL_QDMA_BSQMR_DI;
3659 + qdma_desc_addr_set64(status_addr, 0x0);
3660 + fsl_status->virt_head++;
3661 + if (fsl_status->virt_head == fsl_status->cq
3662 + + fsl_status->n_cq)
3663 + fsl_status->virt_head = fsl_status->cq;
3664 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
3665 + spin_unlock(&temp_queue->queue_lock);
3668 + list_del(&fsl_comp->list);
3670 + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
3671 + reg |= FSL_QDMA_BSQMR_DI;
3672 + qdma_desc_addr_set64(status_addr, 0x0);
3673 + fsl_status->virt_head++;
3674 + if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
3675 + fsl_status->virt_head = fsl_status->cq;
3676 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
3677 + spin_unlock(&temp_queue->queue_lock);
3679 + spin_lock(&fsl_comp->qchan->vchan.lock);
3680 + vchan_cookie_complete(&fsl_comp->vdesc);
3681 + fsl_comp->qchan->status = DMA_COMPLETE;
3682 + spin_unlock(&fsl_comp->qchan->vchan.lock);
3687 +static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
3689 + struct fsl_qdma_engine *fsl_qdma = dev_id;
3690 + unsigned int intr;
3691 + void __iomem *status = fsl_qdma->status_base;
3693 + intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
3696 + dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");
3698 + qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
3699 + return IRQ_HANDLED;
3702 +static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
3704 + struct fsl_qdma_engine *fsl_qdma = dev_id;
3705 + unsigned int intr, reg;
3706 + void __iomem *ctrl = fsl_qdma->ctrl_base;
3707 + void __iomem *block;
3710 + id = irq - fsl_qdma->irq_base;
3711 +	if (id < 0 || id >= fsl_qdma->block_number) {
3712 + dev_err(fsl_qdma->dma_dev.dev,
3713 + "irq %d is wrong irq_base is %d\n",
3714 + irq, fsl_qdma->irq_base);
3717 + block = fsl_qdma->block_base +
3718 + FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
3720 + intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));
3722 + if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
3723 + intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id);
3726 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
3727 + reg |= FSL_QDMA_DMR_DQD;
3728 + qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
3729 + qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
3730 + dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
3733 + qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
3735 + return IRQ_HANDLED;
3739 +fsl_qdma_irq_init(struct platform_device *pdev,
3740 + struct fsl_qdma_engine *fsl_qdma)
3742 + char irq_name[20];
3747 + fsl_qdma->error_irq = platform_get_irq_byname(pdev,
3749 + if (fsl_qdma->error_irq < 0) {
3750 + dev_err(&pdev->dev, "Can't get qdma controller irq.\n");
3751 + return fsl_qdma->error_irq;
3754 + ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
3755 + fsl_qdma_error_handler, 0, "qDMA error", fsl_qdma);
3757 + dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
3761 + for (i = 0; i < fsl_qdma->block_number; i++) {
3762 +		snprintf(irq_name, sizeof(irq_name), "qdma-queue%d", i);
3763 + fsl_qdma->queue_irq[i] = platform_get_irq_byname(pdev,
3766 + if (fsl_qdma->queue_irq[i] < 0) {
3767 + dev_err(&pdev->dev,
3768 + "Can't get qdma queue %d irq.\n",
3770 + return fsl_qdma->queue_irq[i];
3773 + ret = devm_request_irq(&pdev->dev,
3774 + fsl_qdma->queue_irq[i],
3775 + fsl_qdma_queue_handler,
3780 + dev_err(&pdev->dev,
3781 + "Can't register qDMA queue IRQ.\n");
3785 + cpu = i % num_online_cpus();
3786 + ret = irq_set_affinity_hint(fsl_qdma->queue_irq[i],
3787 + get_cpu_mask(cpu));
3789 + dev_err(&pdev->dev,
3790 + "Can't set cpu %d affinity to IRQ %d.\n",
3792 + fsl_qdma->queue_irq[i]);
3801 +static void fsl_qdma_irq_exit(
3802 + struct platform_device *pdev, struct fsl_qdma_engine *fsl_qdma)
3804 + if (fsl_qdma->queue_irq[0] == fsl_qdma->error_irq) {
3805 + devm_free_irq(&pdev->dev, fsl_qdma->queue_irq[0], fsl_qdma);
3807 + devm_free_irq(&pdev->dev, fsl_qdma->queue_irq[0], fsl_qdma);
3808 + devm_free_irq(&pdev->dev, fsl_qdma->error_irq, fsl_qdma);
3812 +static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
3814 + struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
3815 + struct fsl_qdma_queue *temp;
3816 + void __iomem *ctrl = fsl_qdma->ctrl_base;
3817 + void __iomem *status = fsl_qdma->status_base;
3818 + void __iomem *block;
3822 + /* Try to halt the qDMA engine first. */
3823 + ret = fsl_qdma_halt(fsl_qdma);
3825 + dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");
3829 + for (i = 0; i < fsl_qdma->block_number; i++) {
3831 + * Clear the command queue interrupt detect register for
3835 + block = fsl_qdma->block_base +
3836 + FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, i);
3837 + qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
3840 + for (j = 0; j < fsl_qdma->block_number; j++) {
3841 + block = fsl_qdma->block_base +
3842 + FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
3843 + for (i = 0; i < fsl_qdma->n_queues; i++) {
3844 + temp = fsl_queue + i + (j * fsl_qdma->n_queues);
3846 + * Initialize Command Queue registers to
3847 + * point to the first
3848 + * command descriptor in memory.
3849 + * Dequeue Pointer Address Registers
3850 + * Enqueue Pointer Address Registers
3853 + qdma_writel(fsl_qdma, temp->bus_addr,
3854 + block + FSL_QDMA_BCQDPA_SADDR(i));
3855 + qdma_writel(fsl_qdma, temp->bus_addr,
3856 + block + FSL_QDMA_BCQEPA_SADDR(i));
3858 + /* Initialize the queue mode. */
3859 + reg = FSL_QDMA_BCQMR_EN;
3860 + reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
3861 + reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
3862 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
3866 + * Workaround for erratum: ERR010812.
3867 + * We must enable XOFF to avoid the enqueue rejection occurs.
3868 + * Setting SQCCMR ENTER_WM to 0x20.
3871 + qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
3872 + block + FSL_QDMA_SQCCMR);
3875 + * Initialize status queue registers to point to the first
3876 + * command descriptor in memory.
3877 + * Dequeue Pointer Address Registers
3878 + * Enqueue Pointer Address Registers
3881 + qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
3882 + block + FSL_QDMA_SQEPAR);
3883 + qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
3884 + block + FSL_QDMA_SQDPAR);
3885 + /* Initialize status queue interrupt. */
3886 + qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
3887 + block + FSL_QDMA_BCQIER(0));
3888 + qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN |
3889 + FSL_QDMA_BSQICR_ICST(5) | 0x8000,
3890 + block + FSL_QDMA_BSQICR);
3891 + qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE |
3892 + FSL_QDMA_CQIER_TEIE,
3893 + block + FSL_QDMA_CQIER);
3895 + /* Initialize the status queue mode. */
3896 + reg = FSL_QDMA_BSQMR_EN;
3897 + reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2(
3898 + fsl_qdma->status[j]->n_cq) - 6);
3900 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
3901 + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
3905 + /* Initialize controller interrupt register. */
3906 + qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
3907 + qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEIER);
3909 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
3910 + reg &= ~FSL_QDMA_DMR_DQD;
3911 + qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
3916 +static struct dma_async_tx_descriptor *
3917 +fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
3918 + dma_addr_t src, size_t len, unsigned long flags)
3920 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
3921 + struct fsl_qdma_comp *fsl_comp;
3923 + fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan);
3928 + fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
3930 + return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
3933 +static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
3935 + struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
3936 + struct fsl_qdma_comp *fsl_comp;
3937 + struct virt_dma_desc *vdesc;
3938 + void __iomem *block = fsl_queue->block_base;
3941 + reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
3942 + if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
3944 + vdesc = vchan_next_desc(&fsl_chan->vchan);
3947 + list_del(&vdesc->node);
3948 + fsl_comp = to_fsl_qdma_comp(vdesc);
3950 + memcpy(fsl_queue->virt_head++, fsl_comp->virt_addr, 16);
3951 + if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
3952 + fsl_queue->virt_head = fsl_queue->cq;
3954 + list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
3956 + reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
3957 + reg |= FSL_QDMA_BCQMR_EI;
3958 + qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
3959 + fsl_chan->status = DMA_IN_PROGRESS;
3962 +static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan,
3963 + dma_cookie_t cookie, struct dma_tx_state *txstate)
3965 + return dma_cookie_status(chan, cookie, txstate);
3968 +static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
3970 + struct fsl_qdma_comp *fsl_comp;
3971 + struct fsl_qdma_queue *fsl_queue;
3972 + unsigned long flags;
3974 + fsl_comp = to_fsl_qdma_comp(vdesc);
3975 + fsl_queue = fsl_comp->qchan->queue;
3977 + spin_lock_irqsave(&fsl_queue->queue_lock, flags);
3978 + list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
3979 + spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
3982 +static void fsl_qdma_issue_pending(struct dma_chan *chan)
3984 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
3985 + struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
3986 + unsigned long flags;
3988 + spin_lock_irqsave(&fsl_queue->queue_lock, flags);
3989 + spin_lock(&fsl_chan->vchan.lock);
3990 + if (vchan_issue_pending(&fsl_chan->vchan))
3991 + fsl_qdma_enqueue_desc(fsl_chan);
3992 + spin_unlock(&fsl_chan->vchan.lock);
3993 + spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
3996 +static void fsl_qdma_synchronize(struct dma_chan *chan)
3998 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
4000 + vchan_synchronize(&fsl_chan->vchan);
4003 +static int fsl_qdma_terminate_all(struct dma_chan *chan)
4005 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
4006 + unsigned long flags;
4009 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
4010 + vchan_get_all_descriptors(&fsl_chan->vchan, &head);
4011 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
4012 + vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
4016 +static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
4018 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
4019 + struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
4020 + struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
4023 + if (fsl_queue->comp_pool && fsl_queue->desc_pool)
4024 + return fsl_qdma->desc_allocated;
4026 + INIT_LIST_HEAD(&fsl_queue->comp_free);
4029 + * The dma pool for queue command buffer
4031 + fsl_queue->comp_pool =
4032 + dma_pool_create("comp_pool",
4033 + chan->device->dev,
4034 + FSL_QDMA_COMMAND_BUFFER_SIZE,
4036 + if (!fsl_queue->comp_pool)
4040 + * The dma pool for Descriptor(SD/DD) buffer
4042 + fsl_queue->desc_pool =
4043 + dma_pool_create("desc_pool",
4044 + chan->device->dev,
4045 + FSL_QDMA_DESCRIPTOR_BUFFER_SIZE,
4047 + if (!fsl_queue->desc_pool)
4048 + goto err_desc_pool;
4050 + ret = fsl_qdma_pre_request_enqueue_comp_desc(fsl_queue);
4052 + dev_err(chan->device->dev, "failed to alloc dma buffer for "
4053 + "comp S/G descriptor\n");
4057 + ret = fsl_qdma_pre_request_enqueue_sd_desc(fsl_queue);
4059 + dev_err(chan->device->dev, "failed to alloc dma buffer for "
4060 + "S/D descriptor\n");
4064 + fsl_qdma->desc_allocated++;
4065 + return fsl_qdma->desc_allocated;
4068 + dma_pool_destroy(fsl_queue->desc_pool);
4070 + dma_pool_destroy(fsl_queue->comp_pool);
4074 +static int fsl_qdma_probe(struct platform_device *pdev)
4076 + struct device_node *np = pdev->dev.of_node;
4077 + struct fsl_qdma_engine *fsl_qdma;
4078 + struct fsl_qdma_chan *fsl_chan;
4079 + struct resource *res;
4080 + unsigned int len, chans, queues;
4085 + ret = of_property_read_u32(np, "channels", &chans);
4087 + dev_err(&pdev->dev, "Can't get channels.\n");
4091 + ret = of_property_read_u32(np, "block-offset", &blk_off);
4093 + dev_err(&pdev->dev, "Can't get block-offset.\n");
4097 + ret = of_property_read_u32(np, "block-number", &blk_num);
4099 + dev_err(&pdev->dev, "Can't get block-number.\n");
4103 + blk_num = min_t(int, blk_num, num_online_cpus());
4105 + len = sizeof(*fsl_qdma);
4106 + fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
4110 + len = sizeof(*fsl_chan) * chans;
4111 + fsl_qdma->chans = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
4112 + if (!fsl_qdma->chans)
4115 + len = sizeof(struct fsl_qdma_queue *) * blk_num;
4116 + fsl_qdma->status = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
4117 + if (!fsl_qdma->status)
4120 + len = sizeof(int) * blk_num;
4121 + fsl_qdma->queue_irq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
4122 + if (!fsl_qdma->queue_irq)
4125 + ret = of_property_read_u32(np, "queues", &queues);
4127 + dev_err(&pdev->dev, "Can't get queues.\n");
4131 + fsl_qdma->desc_allocated = 0;
4132 + fsl_qdma->n_chans = chans;
4133 + fsl_qdma->n_queues = queues;
4134 + fsl_qdma->block_number = blk_num;
4135 + fsl_qdma->block_offset = blk_off;
4137 + mutex_init(&fsl_qdma->fsl_qdma_mutex);
4139 + for (i = 0; i < fsl_qdma->block_number; i++) {
4140 + fsl_qdma->status[i] = fsl_qdma_prep_status_queue(pdev);
4141 + if (!fsl_qdma->status[i])
4144 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4145 + fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
4146 + if (IS_ERR(fsl_qdma->ctrl_base))
4147 + return PTR_ERR(fsl_qdma->ctrl_base);
4149 + res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
4150 + fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
4151 + if (IS_ERR(fsl_qdma->status_base))
4152 + return PTR_ERR(fsl_qdma->status_base);
4154 + res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
4155 + fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
4156 + if (IS_ERR(fsl_qdma->block_base))
4157 + return PTR_ERR(fsl_qdma->block_base);
4158 + fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma);
4159 + if (!fsl_qdma->queue)
4162 + ret = fsl_qdma_irq_init(pdev, fsl_qdma);
4166 + fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
4167 + fsl_qdma->big_endian = of_property_read_bool(np, "big-endian");
4168 + INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
4170 + for (i = 0; i < fsl_qdma->n_chans; i++) {
4171 + struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
4173 + fsl_chan->qdma = fsl_qdma;
4174 + fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
4175 + fsl_qdma->block_number);
4176 + fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
4177 + vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
4180 + dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);
4182 + fsl_qdma->dma_dev.dev = &pdev->dev;
4183 + fsl_qdma->dma_dev.device_free_chan_resources
4184 + = fsl_qdma_free_chan_resources;
4185 + fsl_qdma->dma_dev.device_alloc_chan_resources
4186 + = fsl_qdma_alloc_chan_resources;
4187 + fsl_qdma->dma_dev.device_tx_status = fsl_qdma_tx_status;
4188 + fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
4189 + fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
4190 + fsl_qdma->dma_dev.device_synchronize = fsl_qdma_synchronize;
4191 + fsl_qdma->dma_dev.device_terminate_all = fsl_qdma_terminate_all;
4193 + dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
4195 + platform_set_drvdata(pdev, fsl_qdma);
4197 + ret = dma_async_device_register(&fsl_qdma->dma_dev);
4199 + dev_err(&pdev->dev,
4200 + "Can't register NXP Layerscape qDMA engine.\n");
4204 + ret = fsl_qdma_reg_init(fsl_qdma);
4206 + dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
4213 +static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
4215 + struct fsl_qdma_chan *chan, *_chan;
4217 + list_for_each_entry_safe(chan, _chan,
4218 + &dmadev->channels, vchan.chan.device_node) {
4219 + list_del(&chan->vchan.chan.device_node);
4220 + tasklet_kill(&chan->vchan.task);
4224 +static int fsl_qdma_remove(struct platform_device *pdev)
4226 + struct device_node *np = pdev->dev.of_node;
4227 + struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
4228 + struct fsl_qdma_queue *status;
4231 + fsl_qdma_irq_exit(pdev, fsl_qdma);
4232 + fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);
4233 + of_dma_controller_free(np);
4234 + dma_async_device_unregister(&fsl_qdma->dma_dev);
4236 + for (i = 0; i < fsl_qdma->block_number; i++) {
4237 + status = fsl_qdma->status[i];
4238 + dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
4239 + status->n_cq, status->cq, status->bus_addr);
4244 +static const struct of_device_id fsl_qdma_dt_ids[] = {
4245 + { .compatible = "fsl,ls1021a-qdma", },
4246 + { /* sentinel */ }
4248 +MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);
4250 +static struct platform_driver fsl_qdma_driver = {
4252 + .name = "fsl-qdma",
4253 + .of_match_table = fsl_qdma_dt_ids,
4255 + .probe = fsl_qdma_probe,
4256 + .remove = fsl_qdma_remove,
4259 +module_platform_driver(fsl_qdma_driver);
4261 +MODULE_ALIAS("platform:fsl-qdma");
4262 +MODULE_DESCRIPTION("NXP Layerscape qDMA engine driver");
4263 +MODULE_LICENSE("GPL v2");