1 From 731adfb43892a1d7fe00e2036200f33a9b61a589 Mon Sep 17 00:00:00 2001
2 From: Biwen Li <biwen.li@nxp.com>
3 Date: Tue, 30 Oct 2018 18:26:02 +0800
4 Subject: [PATCH 19/40] dma: support layerscape
5 This is an integrated patch adding DMA support for Layerscape
6
7 Signed-off-by: Catalin Horghidan <catalin.horghidan@nxp.com>
8 Signed-off-by: Changming Huang <jerry.huang@nxp.com>
9 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
10 Signed-off-by: jiaheng.fan <jiaheng.fan@nxp.com>
11 Signed-off-by: Peng Ma <peng.ma@nxp.com>
12 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
13 Signed-off-by: Rajiv Vishwakarma <rajiv.vishwakarma@nxp.com>
14 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
15 Signed-off-by: Wen He <wen.he_1@nxp.com>
16 Signed-off-by: Yuan Yao <yao.yuan@nxp.com>
17 Signed-off-by: Biwen Li <biwen.li@nxp.com>
18 ---
19 .../devicetree/bindings/dma/fsl-qdma.txt | 51 +
20 drivers/dma/Kconfig | 33 +-
21 drivers/dma/Makefile | 3 +
22 drivers/dma/caam_dma.c | 462 ++++++
23 drivers/dma/dpaa2-qdma/Kconfig | 8 +
24 drivers/dma/dpaa2-qdma/Makefile | 8 +
25 drivers/dma/dpaa2-qdma/dpaa2-qdma.c | 940 ++++++++++++
26 drivers/dma/dpaa2-qdma/dpaa2-qdma.h | 227 +++
27 drivers/dma/dpaa2-qdma/dpdmai.c | 515 +++++++
28 drivers/dma/dpaa2-qdma/fsl_dpdmai.h | 521 +++++++
29 drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h | 222 +++
30 drivers/dma/fsl-qdma.c | 1278 +++++++++++++++++
31 12 files changed, 4267 insertions(+), 1 deletion(-)
32 create mode 100644 Documentation/devicetree/bindings/dma/fsl-qdma.txt
33 create mode 100644 drivers/dma/caam_dma.c
34 create mode 100644 drivers/dma/dpaa2-qdma/Kconfig
35 create mode 100644 drivers/dma/dpaa2-qdma/Makefile
36 create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.c
37 create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.h
38 create mode 100644 drivers/dma/dpaa2-qdma/dpdmai.c
39 create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai.h
40 create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
41 create mode 100644 drivers/dma/fsl-qdma.c
42
43 --- /dev/null
44 +++ b/Documentation/devicetree/bindings/dma/fsl-qdma.txt
45 @@ -0,0 +1,51 @@
46 +* Freescale queue Direct Memory Access (qDMA) Controller
47 +
48 +The qDMA supports channel virtualization by allowing DMA jobs to be enqueued into
49 +different command queues. A core can initiate a DMA transaction by preparing a command
50 +descriptor for each DMA job and enqueuing the job to a command queue.
51 +
52 +* qDMA Controller
53 +Required properties:
54 +- compatible :
55 + should be "fsl,ls1021a-qdma".
56 +- reg : Specifies the base physical address(es) and size of the qDMA registers.
57 + The 1st region is the qDMA control registers' address and size.
58 + The 2nd region is the status queue control registers' address and size.
59 + The 3rd region is the virtual block control registers' address and size.
60 +- interrupts : A list of interrupt-specifiers, one for each entry in
61 + interrupt-names.
62 +- interrupt-names : Should contain:
63 + "qdma-queue0" - the block0 interrupt
64 + "qdma-queue1" - the block1 interrupt
65 + "qdma-queue2" - the block2 interrupt
66 + "qdma-queue3" - the block3 interrupt
67 + "qdma-error" - the error interrupt
68 +- channels : Number of DMA channels supported
69 +- block-number : the number of virtual blocks
70 +- block-offset : the offset between consecutive virtual blocks
71 +- queues : the number of command queues per virtual block
72 +- status-sizes : the status queue size per virtual block
73 +- queue-sizes : the command queue sizes per virtual block, one entry per queue
74 +- big-endian: If present, registers and hardware scatter/gather descriptors
75 + of the qDMA are implemented in big-endian mode, otherwise in
76 + little-endian mode.
77 +
78 +Examples:
79 + qdma: qdma@8390000 {
80 + compatible = "fsl,ls1021a-qdma";
81 + reg = <0x0 0x8388000 0x0 0x1000>, /* Controller regs */
82 + <0x0 0x8389000 0x0 0x1000>, /* Status regs */
83 + <0x0 0x838a000 0x0 0x2000>; /* Block regs */
84 + interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
85 + <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
86 + <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
87 + interrupt-names = "qdma-error",
88 + "qdma-queue0", "qdma-queue1";
89 + channels = <8>;
90 + block-number = <2>;
91 + block-offset = <0x1000>;
92 + queues = <2>;
93 + status-sizes = <64>;
94 + queue-sizes = <64 64>;
95 + big-endian;
96 + };
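
Clients reach a controller like this through the generic Linux dmaengine API
rather than through the binding itself. For reference, below is a minimal
memcpy client sketch (illustrative only, not part of this patch; the function
name is hypothetical). It assumes src and dst are dma_addr_t values already
DMA-mapped for the channel's device and uses only standard dmaengine calls:

	#include <linux/dmaengine.h>

	/* Illustrative dmaengine memcpy client; any memcpy-capable channel
	 * (such as the qDMA above) can back it. */
	static int example_qdma_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
	{
		struct dma_async_tx_descriptor *tx;
		struct dma_chan *chan;
		dma_cap_mask_t mask;
		dma_cookie_t cookie;
		int ret = 0;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);
		chan = dma_request_channel(mask, NULL, NULL);
		if (!chan)
			return -ENODEV;

		tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					       DMA_PREP_INTERRUPT);
		if (!tx) {
			ret = -ENOMEM;
			goto out;
		}

		cookie = dmaengine_submit(tx);
		dma_async_issue_pending(chan);

		/* Polled wait for brevity; real users set a callback. */
		if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
			ret = -EIO;
	out:
		dma_release_channel(chan);
		return ret;
	}
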
97 --- a/drivers/dma/Kconfig
98 +++ b/drivers/dma/Kconfig
99 @@ -129,6 +129,24 @@ config COH901318
100 help
101 Enable support for ST-Ericsson COH 901 318 DMA.
102
103 +config CRYPTO_DEV_FSL_CAAM_DMA
104 + tristate "CAAM DMA engine support"
105 + depends on CRYPTO_DEV_FSL_CAAM_JR
106 + default n
107 + select DMA_ENGINE
108 + select ASYNC_CORE
109 + select ASYNC_TX_ENABLE_CHANNEL_SWITCH
110 + help
111 + Selecting this will offload the DMA operations for users of
112 + the scatter gather memcopy API to the CAAM via job rings. The
113 + CAAM is a hardware module that provides hardware acceleration to
114 + cryptographic operations. It has a built-in DMA controller that can
115 + be programmed to read/write cryptographic data. This module defines
116 + a DMA driver that uses the DMA capabilities of the CAAM.
117 +
118 + To compile this as a module, choose M here: the module
119 + will be called caam_dma.
120 +
121 config DMA_BCM2835
122 tristate "BCM2835 DMA engine support"
123 depends on ARCH_BCM2835
124 @@ -215,6 +233,20 @@ config FSL_EDMA
125 multiplexing capability for DMA request sources(slot).
126 This module can be found on Freescale Vybrid and LS-1 SoCs.
127
128 +config FSL_QDMA
129 + tristate "NXP Layerscape qDMA engine support"
130 + select DMA_ENGINE
131 + select DMA_VIRTUAL_CHANNELS
132 + select DMA_ENGINE_RAID
133 + select ASYNC_TX_ENABLE_CHANNEL_SWITCH
134 + help
135 + Support the NXP Layerscape qDMA engine with command queue and legacy mode.
136 + Channel virtualization is supported through enqueuing of DMA jobs to,
137 + or dequeuing DMA jobs from, different work queues.
138 + This module can be found on NXP Layerscape SoCs.
139 +
140 +source drivers/dma/dpaa2-qdma/Kconfig
141 +
142 config FSL_RAID
143 tristate "Freescale RAID engine Support"
144 depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
145 @@ -600,7 +632,6 @@ config ZX_DMA
146 help
147 Support the DMA engine for ZTE ZX family platform devices.
148
149 -
150 # driver files
151 source "drivers/dma/bestcomm/Kconfig"
152
153 --- a/drivers/dma/Makefile
154 +++ b/drivers/dma/Makefile
155 @@ -31,7 +31,9 @@ obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
156 obj-$(CONFIG_DW_DMAC_CORE) += dw/
157 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
158 obj-$(CONFIG_FSL_DMA) += fsldma.o
159 +obj-$(CONFIG_FSL_DPAA2_QDMA) += dpaa2-qdma/
160 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
161 +obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
162 obj-$(CONFIG_FSL_RAID) += fsl_raid.o
163 obj-$(CONFIG_HSU_DMA) += hsu/
164 obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
165 @@ -71,6 +73,7 @@ obj-$(CONFIG_TI_EDMA) += edma.o
166 obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
167 obj-$(CONFIG_ZX_DMA) += zx_dma.o
168 obj-$(CONFIG_ST_FDMA) += st_fdma.o
169 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_DMA) += caam_dma.o
170
171 obj-y += qcom/
172 obj-y += xilinx/
173 --- /dev/null
174 +++ b/drivers/dma/caam_dma.c
175 @@ -0,0 +1,462 @@
176 +/*
177 + * caam support for SG DMA
178 + *
179 + * Copyright 2016 Freescale Semiconductor, Inc
180 + * Copyright 2017 NXP
181 + *
182 + * Redistribution and use in source and binary forms, with or without
183 + * modification, are permitted provided that the following conditions are met:
184 + * * Redistributions of source code must retain the above copyright
185 + * notice, this list of conditions and the following disclaimer.
186 + * * Redistributions in binary form must reproduce the above copyright
187 + * notice, this list of conditions and the following disclaimer in the
188 + * documentation and/or other materials provided with the distribution.
189 + * * Neither the names of the above-listed copyright holders nor the
190 + * names of any contributors may be used to endorse or promote products
191 + * derived from this software without specific prior written permission.
192 + *
193 + *
194 + * ALTERNATIVELY, this software may be distributed under the terms of the
195 + * GNU General Public License ("GPL") as published by the Free Software
196 + * Foundation, either version 2 of that License or (at your option) any
197 + * later version.
198 + *
199 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
200 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
201 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
202 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
203 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
204 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
205 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
206 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
207 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
208 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
209 + * POSSIBILITY OF SUCH DAMAGE.
210 + */
211 +
212 +#include <linux/dma-mapping.h>
213 +#include <linux/dmaengine.h>
214 +#include <linux/module.h>
215 +#include <linux/platform_device.h>
216 +#include <linux/slab.h>
217 +
218 +#include "dmaengine.h"
219 +
220 +#include "../crypto/caam/regs.h"
221 +#include "../crypto/caam/jr.h"
222 +#include "../crypto/caam/error.h"
223 +#include "../crypto/caam/desc_constr.h"
224 +
225 +#define DESC_DMA_MEMCPY_LEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / \
226 + CAAM_CMD_SZ)
227 +
228 +/*
229 + * This is the maximum chunk size of a DMA transfer. If a buffer is larger
230 + * than this value, it is internally broken into chunks of at most
231 + * CAAM_DMA_CHUNK_SIZE bytes, and a DMA transfer request is issued per chunk.
232 + * This value is the largest 16-bit number that is a multiple of 256 bytes
233 + * (the largest configurable CAAM DMA burst size).
234 + */
235 +#define CAAM_DMA_CHUNK_SIZE 65280
236 +
237 +struct caam_dma_sh_desc {
238 + u32 desc[DESC_DMA_MEMCPY_LEN] ____cacheline_aligned;
239 + dma_addr_t desc_dma;
240 +};
241 +
242 +/* caam dma extended descriptor */
243 +struct caam_dma_edesc {
244 + struct dma_async_tx_descriptor async_tx;
245 + struct list_head node;
246 + struct caam_dma_ctx *ctx;
247 + dma_addr_t src_dma;
248 + dma_addr_t dst_dma;
249 + unsigned int src_len;
250 + unsigned int dst_len;
251 + u32 jd[] ____cacheline_aligned;
252 +};
253 +
254 +/*
255 + * caam_dma_ctx - per jr/channel context
256 + * @chan: dma channel used by async_tx API
257 + * @node: list_head used to attach to the global dma_ctx_list
258 + * @jrdev: Job Ring device
259 + * @pending_q: queue of pending (submitted, but not enqueued) jobs
260 + * @done_not_acked: jobs completed by the JR, but possibly not yet acked
261 + * @edesc_lock: protects extended descriptor
262 + */
263 +struct caam_dma_ctx {
264 + struct dma_chan chan;
265 + struct list_head node;
266 + struct device *jrdev;
267 + struct list_head pending_q;
268 + struct list_head done_not_acked;
269 + spinlock_t edesc_lock;
270 +};
271 +
272 +static struct dma_device *dma_dev;
273 +static struct caam_dma_sh_desc *dma_sh_desc;
274 +static LIST_HEAD(dma_ctx_list);
275 +
276 +static dma_cookie_t caam_dma_tx_submit(struct dma_async_tx_descriptor *tx)
277 +{
278 + struct caam_dma_edesc *edesc = NULL;
279 + struct caam_dma_ctx *ctx = NULL;
280 + dma_cookie_t cookie;
281 +
282 + edesc = container_of(tx, struct caam_dma_edesc, async_tx);
283 + ctx = container_of(tx->chan, struct caam_dma_ctx, chan);
284 +
285 + spin_lock_bh(&ctx->edesc_lock);
286 +
287 + cookie = dma_cookie_assign(tx);
288 + list_add_tail(&edesc->node, &ctx->pending_q);
289 +
290 + spin_unlock_bh(&ctx->edesc_lock);
291 +
292 + return cookie;
293 +}
294 +
295 +static void caam_jr_chan_free_edesc(struct caam_dma_edesc *edesc)
296 +{
297 + struct caam_dma_ctx *ctx = edesc->ctx;
298 + struct caam_dma_edesc *_edesc = NULL;
299 +
300 + spin_lock_bh(&ctx->edesc_lock);
301 +
302 + list_add_tail(&edesc->node, &ctx->done_not_acked);
303 + list_for_each_entry_safe(edesc, _edesc, &ctx->done_not_acked, node) {
304 + if (async_tx_test_ack(&edesc->async_tx)) {
305 + list_del(&edesc->node);
306 + kfree(edesc);
307 + }
308 + }
309 +
310 + spin_unlock_bh(&ctx->edesc_lock);
311 +}
312 +
313 +static void caam_dma_done(struct device *dev, u32 *hwdesc, u32 err,
314 + void *context)
315 +{
316 + struct caam_dma_edesc *edesc = context;
317 + struct caam_dma_ctx *ctx = edesc->ctx;
318 + dma_async_tx_callback callback;
319 + void *callback_param;
320 +
321 + if (err)
322 + caam_jr_strstatus(ctx->jrdev, err);
323 +
324 + dma_run_dependencies(&edesc->async_tx);
325 +
326 + spin_lock_bh(&ctx->edesc_lock);
327 + dma_cookie_complete(&edesc->async_tx);
328 + spin_unlock_bh(&ctx->edesc_lock);
329 +
330 + callback = edesc->async_tx.callback;
331 + callback_param = edesc->async_tx.callback_param;
332 +
333 + dma_descriptor_unmap(&edesc->async_tx);
334 +
335 + caam_jr_chan_free_edesc(edesc);
336 +
337 + if (callback)
338 + callback(callback_param);
339 +}
340 +
341 +static void caam_dma_memcpy_init_job_desc(struct caam_dma_edesc *edesc)
342 +{
343 + u32 *jd = edesc->jd;
344 + u32 *sh_desc = dma_sh_desc->desc;
345 + dma_addr_t desc_dma = dma_sh_desc->desc_dma;
346 +
347 + /* init the job descriptor */
348 + init_job_desc_shared(jd, desc_dma, desc_len(sh_desc), HDR_REVERSE);
349 +
350 + /* set SEQIN PTR */
351 + append_seq_in_ptr(jd, edesc->src_dma, edesc->src_len, 0);
352 +
353 + /* set SEQOUT PTR */
354 + append_seq_out_ptr(jd, edesc->dst_dma, edesc->dst_len, 0);
355 +
356 + print_hex_dump_debug("caam dma desc@" __stringify(__LINE__) ": ",
357 + DUMP_PREFIX_ADDRESS, 16, 4, jd, desc_bytes(jd), 1);
358 +}
359 +
360 +static struct dma_async_tx_descriptor *
361 +caam_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
362 + size_t len, unsigned long flags)
363 +{
364 + struct caam_dma_edesc *edesc;
365 + struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
366 + chan);
367 +
368 + edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | GFP_NOWAIT);
369 + if (!edesc)
370 + return ERR_PTR(-ENOMEM);
371 +
372 + dma_async_tx_descriptor_init(&edesc->async_tx, chan);
373 + edesc->async_tx.tx_submit = caam_dma_tx_submit;
374 + edesc->async_tx.flags = flags;
375 + edesc->async_tx.cookie = -EBUSY;
376 +
377 + edesc->src_dma = src;
378 + edesc->src_len = len;
379 + edesc->dst_dma = dst;
380 + edesc->dst_len = len;
381 + edesc->ctx = ctx;
382 +
383 + caam_dma_memcpy_init_job_desc(edesc);
384 +
385 + return &edesc->async_tx;
386 +}
387 +
388 +/* This function can be called in an interrupt context */
389 +static void caam_dma_issue_pending(struct dma_chan *chan)
390 +{
391 + struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
392 + chan);
393 + struct caam_dma_edesc *edesc, *_edesc;
394 +
395 + spin_lock_bh(&ctx->edesc_lock);
396 + list_for_each_entry_safe(edesc, _edesc, &ctx->pending_q, node) {
397 + if (caam_jr_enqueue(ctx->jrdev, edesc->jd,
398 + caam_dma_done, edesc) < 0)
399 + break;
400 + list_del(&edesc->node);
401 + }
402 + spin_unlock_bh(&ctx->edesc_lock);
403 +}
404 +
405 +static void caam_dma_free_chan_resources(struct dma_chan *chan)
406 +{
407 + struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
408 + chan);
409 + struct caam_dma_edesc *edesc, *_edesc;
410 +
411 + spin_lock_bh(&ctx->edesc_lock);
412 + list_for_each_entry_safe(edesc, _edesc, &ctx->pending_q, node) {
413 + list_del(&edesc->node);
414 + kfree(edesc);
415 + }
416 + list_for_each_entry_safe(edesc, _edesc, &ctx->done_not_acked, node) {
417 + list_del(&edesc->node);
418 + kfree(edesc);
419 + }
420 + spin_unlock_bh(&ctx->edesc_lock);
421 +}
422 +
423 +static int caam_dma_jr_chan_bind(void)
424 +{
425 + struct device *jrdev;
426 + struct caam_dma_ctx *ctx;
427 + int bonds = 0;
428 + int i;
429 +
430 + for (i = 0; i < caam_jr_driver_probed(); i++) {
431 + jrdev = caam_jridx_alloc(i);
432 + if (IS_ERR(jrdev)) {
433 + pr_err("job ring device %d allocation failed\n", i);
434 + continue;
435 + }
436 +
437 + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
438 + if (!ctx) {
439 + caam_jr_free(jrdev);
440 + continue;
441 + }
442 +
443 + ctx->chan.device = dma_dev;
444 + ctx->chan.private = ctx;
445 +
446 + ctx->jrdev = jrdev;
447 +
448 + INIT_LIST_HEAD(&ctx->pending_q);
449 + INIT_LIST_HEAD(&ctx->done_not_acked);
450 + INIT_LIST_HEAD(&ctx->node);
451 + spin_lock_init(&ctx->edesc_lock);
452 +
453 + dma_cookie_init(&ctx->chan);
454 +
455 + /* add the context of this channel to the context list */
456 + list_add_tail(&ctx->node, &dma_ctx_list);
457 +
458 + /* add this channel to the device chan list */
459 + list_add_tail(&ctx->chan.device_node, &dma_dev->channels);
460 +
461 + bonds++;
462 + }
463 +
464 + return bonds;
465 +}
466 +
467 +static inline void caam_jr_dma_free(struct dma_chan *chan)
468 +{
469 + struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
470 + chan);
471 +
472 + list_del(&ctx->node);
473 + list_del(&chan->device_node);
474 + caam_jr_free(ctx->jrdev);
475 + kfree(ctx);
476 +}
477 +
478 +static void set_caam_dma_desc(u32 *desc)
479 +{
480 + u32 *jmp_cmd;
481 +
482 + /* dma shared descriptor */
483 + init_sh_desc(desc, HDR_SHARE_NEVER | (1 << HDR_START_IDX_SHIFT));
484 +
485 + /* REG1 = CAAM_DMA_CHUNK_SIZE */
486 + append_math_add_imm_u32(desc, REG1, ZERO, IMM, CAAM_DMA_CHUNK_SIZE);
487 +
488 + /* REG0 = SEQINLEN - CAAM_DMA_CHUNK_SIZE */
489 + append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, CAAM_DMA_CHUNK_SIZE);
490 +
491 + /*
492 + * if (REG0 > 0)
493 + * jmp to LABEL1
494 + */
495 + jmp_cmd = append_jump(desc, JUMP_TEST_INVALL | JUMP_COND_MATH_N |
496 + JUMP_COND_MATH_Z);
497 +
498 + /* REG1 = SEQINLEN */
499 + append_math_sub(desc, REG1, SEQINLEN, ZERO, CAAM_CMD_SZ);
500 +
501 + /* LABEL1 */
502 + set_jump_tgt_here(desc, jmp_cmd);
503 +
504 + /* VARSEQINLEN = REG1 */
505 + append_math_add(desc, VARSEQINLEN, REG1, ZERO, CAAM_CMD_SZ);
506 +
507 + /* VARSEQOUTLEN = REG1 */
508 + append_math_add(desc, VARSEQOUTLEN, REG1, ZERO, CAAM_CMD_SZ);
509 +
510 + /* do FIFO STORE */
511 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_METADATA | LDST_VLF);
512 +
513 + /* do FIFO LOAD */
514 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
515 + FIFOLD_TYPE_IFIFO | LDST_VLF);
516 +
517 + /*
518 + * if (REG0 > 0)
519 + * jmp 0xF8 (after shared desc header)
520 + */
521 + append_jump(desc, JUMP_TEST_INVALL | JUMP_COND_MATH_N |
522 + JUMP_COND_MATH_Z | 0xF8);
523 +
524 + print_hex_dump_debug("caam dma shdesc@" __stringify(__LINE__) ": ",
525 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
526 + 1);
527 +}
528 +
529 +static int __init caam_dma_probe(struct platform_device *pdev)
530 +{
531 + struct device *dev = &pdev->dev;
532 + struct device *ctrldev = dev->parent;
533 + struct dma_chan *chan, *_chan;
534 + u32 *sh_desc;
535 + int err = -ENOMEM;
536 + int bonds;
537 +
538 + if (!caam_jr_driver_probed()) {
539 + dev_info(dev, "Defer probing after JR driver probing\n");
540 + return -EPROBE_DEFER;
541 + }
542 +
543 + dma_dev = kzalloc(sizeof(*dma_dev), GFP_KERNEL);
544 + if (!dma_dev)
545 + return -ENOMEM;
546 +
547 + dma_sh_desc = kzalloc(sizeof(*dma_sh_desc), GFP_KERNEL | GFP_DMA);
548 + if (!dma_sh_desc)
549 + goto desc_err;
550 +
551 + sh_desc = dma_sh_desc->desc;
552 + set_caam_dma_desc(sh_desc);
553 + dma_sh_desc->desc_dma = dma_map_single(ctrldev, sh_desc,
554 + desc_bytes(sh_desc),
555 + DMA_TO_DEVICE);
556 + if (dma_mapping_error(ctrldev, dma_sh_desc->desc_dma)) {
557 + dev_err(dev, "unable to map dma descriptor\n");
558 + goto map_err;
559 + }
560 +
561 + INIT_LIST_HEAD(&dma_dev->channels);
562 +
563 + bonds = caam_dma_jr_chan_bind();
564 + if (!bonds) {
565 + err = -ENODEV;
566 + goto jr_bind_err;
567 + }
568 +
569 + dma_dev->dev = dev;
570 + dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
571 + dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
572 + dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
573 + dma_dev->device_tx_status = dma_cookie_status;
574 + dma_dev->device_issue_pending = caam_dma_issue_pending;
575 + dma_dev->device_prep_dma_memcpy = caam_dma_prep_memcpy;
576 + dma_dev->device_free_chan_resources = caam_dma_free_chan_resources;
577 +
578 + err = dma_async_device_register(dma_dev);
579 + if (err) {
580 + dev_err(dev, "Failed to register CAAM DMA engine\n");
581 + goto jr_bind_err;
582 + }
583 +
584 + dev_info(dev, "caam dma support with %d job rings\n", bonds);
585 +
586 + return err;
587 +
588 +jr_bind_err:
589 + list_for_each_entry_safe(chan, _chan, &dma_dev->channels, device_node)
590 + caam_jr_dma_free(chan);
591 +
592 + dma_unmap_single(ctrldev, dma_sh_desc->desc_dma, desc_bytes(sh_desc),
593 + DMA_TO_DEVICE);
594 +map_err:
595 + kfree(dma_sh_desc);
596 +desc_err:
597 + kfree(dma_dev);
598 + return err;
599 +}
600 +
601 +static int caam_dma_remove(struct platform_device *pdev)
602 +{
603 + struct device *dev = &pdev->dev;
604 + struct device *ctrldev = dev->parent;
605 + struct caam_dma_ctx *ctx, *_ctx;
606 +
607 + dma_async_device_unregister(dma_dev);
608 +
609 + list_for_each_entry_safe(ctx, _ctx, &dma_ctx_list, node) {
610 + list_del(&ctx->node);
611 + caam_jr_free(ctx->jrdev);
612 + kfree(ctx);
613 + }
614 +
615 + dma_unmap_single(ctrldev, dma_sh_desc->desc_dma,
616 + desc_bytes(dma_sh_desc->desc), DMA_TO_DEVICE);
617 +
618 + kfree(dma_sh_desc);
619 + kfree(dma_dev);
620 +
621 + dev_info(dev, "caam dma support disabled\n");
622 + return 0;
623 +}
624 +
625 +static struct platform_driver caam_dma_driver = {
626 + .driver = {
627 + .name = "caam-dma",
628 + },
629 + .probe = caam_dma_probe,
630 + .remove = caam_dma_remove,
631 +};
632 +module_platform_driver(caam_dma_driver);
633 +
634 +MODULE_LICENSE("Dual BSD/GPL");
635 +MODULE_DESCRIPTION("NXP CAAM support for DMA engine");
636 +MODULE_AUTHOR("NXP Semiconductors");
637 +MODULE_ALIAS("platform:caam-dma");
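
A quick check of the CAAM_DMA_CHUNK_SIZE arithmetic above: 65280 = 0xFF00 =
255 * 256, so it is a multiple of 256 bytes (the largest configurable CAAM
burst size), and the next such multiple (65536) no longer fits in 16 bits. A
hypothetical helper (not in the driver) for the number of chunk-sized passes
a transfer decomposes into:

	#include <linux/kernel.h>	/* DIV_ROUND_UP() */

	#define CAAM_DMA_CHUNK_SIZE	65280	/* 0xFF00 = 255 * 256 */

	/* Illustrative only: how many CAAM_DMA_CHUNK_SIZE-sized passes the
	 * shared descriptor's loop makes for a transfer of 'len' bytes. */
	static inline size_t caam_dma_nr_chunks(size_t len)
	{
		return DIV_ROUND_UP(len, CAAM_DMA_CHUNK_SIZE);
	}
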
638 --- /dev/null
639 +++ b/drivers/dma/dpaa2-qdma/Kconfig
640 @@ -0,0 +1,8 @@
641 +menuconfig FSL_DPAA2_QDMA
642 + tristate "NXP DPAA2 QDMA"
643 + depends on FSL_MC_BUS && FSL_MC_DPIO
644 + select DMA_ENGINE
645 + select DMA_VIRTUAL_CHANNELS
646 + ---help---
647 + NXP Data Path Acceleration Architecture 2 QDMA driver,
648 + using the NXP MC bus driver.
649 --- /dev/null
650 +++ b/drivers/dma/dpaa2-qdma/Makefile
651 @@ -0,0 +1,8 @@
652 +#
653 +# Makefile for the NXP DPAA2 qDMA controllers
654 +#
655 +ccflags-y += -DVERSION=\"\"
656 +
657 +obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma.o
658 +
659 +fsl-dpaa2-qdma-objs := dpaa2-qdma.o dpdmai.o
660 --- /dev/null
661 +++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.c
662 @@ -0,0 +1,940 @@
663 +/*
664 + * drivers/dma/dpaa2-qdma/dpaa2-qdma.c
665 + *
666 + * Copyright 2015-2017 NXP Semiconductor, Inc.
667 + * Author: Changming Huang <jerry.huang@nxp.com>
668 + *
669 + * Driver for the NXP QDMA engine with QMan mode.
670 + * Channel virtualization is supported through enqueuing of DMA jobs to,
671 + * or dequeuing DMA jobs from, different work queues via the QMan portal.
672 + * This module can be found on NXP LS2 SoCs.
673 + *
674 + * This program is free software; you can redistribute it and/or modify it
675 + * under the terms of the GNU General Public License as published by the
676 + * Free Software Foundation; either version 2 of the License, or (at your
677 + * option) any later version.
678 + */
679 +
680 +#include <linux/init.h>
681 +#include <linux/module.h>
682 +#include <linux/interrupt.h>
683 +#include <linux/clk.h>
684 +#include <linux/dma-mapping.h>
685 +#include <linux/dmapool.h>
686 +#include <linux/slab.h>
687 +#include <linux/spinlock.h>
688 +#include <linux/of.h>
689 +#include <linux/of_device.h>
690 +#include <linux/of_address.h>
691 +#include <linux/of_irq.h>
692 +#include <linux/of_dma.h>
693 +#include <linux/types.h>
694 +#include <linux/delay.h>
695 +#include <linux/iommu.h>
696 +
697 +#include "../virt-dma.h"
698 +
699 +#include <linux/fsl/mc.h>
700 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
701 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
702 +#include "fsl_dpdmai_cmd.h"
703 +#include "fsl_dpdmai.h"
704 +#include "dpaa2-qdma.h"
705 +
706 +static bool smmu_disable = true;
707 +
708 +static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
709 +{
710 + return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
711 +}
712 +
713 +static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
714 +{
715 + return container_of(vd, struct dpaa2_qdma_comp, vdesc);
716 +}
717 +
718 +static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
719 +{
720 + return 0;
721 +}
722 +
723 +static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
724 +{
725 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
726 + unsigned long flags;
727 + LIST_HEAD(head);
728 +
729 + spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
730 + vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
731 + spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);
732 +
733 + vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);
734 +}
735 +
736 +/*
737 + * Request a command descriptor for enqueue.
738 + */
739 +static struct dpaa2_qdma_comp *
740 +dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
741 +{
742 + struct dpaa2_qdma_comp *comp_temp = NULL;
743 + unsigned long flags;
744 +
745 + spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
746 + if (list_empty(&dpaa2_chan->comp_free)) {
747 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
748 + comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
749 + if (!comp_temp)
750 + goto err;
751 + comp_temp->fd_virt_addr = dma_pool_alloc(dpaa2_chan->fd_pool,
752 + GFP_NOWAIT, &comp_temp->fd_bus_addr);
753 + if (!comp_temp->fd_virt_addr)
754 + goto err;
755 +
756 + comp_temp->fl_virt_addr =
757 + (void *)((struct dpaa2_fd *)
758 + comp_temp->fd_virt_addr + 1);
759 + comp_temp->fl_bus_addr = comp_temp->fd_bus_addr +
760 + sizeof(struct dpaa2_fd);
761 + comp_temp->desc_virt_addr =
762 + (void *)((struct dpaa2_fl_entry *)
763 + comp_temp->fl_virt_addr + 3);
764 + comp_temp->desc_bus_addr = comp_temp->fl_bus_addr +
765 + sizeof(struct dpaa2_fl_entry) * 3;
766 +
767 + comp_temp->qchan = dpaa2_chan;
768 + comp_temp->sg_blk_num = 0;
769 + INIT_LIST_HEAD(&comp_temp->sg_src_head);
770 + INIT_LIST_HEAD(&comp_temp->sg_dst_head);
771 + return comp_temp;
772 + }
773 + comp_temp = list_first_entry(&dpaa2_chan->comp_free,
774 + struct dpaa2_qdma_comp, list);
775 + list_del(&comp_temp->list);
776 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
777 +
778 + comp_temp->qchan = dpaa2_chan;
779 +err:
780 + return comp_temp;
781 +}
782 +
783 +static void dpaa2_qdma_populate_fd(uint32_t format,
784 + struct dpaa2_qdma_comp *dpaa2_comp)
785 +{
786 + struct dpaa2_fd *fd;
787 +
788 + fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
789 + memset(fd, 0, sizeof(struct dpaa2_fd));
790 +
791 + /* populate the FD */
792 + dpaa2_fd_set_addr(fd, dpaa2_comp->fl_bus_addr);
793 + /* Bypass memory translation, Frame list format, short length disable */
794 + /* we need to disable BMT if fsl-mc uses IOVA addresses */
795 + if (smmu_disable)
796 + dpaa2_fd_set_bpid(fd, QMAN_FD_BMT_ENABLE);
797 + dpaa2_fd_set_format(fd, QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE);
798 +
799 + dpaa2_fd_set_frc(fd, format | QDMA_SER_CTX);
800 +}
801 +
802 +/* first frame list for descriptor buffer */
803 +static void dpaa2_qdma_populate_first_framel(
804 + struct dpaa2_fl_entry *f_list,
805 + struct dpaa2_qdma_comp *dpaa2_comp)
806 +{
807 + struct dpaa2_qdma_sd_d *sdd;
808 +
809 + sdd = (struct dpaa2_qdma_sd_d *)dpaa2_comp->desc_virt_addr;
810 + memset(sdd, 0, 2 * (sizeof(*sdd)));
811 + /* source and destination descriptor */
812 + sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT); /* source descriptor CMD */
813 + sdd++;
814 + sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT); /* dest descriptor CMD */
815 +
816 + memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
817 + /* first frame list to source descriptor */
818 +
819 + dpaa2_fl_set_addr(f_list, dpaa2_comp->desc_bus_addr);
820 + dpaa2_fl_set_len(f_list, 0x20);
821 + dpaa2_fl_set_format(f_list, QDMA_FL_FMT_SBF | QDMA_FL_SL_LONG);
822 +
823 + if (smmu_disable)
824 + f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE); /* bypass memory translation */
825 +}
826 +
827 +/* source and destination frame list */
828 +static void dpaa2_qdma_populate_frames(struct dpaa2_fl_entry *f_list,
829 + dma_addr_t dst, dma_addr_t src, size_t len, uint8_t fmt)
830 +{
831 + /* source frame list to source buffer */
832 + memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
833 +
834 +
835 + dpaa2_fl_set_addr(f_list, src);
836 + dpaa2_fl_set_len(f_list, len);
837 + dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG)); /* single buffer frame or scatter gather frame */
838 + if (smmu_disable)
839 + f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE); /* bypass memory translation */
840 +
841 + f_list++;
842 + /* destination frame list to destination buffer */
843 + memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
844 +
845 + dpaa2_fl_set_addr(f_list, dst);
846 + dpaa2_fl_set_len(f_list, len);
847 + dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
848 + dpaa2_fl_set_final(f_list, QDMA_FL_F); /* single buffer frame or scatter gather frame */
849 + if (smmu_disable)
850 + f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE); /* bypass memory translation */
851 +}
852 +
853 +static struct dma_async_tx_descriptor *dpaa2_qdma_prep_memcpy(
854 + struct dma_chan *chan, dma_addr_t dst,
855 + dma_addr_t src, size_t len, unsigned long flags)
856 +{
857 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
858 + struct dpaa2_qdma_comp *dpaa2_comp;
859 + struct dpaa2_fl_entry *f_list;
860 + uint32_t format;
861 +
862 + dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
863 +
864 +#ifdef LONG_FORMAT
865 + format = QDMA_FD_LONG_FORMAT;
866 +#else
867 + format = QDMA_FD_SHORT_FORMAT;
868 +#endif
869 + /* populate Frame descriptor */
870 + dpaa2_qdma_populate_fd(format, dpaa2_comp);
871 +
872 + f_list = (struct dpaa2_fl_entry *)dpaa2_comp->fl_virt_addr;
873 +
874 +#ifdef LONG_FORMAT
875 + /* first frame list for descriptor buffer (long format) */
876 + dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp);
877 +
878 + f_list++;
879 +#endif
880 +
881 + dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);
882 +
883 + return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
884 +}
885 +
886 +static struct qdma_sg_blk *dpaa2_qdma_get_sg_blk(
887 + struct dpaa2_qdma_comp *dpaa2_comp,
888 + struct dpaa2_qdma_chan *dpaa2_chan)
889 +{
890 + struct qdma_sg_blk *sg_blk = NULL;
891 + dma_addr_t phy_sgb;
892 + unsigned long flags;
893 +
894 + spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
895 + if (list_empty(&dpaa2_chan->sgb_free)) {
896 + sg_blk = (struct qdma_sg_blk *)dma_pool_alloc(
897 + dpaa2_chan->sg_blk_pool,
898 + GFP_NOWAIT, &phy_sgb);
899 + if (!sg_blk) {
900 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
901 + return sg_blk;
902 + }
903 + sg_blk->blk_virt_addr = (void *)(sg_blk + 1);
904 + sg_blk->blk_bus_addr = phy_sgb + sizeof(*sg_blk);
905 + } else {
906 + sg_blk = list_first_entry(&dpaa2_chan->sgb_free,
907 + struct qdma_sg_blk, list);
908 + list_del(&sg_blk->list);
909 + }
910 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
911 +
912 + return sg_blk;
913 +}
914 +
915 +static uint32_t dpaa2_qdma_populate_sg(struct device *dev,
916 + struct dpaa2_qdma_chan *dpaa2_chan,
917 + struct dpaa2_qdma_comp *dpaa2_comp,
918 + struct scatterlist *dst_sg, u32 dst_nents,
919 + struct scatterlist *src_sg, u32 src_nents)
920 +{
921 + struct dpaa2_qdma_sg *src_sge;
922 + struct dpaa2_qdma_sg *dst_sge;
923 + struct qdma_sg_blk *sg_blk;
924 + struct qdma_sg_blk *sg_blk_dst;
925 + dma_addr_t src;
926 + dma_addr_t dst;
927 + uint32_t num;
928 + uint32_t blocks;
929 + uint32_t len = 0;
930 + uint32_t total_len = 0;
931 + int i, j = 0;
932 +
933 + num = min(dst_nents, src_nents);
934 + blocks = num / (NUM_SG_PER_BLK - 1);
935 + if (num % (NUM_SG_PER_BLK - 1))
936 + blocks += 1;
937 + if (dpaa2_comp->sg_blk_num < blocks) {
938 + len = blocks - dpaa2_comp->sg_blk_num;
939 + for (i = 0; i < len; i++) {
940 + /* source sg blocks */
941 + sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan);
942 + if (!sg_blk)
943 + return 0;
944 + list_add_tail(&sg_blk->list, &dpaa2_comp->sg_src_head);
945 + /* destination sg blocks */
946 + sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan);
947 + if (!sg_blk)
948 + return 0;
949 + list_add_tail(&sg_blk->list, &dpaa2_comp->sg_dst_head);
950 + }
951 + } else {
952 + len = dpaa2_comp->sg_blk_num - blocks;
953 + for (i = 0; i < len; i++) {
954 + spin_lock(&dpaa2_chan->queue_lock);
955 + /* handle source sg blocks */
956 + sg_blk = list_first_entry(&dpaa2_comp->sg_src_head,
957 + struct qdma_sg_blk, list);
958 + list_del(&sg_blk->list);
959 + list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free);
960 + /* handle destination sg blocks */
961 + sg_blk = list_first_entry(&dpaa2_comp->sg_dst_head,
962 + struct qdma_sg_blk, list);
963 + list_del(&sg_blk->list);
964 + list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free);
965 + spin_unlock(&dpaa2_chan->queue_lock);
966 + }
967 + }
968 + dpaa2_comp->sg_blk_num = blocks;
969 +
970 + /* get the first source sg phy address */
971 + sg_blk = list_first_entry(&dpaa2_comp->sg_src_head,
972 + struct qdma_sg_blk, list);
973 + dpaa2_comp->sge_src_bus_addr = sg_blk->blk_bus_addr;
974 + /* get the first destination sg phy address */
975 + sg_blk_dst = list_first_entry(&dpaa2_comp->sg_dst_head,
976 + struct qdma_sg_blk, list);
977 + dpaa2_comp->sge_dst_bus_addr = sg_blk_dst->blk_bus_addr;
978 +
979 + for (i = 0; i < blocks; i++) {
980 + src_sge = (struct dpaa2_qdma_sg *)sg_blk->blk_virt_addr;
981 + dst_sge = (struct dpaa2_qdma_sg *)sg_blk_dst->blk_virt_addr;
982 +
983 + for (j = 0; j < (NUM_SG_PER_BLK - 1); j++) {
984 + len = min(sg_dma_len(dst_sg), sg_dma_len(src_sg));
985 + if (0 == len)
986 + goto fetch;
987 + total_len += len;
988 + src = sg_dma_address(src_sg);
989 + dst = sg_dma_address(dst_sg);
990 +
991 + /* source SG */
992 + src_sge->addr_lo = src;
993 + src_sge->addr_hi = (src >> 32);
994 + src_sge->data_len.data_len_sl0 = len;
995 + src_sge->ctrl.sl = QDMA_SG_SL_LONG;
996 + src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
997 + /* destination SG */
998 + dst_sge->addr_lo = dst;
999 + dst_sge->addr_hi = (dst >> 32);
1000 + dst_sge->data_len.data_len_sl0 = len;
1001 + dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
1002 + dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
1003 +fetch:
1004 + num--;
1005 + if (0 == num) {
1006 + src_sge->ctrl.f = QDMA_SG_F;
1007 + dst_sge->ctrl.f = QDMA_SG_F;
1008 + goto end;
1009 + }
1010 + dst_sg = sg_next(dst_sg);
1011 + src_sg = sg_next(src_sg);
1012 + src_sge++;
1013 + dst_sge++;
1014 + if (j == (NUM_SG_PER_BLK - 2)) {
1015 + /* for next blocks, extension */
1016 + sg_blk = list_next_entry(sg_blk, list);
1017 + sg_blk_dst = list_next_entry(sg_blk_dst, list);
1018 + src_sge->addr_lo = sg_blk->blk_bus_addr;
1019 + src_sge->addr_hi = sg_blk->blk_bus_addr >> 32;
1020 + src_sge->ctrl.sl = QDMA_SG_SL_LONG;
1021 + src_sge->ctrl.fmt = QDMA_SG_FMT_SGTE;
1022 + dst_sge->addr_lo = sg_blk_dst->blk_bus_addr;
1023 + dst_sge->addr_hi =
1024 + sg_blk_dst->blk_bus_addr >> 32;
1025 + dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
1026 + dst_sge->ctrl.fmt = QDMA_SG_FMT_SGTE;
1027 + }
1028 + }
1029 + }
1030 +
1031 +end:
1032 + return total_len;
1033 +}
1034 +
1035 +static enum dma_status dpaa2_qdma_tx_status(struct dma_chan *chan,
1036 + dma_cookie_t cookie, struct dma_tx_state *txstate)
1037 +{
1038 + return dma_cookie_status(chan, cookie, txstate);
1039 +}
1040 +
1041 +static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
1042 +{
1043 +}
1044 +
1045 +static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
1046 +{
1047 + struct dpaa2_qdma_comp *dpaa2_comp;
1048 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
1049 + struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
1050 + struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
1051 + struct virt_dma_desc *vdesc;
1052 + struct dpaa2_fd *fd;
1053 + int err;
1054 + unsigned long flags;
1055 +
1056 + spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
1057 + spin_lock(&dpaa2_chan->vchan.lock);
1058 + if (vchan_issue_pending(&dpaa2_chan->vchan)) {
1059 + vdesc = vchan_next_desc(&dpaa2_chan->vchan);
1060 + if (!vdesc)
1061 + goto err_enqueue;
1062 + dpaa2_comp = to_fsl_qdma_comp(vdesc);
1063 +
1064 + fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
1065 +
1066 + list_del(&vdesc->node);
1067 + list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);
1068 +
1069 + /* TODO: priority hard-coded to zero */
1070 + err = dpaa2_io_service_enqueue_fq(NULL,
1071 + priv->tx_queue_attr[0].fqid, fd);
1072 + if (err) {
1073 + list_del(&dpaa2_comp->list);
1074 + list_add_tail(&dpaa2_comp->list,
1075 + &dpaa2_chan->comp_free);
1076 + }
1077 +
1078 + }
1079 +err_enqueue:
1080 + spin_unlock(&dpaa2_chan->vchan.lock);
1081 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
1082 +}
1083 +
1084 +static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
1085 +{
1086 + struct device *dev = &ls_dev->dev;
1087 + struct dpaa2_qdma_priv *priv;
1088 + struct dpaa2_qdma_priv_per_prio *ppriv;
1089 + uint8_t prio_def = DPDMAI_PRIO_NUM;
1090 + int err;
1091 + int i;
1092 +
1093 + priv = dev_get_drvdata(dev);
1094 +
1095 + priv->dev = dev;
1096 + priv->dpqdma_id = ls_dev->obj_desc.id;
1097 +
1098 + /* Get the handle for the DPDMAI this interface is associated with */
1099 + err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
1100 + if (err) {
1101 + dev_err(dev, "dpdmai_open() failed\n");
1102 + return err;
1103 + }
1104 + dev_info(dev, "Opened dpdmai object successfully\n");
1105 +
1106 + err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
1107 + &priv->dpdmai_attr);
1108 + if (err) {
1109 + dev_err(dev, "dpdmai_get_attributes() failed\n");
1110 + return err;
1111 + }
1112 +
1113 + if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
1114 + dev_err(dev, "DPDMAI major version mismatch\n"
1115 + "Found %u.%u, supported version is %u.%u\n",
1116 + priv->dpdmai_attr.version.major,
1117 + priv->dpdmai_attr.version.minor,
1118 + DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
1119 + }
1120 +
1121 + if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
1122 + dev_err(dev, "DPDMAI minor version mismatch\n"
1123 + "Found %u.%u, supported version is %u.%u\n",
1124 + priv->dpdmai_attr.version.major,
1125 + priv->dpdmai_attr.version.minor,
1126 + DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
1127 + }
1128 +
1129 + priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
1130 + ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
1131 + if (!ppriv) {
1132 + dev_err(dev, "kzalloc for ppriv failed\n");
1133 + return -1;
1134 + }
1135 + priv->ppriv = ppriv;
1136 +
1137 + for (i = 0; i < priv->num_pairs; i++) {
1138 + err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
1139 + i, &priv->rx_queue_attr[i]);
1140 + if (err) {
1141 + dev_err(dev, "dpdmai_get_rx_queue() failed\n");
1142 + return err;
1143 + }
1144 + ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
1145 +
1146 + err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
1147 + i, &priv->tx_queue_attr[i]);
1148 + if (err) {
1149 + dev_err(dev, "dpdmai_get_tx_queue() failed\n");
1150 + return err;
1151 + }
1152 + ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
1153 + ppriv->prio = i;
1154 + ppriv->priv = priv;
1155 + ppriv++;
1156 + }
1157 +
1158 + return 0;
1159 +}
1160 +
1161 +static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
1162 +{
1163 + struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
1164 + struct dpaa2_qdma_priv_per_prio, nctx);
1165 + struct dpaa2_qdma_priv *priv = ppriv->priv;
1166 + struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
1167 + struct dpaa2_qdma_chan *qchan;
1168 + const struct dpaa2_fd *fd;
1169 + const struct dpaa2_fd *fd_eq;
1170 + struct dpaa2_dq *dq;
1171 + int err;
1172 + int is_last = 0;
1173 + uint8_t status;
1174 + int i;
1175 + int found;
1176 + uint32_t n_chans = priv->dpaa2_qdma->n_chans;
1177 +
1178 + do {
1179 + err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
1180 + ppriv->store);
1181 + } while (err);
1182 +
1183 + while (!is_last) {
1184 + do {
1185 + dq = dpaa2_io_store_next(ppriv->store, &is_last);
1186 + } while (!is_last && !dq);
1187 + if (!dq) {
1188 + dev_err(priv->dev, "FQID returned no valid frames!\n");
1189 + continue;
1190 + }
1191 +
1192 + /* obtain FD and process the error */
1193 + fd = dpaa2_dq_fd(dq);
1194 +
1195 + status = dpaa2_fd_get_ctrl(fd) & 0xff;
1196 + if (status)
1197 + dev_err(priv->dev, "FD error occurred\n");
1198 + found = 0;
1199 + for (i = 0; i < n_chans; i++) {
1200 + qchan = &priv->dpaa2_qdma->chans[i];
1201 + spin_lock(&qchan->queue_lock);
1202 + if (list_empty(&qchan->comp_used)) {
1203 + spin_unlock(&qchan->queue_lock);
1204 + continue;
1205 + }
1206 + list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
1207 + &qchan->comp_used, list) {
1208 + fd_eq = (struct dpaa2_fd *)
1209 + dpaa2_comp->fd_virt_addr;
1210 +
1211 + if (le64_to_cpu(fd_eq->simple.addr) ==
1212 + le64_to_cpu(fd->simple.addr)) {
1213 +
1214 + list_del(&dpaa2_comp->list);
1215 + list_add_tail(&dpaa2_comp->list,
1216 + &qchan->comp_free);
1217 +
1218 + spin_lock(&qchan->vchan.lock);
1219 + vchan_cookie_complete(
1220 + &dpaa2_comp->vdesc);
1221 + spin_unlock(&qchan->vchan.lock);
1222 + found = 1;
1223 + break;
1224 + }
1225 + }
1226 + spin_unlock(&qchan->queue_lock);
1227 + if (found)
1228 + break;
1229 + }
1230 + }
1231 +
1232 + dpaa2_io_service_rearm(NULL, ctx);
1233 +}
1234 +
1235 +static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
1236 +{
1237 + int err, i, num;
1238 + struct device *dev = priv->dev;
1239 + struct dpaa2_qdma_priv_per_prio *ppriv;
1240 +
1241 + num = priv->num_pairs;
1242 + ppriv = priv->ppriv;
1243 + for (i = 0; i < num; i++) {
1244 + ppriv->nctx.is_cdan = 0;
1245 + ppriv->nctx.desired_cpu = 1;
1246 + ppriv->nctx.id = ppriv->rsp_fqid;
1247 + ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
1248 + err = dpaa2_io_service_register(NULL, &ppriv->nctx);
1249 + if (err) {
1250 + dev_err(dev, "Notification register failed\n");
1251 + goto err_service;
1252 + }
1253 +
1254 + ppriv->store = dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE,
1255 + dev);
1256 + if (!ppriv->store) {
1257 + dev_err(dev, "dpaa2_io_store_create() failed\n");
1258 + goto err_store;
1259 + }
1260 +
1261 + ppriv++;
1262 + }
1263 + return 0;
1264 +
1265 +err_store:
1266 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
1267 +err_service:
1268 + ppriv--;
1269 + while (ppriv >= priv->ppriv) {
1270 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
1271 + dpaa2_io_store_destroy(ppriv->store);
1272 + ppriv--;
1273 + }
1274 + return -1;
1275 +}
1276 +
1277 +static void __cold dpaa2_dpmai_store_free(struct dpaa2_qdma_priv *priv)
1278 +{
1279 + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
1280 + int i;
1281 +
1282 + for (i = 0; i < priv->num_pairs; i++) {
1283 + dpaa2_io_store_destroy(ppriv->store);
1284 + ppriv++;
1285 + }
1286 +}
1287 +
1288 +static void __cold dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
1289 +{
1290 + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
1291 + int i;
1292 +
1293 + for (i = 0; i < priv->num_pairs; i++) {
1294 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
1295 + ppriv++;
1296 + }
1297 +}
1298 +
1299 +static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
1300 +{
1301 + int err;
1302 + struct dpdmai_rx_queue_cfg rx_queue_cfg;
1303 + struct device *dev = priv->dev;
1304 + struct dpaa2_qdma_priv_per_prio *ppriv;
1305 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
1306 + int i, num;
1307 +
1308 + num = priv->num_pairs;
1309 + ppriv = priv->ppriv;
1310 + for (i = 0; i < num; i++) {
1311 + rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
1312 + DPDMAI_QUEUE_OPT_DEST;
1313 + rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
1314 + rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
1315 + rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
1316 + rx_queue_cfg.dest_cfg.priority = ppriv->prio;
1317 + err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
1318 + rx_queue_cfg.dest_cfg.priority, &rx_queue_cfg);
1319 + if (err) {
1320 + dev_err(dev, "dpdmai_set_rx_queue() failed\n");
1321 + return err;
1322 + }
1323 +
1324 + ppriv++;
1325 + }
1326 +
1327 + return 0;
1328 +}
1329 +
1330 +static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
1331 +{
1332 + int err = 0;
1333 + struct device *dev = priv->dev;
1334 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
1335 + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
1336 + int i;
1337 +
1338 + for (i = 0; i < priv->num_pairs; i++) {
1339 + ppriv->nctx.qman64 = 0;
1340 + ppriv->nctx.dpio_id = 0;
1341 + ppriv++;
1342 + }
1343 +
1344 + err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
1345 + if (err)
1346 + dev_err(dev, "dpdmai_reset() failed\n");
1347 +
1348 + return err;
1349 +}
1350 +
1351 +static void __cold dpaa2_dpdmai_free_pool(struct dpaa2_qdma_chan *qchan,
1352 + struct list_head *head)
1353 +{
1354 + struct qdma_sg_blk *sgb_tmp, *_sgb_tmp;
1355 + /* free the QDMA SG pool block */
1356 + list_for_each_entry_safe(sgb_tmp, _sgb_tmp, head, list) {
1357 + sgb_tmp->blk_virt_addr = (void *)((struct qdma_sg_blk *)
1358 + sgb_tmp->blk_virt_addr - 1);
1359 + sgb_tmp->blk_bus_addr = sgb_tmp->blk_bus_addr
1360 + - sizeof(*sgb_tmp);
1361 + dma_pool_free(qchan->sg_blk_pool, sgb_tmp->blk_virt_addr,
1362 + sgb_tmp->blk_bus_addr);
1363 + }
1364 +
1365 +}
1366 +
1367 +static void __cold dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
1368 + struct list_head *head)
1369 +{
1370 + struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
1371 + /* free the QDMA comp resource */
1372 + list_for_each_entry_safe(comp_tmp, _comp_tmp,
1373 + head, list) {
1374 + dma_pool_free(qchan->fd_pool,
1375 + comp_tmp->fd_virt_addr,
1376 + comp_tmp->fd_bus_addr);
1377 + /* free the SG source block on comp */
1378 + dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_src_head);
1379 + /* free the SG destination block on comp */
1380 + dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_dst_head);
1381 + list_del(&comp_tmp->list);
1382 + kfree(comp_tmp);
1383 + }
1384 +
1385 +}
1386 +
1387 +static void __cold dpaa2_dpdmai_free_channels(
1388 + struct dpaa2_qdma_engine *dpaa2_qdma)
1389 +{
1390 + struct dpaa2_qdma_chan *qchan;
1391 + int num, i;
1392 +
1393 + num = dpaa2_qdma->n_chans;
1394 + for (i = 0; i < num; i++) {
1395 + qchan = &dpaa2_qdma->chans[i];
1396 + dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
1397 + dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
1398 + dpaa2_dpdmai_free_pool(qchan, &qchan->sgb_free);
1399 + dma_pool_destroy(qchan->fd_pool);
1400 + dma_pool_destroy(qchan->sg_blk_pool);
1401 + }
1402 +}
1403 +
1404 +static int dpaa2_dpdmai_alloc_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
1405 +{
1406 + struct dpaa2_qdma_chan *dpaa2_chan;
1407 + struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;
1408 + int i;
1409 +
1410 + INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
1411 + for (i = 0; i < dpaa2_qdma->n_chans; i++) {
1412 + dpaa2_chan = &dpaa2_qdma->chans[i];
1413 + dpaa2_chan->qdma = dpaa2_qdma;
1414 + dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
1415 + vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
1416 +
1417 + dpaa2_chan->fd_pool = dma_pool_create("fd_pool",
1418 + dev, FD_POOL_SIZE, 32, 0);
1419 + if (!dpaa2_chan->fd_pool)
1420 + return -1;
1421 + dpaa2_chan->sg_blk_pool = dma_pool_create("sg_blk_pool",
1422 + dev, SG_POOL_SIZE, 32, 0);
1423 + if (!dpaa2_chan->sg_blk_pool)
1424 + return -1;
1425 +
1426 + spin_lock_init(&dpaa2_chan->queue_lock);
1427 + INIT_LIST_HEAD(&dpaa2_chan->comp_used);
1428 + INIT_LIST_HEAD(&dpaa2_chan->comp_free);
1429 + INIT_LIST_HEAD(&dpaa2_chan->sgb_free);
1430 + }
1431 + return 0;
1432 +}
1433 +
1434 +static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
1435 +{
1436 + struct dpaa2_qdma_priv *priv;
1437 + struct device *dev = &dpdmai_dev->dev;
1438 + struct dpaa2_qdma_engine *dpaa2_qdma;
1439 + int err;
1440 +
1441 + priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1442 + if (!priv)
1443 + return -ENOMEM;
1444 + dev_set_drvdata(dev, priv);
1445 + priv->dpdmai_dev = dpdmai_dev;
1446 +
1447 + priv->iommu_domain = iommu_get_domain_for_dev(dev);
1448 + if (priv->iommu_domain)
1449 + smmu_disable = false;
1450 +
1451 + /* obtain a MC portal */
1452 + err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
1453 + if (err) {
1454 + dev_err(dev, "MC portal allocation failed\n");
1455 + goto err_mcportal;
1456 + }
1457 +
1458 + /* DPDMAI initialization */
1459 + err = dpaa2_qdma_setup(dpdmai_dev);
1460 + if (err) {
1461 + dev_err(dev, "dpaa2_dpdmai_setup() failed\n");
1462 + goto err_dpdmai_setup;
1463 + }
1464 +
1465 + /* DPIO */
1466 + err = dpaa2_qdma_dpio_setup(priv);
1467 + if (err) {
1468 + dev_err(dev, "dpaa2_dpdmai_dpio_setup() failed\n");
1469 + goto err_dpio_setup;
1470 + }
1471 +
1472 + /* DPDMAI binding to DPIO */
1473 + err = dpaa2_dpdmai_bind(priv);
1474 + if (err) {
1475 + dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
1476 + goto err_bind;
1477 + }
1478 +
1479 + /* DPDMAI enable */
1480 + err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
1481 + if (err) {
1482 + dev_err(dev, "dpdmai_enable() faile\n");
1483 + goto err_enable;
1484 + }
1485 +
1486 + dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
1487 + if (!dpaa2_qdma) {
1488 + err = -ENOMEM;
1489 + goto err_eng;
1490 + }
1491 +
1492 + priv->dpaa2_qdma = dpaa2_qdma;
1493 + dpaa2_qdma->priv = priv;
1494 +
1495 + dpaa2_qdma->n_chans = NUM_CH;
1496 +
1497 + err = dpaa2_dpdmai_alloc_channels(dpaa2_qdma);
1498 + if (err) {
1499 + dev_err(dev, "QDMA alloc channels faile\n");
1500 + goto err_reg;
1501 + }
1502 +
1503 + dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
1504 + dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
1505 + dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);
1506 +
1507 + dpaa2_qdma->dma_dev.dev = dev;
1508 + dpaa2_qdma->dma_dev.device_alloc_chan_resources
1509 + = dpaa2_qdma_alloc_chan_resources;
1510 + dpaa2_qdma->dma_dev.device_free_chan_resources
1511 + = dpaa2_qdma_free_chan_resources;
1512 + dpaa2_qdma->dma_dev.device_tx_status = dpaa2_qdma_tx_status;
1513 + dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
1514 + dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;
1515 +
1516 + err = dma_async_device_register(&dpaa2_qdma->dma_dev);
1517 + if (err) {
1518 + dev_err(dev, "Can't register NXP QDMA engine.\n");
1519 + goto err_reg;
1520 + }
1521 +
1522 + return 0;
1523 +
1524 +err_reg:
1525 + dpaa2_dpdmai_free_channels(dpaa2_qdma);
1526 + kfree(dpaa2_qdma);
1527 +err_eng:
1528 + dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
1529 +err_enable:
1530 + dpaa2_dpdmai_dpio_unbind(priv);
1531 +err_bind:
1532 + dpaa2_dpmai_store_free(priv);
1533 + dpaa2_dpdmai_dpio_free(priv);
1534 +err_dpio_setup:
1535 + dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
1536 +err_dpdmai_setup:
1537 + fsl_mc_portal_free(priv->mc_io);
1538 +err_mcportal:
1539 + kfree(priv->ppriv);
1540 + kfree(priv);
1541 + dev_set_drvdata(dev, NULL);
1542 + return err;
1543 +}
1544 +
1545 +static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
1546 +{
1547 + struct device *dev;
1548 + struct dpaa2_qdma_priv *priv;
1549 + struct dpaa2_qdma_engine *dpaa2_qdma;
1550 +
1551 + dev = &ls_dev->dev;
1552 + priv = dev_get_drvdata(dev);
1553 + dpaa2_qdma = priv->dpaa2_qdma;
1554 +
1555 + dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
1556 + dpaa2_dpdmai_dpio_unbind(priv);
1557 + dpaa2_dpmai_store_free(priv);
1558 + dpaa2_dpdmai_dpio_free(priv);
1559 + dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
1560 + fsl_mc_portal_free(priv->mc_io);
1561 + dev_set_drvdata(dev, NULL);
1562 + dpaa2_dpdmai_free_channels(dpaa2_qdma);
1563 +
1564 + dma_async_device_unregister(&dpaa2_qdma->dma_dev);
1565 + kfree(priv);
1566 + kfree(dpaa2_qdma);
1567 +
1568 + return 0;
1569 +}
1570 +
1571 +static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
1572 + {
1573 + .vendor = FSL_MC_VENDOR_FREESCALE,
1574 + .obj_type = "dpdmai",
1575 + },
1576 + { .vendor = 0x0 }
1577 +};
1578 +
1579 +static struct fsl_mc_driver dpaa2_qdma_driver = {
1580 + .driver = {
1581 + .name = "dpaa2-qdma",
1582 + .owner = THIS_MODULE,
1583 + },
1584 + .probe = dpaa2_qdma_probe,
1585 + .remove = dpaa2_qdma_remove,
1586 + .match_id_table = dpaa2_qdma_id_table
1587 +};
1588 +
1589 +static int __init dpaa2_qdma_driver_init(void)
1590 +{
1591 + return fsl_mc_driver_register(&(dpaa2_qdma_driver));
1592 +}
1593 +late_initcall(dpaa2_qdma_driver_init);
1594 +
1595 +static void __exit fsl_qdma_exit(void)
1596 +{
1597 + fsl_mc_driver_unregister(&(dpaa2_qdma_driver));
1598 +}
1599 +module_exit(fsl_qdma_exit);
1600 +
1601 +MODULE_DESCRIPTION("NXP DPAA2 qDMA driver");
1602 +MODULE_LICENSE("Dual BSD/GPL");
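
For readers tracing dpaa2_qdma_request_desc() above: each fd_pool object is
carved into a frame descriptor, three frame-list entries (descriptor buffer,
source, destination), and the source/destination descriptor pair that the
first frame-list entry points at (the 0x20-byte region). The sketch below is
a condensed restatement of that pointer arithmetic, not additional driver
code; FD_POOL_SIZE must cover all three regions:

	/* Condensed restatement (illustrative only) of the layout that
	 * dpaa2_qdma_request_desc() establishes in one fd_pool object. */
	static void dpaa2_qdma_comp_layout(struct dpaa2_qdma_comp *comp)
	{
		struct dpaa2_fd *fd = comp->fd_virt_addr;
		struct dpaa2_fl_entry *fl = (struct dpaa2_fl_entry *)(fd + 1);

		comp->fl_virt_addr = fl;	/* fl[0]=desc, fl[1]=src, fl[2]=dst */
		comp->fl_bus_addr = comp->fd_bus_addr + sizeof(*fd);
		comp->desc_virt_addr = fl + 3;	/* SD + DD pair */
		comp->desc_bus_addr = comp->fl_bus_addr + 3 * sizeof(*fl);
	}
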
1603 --- /dev/null
1604 +++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.h
1605 @@ -0,0 +1,227 @@
1606 +/* Copyright 2015 NXP Semiconductor Inc.
1607 + *
1608 + * Redistribution and use in source and binary forms, with or without
1609 + * modification, are permitted provided that the following conditions are met:
1610 + * * Redistributions of source code must retain the above copyright
1611 + * notice, this list of conditions and the following disclaimer.
1612 + * * Redistributions in binary form must reproduce the above copyright
1613 + * notice, this list of conditions and the following disclaimer in the
1614 + * documentation and/or other materials provided with the distribution.
1615 + * * Neither the name of NXP Semiconductor nor the
1616 + * names of its contributors may be used to endorse or promote products
1617 + * derived from this software without specific prior written permission.
1618 + *
1619 + *
1620 + * ALTERNATIVELY, this software may be distributed under the terms of the
1621 + * GNU General Public License ("GPL") as published by the Free Software
1622 + * Foundation, either version 2 of that License or (at your option) any
1623 + * later version.
1624 + *
1625 + * THIS SOFTWARE IS PROVIDED BY NXP Semiconductor ``AS IS'' AND ANY
1626 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1627 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1628 + * DISCLAIMED. IN NO EVENT SHALL NXP Semiconductor BE LIABLE FOR ANY
1629 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1630 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1631 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1632 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1633 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1634 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1635 + */
1636 +
1637 +#ifndef __DPAA2_QDMA_H
1638 +#define __DPAA2_QDMA_H
1639 +
1640 +#define LONG_FORMAT 1
1641 +
1642 +#define DPAA2_QDMA_STORE_SIZE 16
1643 +#define NUM_CH 8
1644 +#define NUM_SG_PER_BLK 16
1645 +
1646 +#define QDMA_DMR_OFFSET 0x0
1647 +#define QDMA_DQ_EN (0 << 30)
1648 +#define QDMA_DQ_DIS (1 << 30)
1649 +
1650 +#define QDMA_DSR_M_OFFSET 0x10004
1651 +
1652 +struct dpaa2_qdma_sd_d {
1653 + uint32_t rsv:32;
1654 + union {
1655 + struct {
1656 + uint32_t ssd:12; /* source stride distance */
1657 + uint32_t sss:12; /* source stride size */
1658 + uint32_t rsv1:8;
1659 + } sdf;
1660 + struct {
1661 + uint32_t dsd:12; /* Destination stride distance */
1662 + uint32_t dss:12; /* Destination stride size */
1663 + uint32_t rsv2:8;
1664 + } ddf;
1665 + } df;
1666 + uint32_t rbpcmd; /* Route-by-port command */
1667 + uint32_t cmd;
1668 +} __attribute__((__packed__));
1669 +/* Source descriptor command read transaction type for RBP=0:
1670 + coherent copy of cacheable memory */
1671 +#define QDMA_SD_CMD_RDTTYPE_COHERENT (0xb << 28)
1672 +/* Destination descriptor command write transaction type for RBP=0:
1673 + coherent copy of cacheable memory */
1674 +#define QDMA_DD_CMD_WRTTYPE_COHERENT (0x6 << 28)
1675 +
1676 +#define QDMA_SG_FMT_SDB 0x0 /* single data buffer */
1677 +#define QDMA_SG_FMT_FDS 0x1 /* frame data section */
1678 +#define QDMA_SG_FMT_SGTE 0x2 /* SGT extension */
1679 +#define QDMA_SG_SL_SHORT 0x1 /* short length */
1680 +#define QDMA_SG_SL_LONG	0x0	/* long length */
1681 +#define QDMA_SG_F 0x1 /* last sg entry */
1682 +struct dpaa2_qdma_sg {
1683 + uint32_t addr_lo; /* address 0:31 */
1684 + uint32_t addr_hi:17; /* address 32:48 */
1685 + uint32_t rsv:15;
1686 + union {
1687 + uint32_t data_len_sl0; /* SL=0, the long format */
1688 + struct {
1689 + uint32_t len:17; /* SL=1, the short format */
1690 + uint32_t reserve:3;
1691 + uint32_t sf:1;
1692 + uint32_t sr:1;
1693 + uint32_t size:10; /* buff size */
1694 + } data_len_sl1;
1695 + } data_len; /* AVAIL_LENGTH */
1696 + struct {
1697 + uint32_t bpid:14;
1698 + uint32_t ivp:1;
1699 + uint32_t mbt:1;
1700 + uint32_t offset:12;
1701 + uint32_t fmt:2;
1702 + uint32_t sl:1;
1703 + uint32_t f:1;
1704 + } ctrl;
1705 +} __attribute__((__packed__));
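Read together, the bitfields above describe one scatter/gather entry: a 49-bit buffer address, a length in either long (SL=0) or short (SL=1) form, and framing control. A minimal sketch of filling a long-format, single-buffer, final entry; sg, bus_addr and len are hypothetical locals, not part of this patch:

	struct dpaa2_qdma_sg *sg;	/* points into an allocated SG block */

	sg->addr_lo = lower_32_bits(bus_addr);
	sg->addr_hi = upper_32_bits(bus_addr) & 0x1ffff;	/* 17-bit high part */
	sg->data_len.data_len_sl0 = len;	/* SL=0: full-width length */
	sg->ctrl.sl = QDMA_SG_SL_LONG;
	sg->ctrl.fmt = QDMA_SG_FMT_SDB;		/* single data buffer */
	sg->ctrl.f = QDMA_SG_F;			/* last entry in the table */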
1706 +
1707 +#define QMAN_FD_FMT_ENABLE (1) /* frame list table enable */
1708 +#define QMAN_FD_BMT_ENABLE (1 << 15) /* bypass memory translation */
1709 +#define QMAN_FD_BMT_DISABLE (0 << 15) /* bypass memory translation */
1710 +#define QMAN_FD_SL_DISABLE (0 << 14) /* short length disabled */
1711 +#define QMAN_FD_SL_ENABLE (1 << 14) /* short length enabled */
1712 +
1713 +#define QDMA_SB_FRAME (0 << 28) /* single frame */
1714 +#define QDMA_SG_FRAME (2 << 28) /* scatter gather frames */
1715 +#define QDMA_FINAL_BIT_DISABLE (0 << 31) /* final bit disable */
1716 +#define QDMA_FINAL_BIT_ENABLE (1 << 31) /* final bit enable */
1717 +
1718 +#define QDMA_FD_SHORT_FORMAT (1 << 11) /* short format */
1719 +#define QDMA_FD_LONG_FORMAT (0 << 11) /* long format */
1720 +#define QDMA_SER_DISABLE (0 << 8) /* no notification */
1721 +#define QDMA_SER_CTX (1 << 8) /* notification by FQD_CTX[fqid] */
1722 +#define QDMA_SER_DEST (2 << 8) /* notification by destination desc */
1723 +#define QDMA_SER_BOTH (3 << 8) /* source and dest notification */
1724 +#define QDMA_FD_SPF_ENALBE (1 << 30) /* source prefetch enable */
1725 +
1726 +#define QMAN_FD_VA_ENABLE (1 << 14) /* Address used is virtual address */
1727 +#define QMAN_FD_VA_DISABLE (0 << 14) /* Address used is a real address */
1728 +#define QMAN_FD_CBMT_ENABLE (1 << 15) /* Flow Context: 49bit physical address */
1729 +#define QMAN_FD_CBMT_DISABLE (0 << 15) /* Flow Context: 64bit virtual address */
1730 +#define QMAN_FD_SC_DISABLE (0 << 27) /* stashing control */
1731 +
1732 +#define QDMA_FL_FMT_SBF (0x0) /* Single buffer frame */
1733 +#define QDMA_FL_FMT_SGE 0x2 /* Scatter gather frame */
1734 +#define QDMA_FL_BMT_ENABLE (0x1 << 15) /* enable bypass memory translation */
1735 +#define QDMA_FL_BMT_DISABLE 0x0 /* disable bypass memory translation */
1736 +#define QDMA_FL_SL_LONG (0x0 << 2) /* long length */
1737 +#define QDMA_FL_SL_SHORT 0x1 /* short length */
1738 +#define QDMA_FL_F (0x1) /* last frame list bit */
1739 +/* Description of frame list table structure */
1740 +
1741 +struct dpaa2_qdma_chan {
1742 + struct virt_dma_chan vchan;
1743 + struct virt_dma_desc vdesc;
1744 + enum dma_status status;
1745 + struct dpaa2_qdma_engine *qdma;
1746 +
1747 + struct mutex dpaa2_queue_mutex;
1748 + spinlock_t queue_lock;
1749 + struct dma_pool *fd_pool;
1750 + struct dma_pool *sg_blk_pool;
1751 +
1752 + struct list_head comp_used;
1753 + struct list_head comp_free;
1754 +
1755 + struct list_head sgb_free;
1756 +};
1757 +
1758 +struct qdma_sg_blk {
1759 + dma_addr_t blk_bus_addr;
1760 + void *blk_virt_addr;
1761 + struct list_head list;
1762 +};
1763 +
1764 +struct dpaa2_qdma_comp {
1765 + dma_addr_t fd_bus_addr;
1766 + dma_addr_t fl_bus_addr;
1767 + dma_addr_t desc_bus_addr;
1768 + dma_addr_t sge_src_bus_addr;
1769 + dma_addr_t sge_dst_bus_addr;
1770 + void *fd_virt_addr;
1771 + void *fl_virt_addr;
1772 + void *desc_virt_addr;
1773 + void *sg_src_virt_addr;
1774 + void *sg_dst_virt_addr;
1775 + struct qdma_sg_blk *sg_blk;
1776 + uint32_t sg_blk_num;
1777 + struct list_head sg_src_head;
1778 + struct list_head sg_dst_head;
1779 + struct dpaa2_qdma_chan *qchan;
1780 + struct virt_dma_desc vdesc;
1781 + struct list_head list;
1782 +};
1783 +
1784 +struct dpaa2_qdma_engine {
1785 + struct dma_device dma_dev;
1786 + u32 n_chans;
1787 + struct dpaa2_qdma_chan chans[NUM_CH];
1788 +
1789 + struct dpaa2_qdma_priv *priv;
1790 +};
1791 +
1792 +/*
1793 + * dpaa2_qdma_priv - driver private data
1794 + */
1795 +struct dpaa2_qdma_priv {
1796 + int dpqdma_id;
1797 +
1798 + struct iommu_domain *iommu_domain;
1799 + struct dpdmai_attr dpdmai_attr;
1800 + struct device *dev;
1801 + struct fsl_mc_io *mc_io;
1802 + struct fsl_mc_device *dpdmai_dev;
1803 +
1804 + struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM];
1805 + struct dpdmai_tx_queue_attr tx_queue_attr[DPDMAI_PRIO_NUM];
1806 +
1807 + uint8_t num_pairs;
1808 +
1809 + struct dpaa2_qdma_engine *dpaa2_qdma;
1810 + struct dpaa2_qdma_priv_per_prio *ppriv;
1811 +};
1812 +
1813 +struct dpaa2_qdma_priv_per_prio {
1814 + int req_fqid;
1815 + int rsp_fqid;
1816 + int prio;
1817 +
1818 + struct dpaa2_io_store *store;
1819 + struct dpaa2_io_notification_ctx nctx;
1820 +
1821 + struct dpaa2_qdma_priv *priv;
1822 +};
1823 +
1824 +/* FD pool size: one FD + 3 frame list entries + 2 source/destination descriptors */
1825 +#define FD_POOL_SIZE (sizeof(struct dpaa2_fd) + \
1826 + sizeof(struct dpaa2_fl_entry) * 3 + \
1827 + sizeof(struct dpaa2_qdma_sd_d) * 2)
1828 +
1829 +/* qdma_sg_blk + 16 SGs */
1830 +#define SG_POOL_SIZE (sizeof(struct qdma_sg_blk) +\
1831 + sizeof(struct dpaa2_qdma_sg) * NUM_SG_PER_BLK)
1832 +#endif /* __DPAA2_QDMA_H */
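FD_POOL_SIZE and SG_POOL_SIZE size the per-channel dma_pools so one pool entry carries a complete job: the frame descriptor plus its three frame-list entries and two source/destination descriptors, or one qdma_sg_blk header plus NUM_SG_PER_BLK entries. A hedged sketch of the intended allocation pattern; dev and the pool name are placeholders, not taken from this patch:

	struct dma_pool *fd_pool;
	dma_addr_t fd_bus_addr;
	struct dpaa2_fd *fd;

	fd_pool = dma_pool_create("qdma_fd_pool", dev, FD_POOL_SIZE,
				  sizeof(struct dpaa2_fd), 0);
	fd = dma_pool_alloc(fd_pool, GFP_NOWAIT, &fd_bus_addr);
	/* The frame-list entries follow the FD inside the same pool entry. */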
1833 --- /dev/null
1834 +++ b/drivers/dma/dpaa2-qdma/dpdmai.c
1835 @@ -0,0 +1,515 @@
1836 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
1837 + *
1838 + * Redistribution and use in source and binary forms, with or without
1839 + * modification, are permitted provided that the following conditions are met:
1840 + * * Redistributions of source code must retain the above copyright
1841 + * notice, this list of conditions and the following disclaimer.
1842 + * * Redistributions in binary form must reproduce the above copyright
1843 + * notice, this list of conditions and the following disclaimer in the
1844 + * documentation and/or other materials provided with the distribution.
1845 + * * Neither the name of the above-listed copyright holders nor the
1846 + * names of any contributors may be used to endorse or promote products
1847 + * derived from this software without specific prior written permission.
1848 + *
1849 + *
1850 + * ALTERNATIVELY, this software may be distributed under the terms of the
1851 + * GNU General Public License ("GPL") as published by the Free Software
1852 + * Foundation, either version 2 of that License or (at your option) any
1853 + * later version.
1854 + *
1855 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
1856 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1857 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1858 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
1859 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1860 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
1861 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
1862 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
1863 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
1864 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1865 + * POSSIBILITY OF SUCH DAMAGE.
1866 + */
1867 +#include <linux/types.h>
1868 +#include <linux/io.h>
1869 +#include "fsl_dpdmai.h"
1870 +#include "fsl_dpdmai_cmd.h"
1871 +#include <linux/fsl/mc.h>
1872 +
1873 +struct dpdmai_cmd_open {
1874 + __le32 dpdmai_id;
1875 +};
1876 +
1877 +struct dpdmai_rsp_get_attributes {
1878 + __le32 id;
1879 + u8 num_of_priorities;
1880 + u8 pad0[3];
1881 + __le16 major;
1882 + __le16 minor;
1883 +};
1884 +
1885 +
1886 +struct dpdmai_cmd_queue {
1887 + __le32 dest_id;
1888 + u8 priority;
1889 + u8 queue;
1890 + u8 dest_type;
1891 + u8 pad;
1892 + __le64 user_ctx;
1893 + union {
1894 + __le32 options;
1895 + __le32 fqid;
1896 + };
1897 +};
1898 +
1899 +struct dpdmai_rsp_get_tx_queue {
1900 + __le64 pad;
1901 + __le32 fqid;
1902 +};
1903 +
1904 +
1905 +int dpdmai_open(struct fsl_mc_io *mc_io,
1906 + uint32_t cmd_flags,
1907 + int dpdmai_id,
1908 + uint16_t *token)
1909 +{
1910 + struct fsl_mc_command cmd = { 0 };
1911 + struct dpdmai_cmd_open *cmd_params;
1912 + int err;
1913 +
1914 + /* prepare command */
1915 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
1916 + cmd_flags,
1917 + 0);
1918 +
1919 + cmd_params = (struct dpdmai_cmd_open *)cmd.params;
1920 + cmd_params->dpdmai_id = cpu_to_le32(dpdmai_id);
1921 +
1922 + /* send command to mc*/
1923 + err = mc_send_command(mc_io, &cmd);
1924 + if (err)
1925 + return err;
1926 +
1927 + /* retrieve response parameters */
1928 + *token = mc_cmd_hdr_read_token(&cmd);
1929 + return 0;
1930 +}
1931 +
1932 +int dpdmai_close(struct fsl_mc_io *mc_io,
1933 + uint32_t cmd_flags,
1934 + uint16_t token)
1935 +{
1936 + struct fsl_mc_command cmd = { 0 };
1937 +
1938 + /* prepare command */
1939 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE,
1940 + cmd_flags, token);
1941 +
1942 + /* send command to mc*/
1943 + return mc_send_command(mc_io, &cmd);
1944 +}
1945 +
1946 +int dpdmai_create(struct fsl_mc_io *mc_io,
1947 + uint32_t cmd_flags,
1948 + const struct dpdmai_cfg *cfg,
1949 + uint16_t *token)
1950 +{
1951 + struct fsl_mc_command cmd = { 0 };
1952 + int err;
1953 +
1954 + /* prepare command */
1955 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
1956 + cmd_flags,
1957 + 0);
1958 + DPDMAI_CMD_CREATE(cmd, cfg);
1959 +
1960 + /* send command to mc*/
1961 + err = mc_send_command(mc_io, &cmd);
1962 + if (err)
1963 + return err;
1964 +
1965 + /* retrieve response parameters */
1966 + *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
1967 +
1968 + return 0;
1969 +}
1970 +
1971 +int dpdmai_destroy(struct fsl_mc_io *mc_io,
1972 + uint32_t cmd_flags,
1973 + uint16_t token)
1974 +{
1975 + struct fsl_mc_command cmd = { 0 };
1976 +
1977 + /* prepare command */
1978 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
1979 + cmd_flags,
1980 + token);
1981 +
1982 + /* send command to mc*/
1983 + return mc_send_command(mc_io, &cmd);
1984 +}
1985 +
1986 +int dpdmai_enable(struct fsl_mc_io *mc_io,
1987 + uint32_t cmd_flags,
1988 + uint16_t token)
1989 +{
1990 + struct fsl_mc_command cmd = { 0 };
1991 +
1992 + /* prepare command */
1993 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE,
1994 + cmd_flags,
1995 + token);
1996 +
1997 + /* send command to mc*/
1998 + return mc_send_command(mc_io, &cmd);
1999 +}
2000 +
2001 +int dpdmai_disable(struct fsl_mc_io *mc_io,
2002 + uint32_t cmd_flags,
2003 + uint16_t token)
2004 +{
2005 + struct fsl_mc_command cmd = { 0 };
2006 +
2007 + /* prepare command */
2008 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE,
2009 + cmd_flags,
2010 + token);
2011 +
2012 + /* send command to mc*/
2013 + return mc_send_command(mc_io, &cmd);
2014 +}
2015 +
2016 +int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
2017 + uint32_t cmd_flags,
2018 + uint16_t token,
2019 + int *en)
2020 +{
2021 + struct fsl_mc_command cmd = { 0 };
2022 + int err;
2023 + /* prepare command */
2024 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED,
2025 + cmd_flags,
2026 + token);
2027 +
2028 + /* send command to mc*/
2029 + err = mc_send_command(mc_io, &cmd);
2030 + if (err)
2031 + return err;
2032 +
2033 + /* retrieve response parameters */
2034 + DPDMAI_RSP_IS_ENABLED(cmd, *en);
2035 +
2036 + return 0;
2037 +}
2038 +
2039 +int dpdmai_reset(struct fsl_mc_io *mc_io,
2040 + uint32_t cmd_flags,
2041 + uint16_t token)
2042 +{
2043 + struct fsl_mc_command cmd = { 0 };
2044 +
2045 + /* prepare command */
2046 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET,
2047 + cmd_flags,
2048 + token);
2049 +
2050 + /* send command to mc*/
2051 + return mc_send_command(mc_io, &cmd);
2052 +}
2053 +
2054 +int dpdmai_get_irq(struct fsl_mc_io *mc_io,
2055 + uint32_t cmd_flags,
2056 + uint16_t token,
2057 + uint8_t irq_index,
2058 + int *type,
2059 + struct dpdmai_irq_cfg *irq_cfg)
2060 +{
2061 + struct fsl_mc_command cmd = { 0 };
2062 + int err;
2063 +
2064 + /* prepare command */
2065 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ,
2066 + cmd_flags,
2067 + token);
2068 + DPDMAI_CMD_GET_IRQ(cmd, irq_index);
2069 +
2070 + /* send command to mc*/
2071 + err = mc_send_command(mc_io, &cmd);
2072 + if (err)
2073 + return err;
2074 +
2075 + /* retrieve response parameters */
2076 + DPDMAI_RSP_GET_IRQ(cmd, *type, irq_cfg);
2077 +
2078 + return 0;
2079 +}
2080 +
2081 +int dpdmai_set_irq(struct fsl_mc_io *mc_io,
2082 + uint32_t cmd_flags,
2083 + uint16_t token,
2084 + uint8_t irq_index,
2085 + struct dpdmai_irq_cfg *irq_cfg)
2086 +{
2087 + struct fsl_mc_command cmd = { 0 };
2088 +
2089 + /* prepare command */
2090 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ,
2091 + cmd_flags,
2092 + token);
2093 + DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
2094 +
2095 + /* send command to mc*/
2096 + return mc_send_command(mc_io, &cmd);
2097 +}
2098 +
2099 +int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
2100 + uint32_t cmd_flags,
2101 + uint16_t token,
2102 + uint8_t irq_index,
2103 + uint8_t *en)
2104 +{
2105 + struct fsl_mc_command cmd = { 0 };
2106 + int err;
2107 +
2108 + /* prepare command */
2109 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_ENABLE,
2110 + cmd_flags,
2111 + token);
2112 + DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
2113 +
2114 + /* send command to mc*/
2115 + err = mc_send_command(mc_io, &cmd);
2116 + if (err)
2117 + return err;
2118 +
2119 + /* retrieve response parameters */
2120 + DPDMAI_RSP_GET_IRQ_ENABLE(cmd, *en);
2121 +
2122 + return 0;
2123 +}
2124 +
2125 +int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
2126 + uint32_t cmd_flags,
2127 + uint16_t token,
2128 + uint8_t irq_index,
2129 + uint8_t en)
2130 +{
2131 + struct fsl_mc_command cmd = { 0 };
2132 +
2133 + /* prepare command */
2134 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_ENABLE,
2135 + cmd_flags,
2136 + token);
2137 + DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
2138 +
2139 + /* send command to mc*/
2140 + return mc_send_command(mc_io, &cmd);
2141 +}
2142 +
2143 +int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
2144 + uint32_t cmd_flags,
2145 + uint16_t token,
2146 + uint8_t irq_index,
2147 + uint32_t *mask)
2148 +{
2149 + struct fsl_mc_command cmd = { 0 };
2150 + int err;
2151 +
2152 + /* prepare command */
2153 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_MASK,
2154 + cmd_flags,
2155 + token);
2156 + DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index);
2157 +
2158 + /* send command to mc*/
2159 + err = mc_send_command(mc_io, &cmd);
2160 + if (err)
2161 + return err;
2162 +
2163 + /* retrieve response parameters */
2164 + DPDMAI_RSP_GET_IRQ_MASK(cmd, *mask);
2165 +
2166 + return 0;
2167 +}
2168 +
2169 +int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
2170 + uint32_t cmd_flags,
2171 + uint16_t token,
2172 + uint8_t irq_index,
2173 + uint32_t mask)
2174 +{
2175 + struct fsl_mc_command cmd = { 0 };
2176 +
2177 + /* prepare command */
2178 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_MASK,
2179 + cmd_flags,
2180 + token);
2181 + DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
2182 +
2183 + /* send command to mc*/
2184 + return mc_send_command(mc_io, &cmd);
2185 +}
2186 +
2187 +int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
2188 + uint32_t cmd_flags,
2189 + uint16_t token,
2190 + uint8_t irq_index,
2191 + uint32_t *status)
2192 +{
2193 + struct fsl_mc_command cmd = { 0 };
2194 + int err;
2195 +
2196 + /* prepare command */
2197 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_STATUS,
2198 + cmd_flags,
2199 + token);
2200 + DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
2201 +
2202 + /* send command to mc*/
2203 + err = mc_send_command(mc_io, &cmd);
2204 + if (err)
2205 + return err;
2206 +
2207 + /* retrieve response parameters */
2208 + DPDMAI_RSP_GET_IRQ_STATUS(cmd, *status);
2209 +
2210 + return 0;
2211 +}
2212 +
2213 +int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
2214 + uint32_t cmd_flags,
2215 + uint16_t token,
2216 + uint8_t irq_index,
2217 + uint32_t status)
2218 +{
2219 + struct fsl_mc_command cmd = { 0 };
2220 +
2221 + /* prepare command */
2222 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLEAR_IRQ_STATUS,
2223 + cmd_flags,
2224 + token);
2225 + DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
2226 +
2227 + /* send command to mc*/
2228 + return mc_send_command(mc_io, &cmd);
2229 +}
2230 +
2231 +int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
2232 + uint32_t cmd_flags,
2233 + uint16_t token,
2234 + struct dpdmai_attr *attr)
2235 +{
2236 + struct fsl_mc_command cmd = { 0 };
2237 + int err;
2238 + struct dpdmai_rsp_get_attributes *rsp_params;
2239 +
2240 + /* prepare command */
2241 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR,
2242 + cmd_flags,
2243 + token);
2244 +
2245 + /* send command to mc*/
2246 + err = mc_send_command(mc_io, &cmd);
2247 + if (err)
2248 + return err;
2249 +
2250 + /* retrieve response parameters */
2251 + rsp_params = (struct dpdmai_rsp_get_attributes *)cmd.params;
2252 + attr->id = le32_to_cpu(rsp_params->id);
2253 + attr->version.major = le16_to_cpu(rsp_params->major);
2254 + attr->version.minor = le16_to_cpu(rsp_params->minor);
2255 + attr->num_of_priorities = rsp_params->num_of_priorities;
2256 +
2258 + return 0;
2259 +}
2260 +
2261 +int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
2262 + uint32_t cmd_flags,
2263 + uint16_t token,
2264 + uint8_t priority,
2265 + const struct dpdmai_rx_queue_cfg *cfg)
2266 +{
2267 + struct fsl_mc_command cmd = { 0 };
2268 + struct dpdmai_cmd_queue *cmd_params;
2269 +
2270 + /* prepare command */
2271 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE,
2272 + cmd_flags,
2273 + token);
2274 +
2275 + cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
2276 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
2277 + cmd_params->priority = cfg->dest_cfg.priority;
2278 + cmd_params->queue = priority;
2279 + cmd_params->dest_type = cfg->dest_cfg.dest_type;
2280 + cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
2281 + cmd_params->options = cpu_to_le32(cfg->options);
2282 +
2284 + /* send command to mc*/
2285 + return mc_send_command(mc_io, &cmd);
2286 +}
2287 +
2288 +int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
2289 + uint32_t cmd_flags,
2290 + uint16_t token,
2291 + uint8_t priority, struct dpdmai_rx_queue_attr *attr)
2292 +{
2293 + struct fsl_mc_command cmd = { 0 };
2294 + struct dpdmai_cmd_queue *cmd_params;
2295 + int err;
2296 +
2297 + /* prepare command */
2298 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE,
2299 + cmd_flags,
2300 + token);
2301 +
2302 + cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
2303 + cmd_params->queue = priority;
2304 +
2305 + /* send command to mc*/
2306 + err = mc_send_command(mc_io, &cmd);
2307 + if (err)
2308 + return err;
2309 +
2310 + /* retrieve response parameters */
2311 + attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
2312 + attr->dest_cfg.priority = cmd_params->priority;
2313 + attr->dest_cfg.dest_type = cmd_params->dest_type;
2314 + attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
2315 + attr->fqid = le32_to_cpu(cmd_params->fqid);
2316 +
2317 + return 0;
2318 +}
2319 +
2320 +int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
2321 + uint32_t cmd_flags,
2322 + uint16_t token,
2323 + uint8_t priority,
2324 + struct dpdmai_tx_queue_attr *attr)
2325 +{
2326 + struct fsl_mc_command cmd = { 0 };
2327 + struct dpdmai_cmd_queue *cmd_params;
2328 + struct dpdmai_rsp_get_tx_queue *rsp_params;
2329 + int err;
2330 +
2331 + /* prepare command */
2332 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE,
2333 + cmd_flags,
2334 + token);
2335 +
2336 + cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
2337 + cmd_params->queue = priority;
2338 +
2339 + /* send command to mc*/
2340 + err = mc_send_command(mc_io, &cmd);
2341 + if (err)
2342 + return err;
2343 +
2344 +	/* retrieve response parameters */
2346 + rsp_params = (struct dpdmai_rsp_get_tx_queue *)cmd.params;
2347 + attr->fqid = le32_to_cpu(rsp_params->fqid);
2348 +
2349 + return 0;
2350 +}
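End to end, the wrappers in this file map onto a short bring-up sequence against the management complex. A sketch with error handling elided; mc_io, dpdmai_id, dpio_id and ctx stand in for values the caller owns, and the flag and enum names come from fsl_dpdmai.h below:

	uint16_t token;
	struct dpdmai_attr attr;
	struct dpdmai_rx_queue_cfg rx_cfg = { 0 };
	int err, i;

	err = dpdmai_open(mc_io, 0, dpdmai_id, &token);
	err = dpdmai_get_attributes(mc_io, 0, token, &attr);

	rx_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX | DPDMAI_QUEUE_OPT_DEST;
	rx_cfg.user_ctx = (uint64_t)(uintptr_t)ctx;
	rx_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
	rx_cfg.dest_cfg.dest_id = dpio_id;
	rx_cfg.dest_cfg.priority = 0;
	for (i = 0; i < attr.num_of_priorities; i++)
		err = dpdmai_set_rx_queue(mc_io, 0, token, i, &rx_cfg);

	err = dpdmai_enable(mc_io, 0, token);
	/* ...enqueue work...; tear down with dpdmai_disable() and dpdmai_close() */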
2351 --- /dev/null
2352 +++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai.h
2353 @@ -0,0 +1,521 @@
2354 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
2355 + *
2356 + * Redistribution and use in source and binary forms, with or without
2357 + * modification, are permitted provided that the following conditions are met:
2358 + * * Redistributions of source code must retain the above copyright
2359 + * notice, this list of conditions and the following disclaimer.
2360 + * * Redistributions in binary form must reproduce the above copyright
2361 + * notice, this list of conditions and the following disclaimer in the
2362 + * documentation and/or other materials provided with the distribution.
2363 + * * Neither the name of the above-listed copyright holders nor the
2364 + * names of any contributors may be used to endorse or promote products
2365 + * derived from this software without specific prior written permission.
2366 + *
2367 + *
2368 + * ALTERNATIVELY, this software may be distributed under the terms of the
2369 + * GNU General Public License ("GPL") as published by the Free Software
2370 + * Foundation, either version 2 of that License or (at your option) any
2371 + * later version.
2372 + *
2373 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
2374 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2375 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2376 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
2377 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2378 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2379 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2380 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
2381 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
2382 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
2383 + * POSSIBILITY OF SUCH DAMAGE.
2384 + */
2385 +#ifndef __FSL_DPDMAI_H
2386 +#define __FSL_DPDMAI_H
2387 +
2388 +struct fsl_mc_io;
2389 +
2390 +/* Data Path DMA Interface API
2391 + * Contains initialization APIs and runtime control APIs for DPDMAI
2392 + */
2393 +
2394 +/* General DPDMAI macros */
2395 +
2396 +/**
2397 + * Maximum number of Tx/Rx priorities per DPDMAI object
2398 + */
2399 +#define DPDMAI_PRIO_NUM 2
2400 +
2401 +/**
2402 + * All queues considered; see dpdmai_set_rx_queue()
2403 + */
2404 +#define DPDMAI_ALL_QUEUES (uint8_t)(-1)
2405 +
2406 +/**
2407 + * dpdmai_open() - Open a control session for the specified object
2408 + * @mc_io: Pointer to MC portal's I/O object
2409 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2410 + * @dpdmai_id: DPDMAI unique ID
2411 + * @token: Returned token; use in subsequent API calls
2412 + *
2413 + * This function can be used to open a control session for an
2414 + * already created object; an object may have been declared in
2415 + * the DPL or by calling the dpdmai_create() function.
2416 + * This function returns a unique authentication token,
2417 + * associated with the specific object ID and the specific MC
2418 + * portal; this token must be used in all subsequent commands for
2419 + * this specific object.
2420 + *
2421 + * Return: '0' on Success; Error code otherwise.
2422 + */
2423 +int dpdmai_open(struct fsl_mc_io *mc_io,
2424 + uint32_t cmd_flags,
2425 + int dpdmai_id,
2426 + uint16_t *token);
2427 +
2428 +/**
2429 + * dpdmai_close() - Close the control session of the object
2430 + * @mc_io: Pointer to MC portal's I/O object
2431 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2432 + * @token: Token of DPDMAI object
2433 + *
2434 + * After this function is called, no further operations are
2435 + * allowed on the object without opening a new control session.
2436 + *
2437 + * Return: '0' on Success; Error code otherwise.
2438 + */
2439 +int dpdmai_close(struct fsl_mc_io *mc_io,
2440 + uint32_t cmd_flags,
2441 + uint16_t token);
2442 +
2443 +/**
2444 + * struct dpdmai_cfg - Structure representing DPDMAI configuration
2445 + * @priorities: Priorities for the DMA hardware processing; valid priorities are
2446 + *		configured with values 1-8; the entry following the last valid entry
2447 + * should be configured with 0
2448 + */
2449 +struct dpdmai_cfg {
2450 + uint8_t priorities[DPDMAI_PRIO_NUM];
2451 +};
2452 +
2453 +/**
2454 + * dpdmai_create() - Create the DPDMAI object
2455 + * @mc_io: Pointer to MC portal's I/O object
2456 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2457 + * @cfg: Configuration structure
2458 + * @token: Returned token; use in subsequent API calls
2459 + *
2460 + * Create the DPDMAI object, allocate required resources and
2461 + * perform required initialization.
2462 + *
2463 + * The object can be created either by declaring it in the
2464 + * DPL file, or by calling this function.
2465 + *
2466 + * This function returns a unique authentication token,
2467 + * associated with the specific object ID and the specific MC
2468 + * portal; this token must be used in all subsequent calls to
2469 + * this specific object. For objects that are created using the
2470 + * DPL file, call dpdmai_open() function to get an authentication
2471 + * token first.
2472 + *
2473 + * Return: '0' on Success; Error code otherwise.
2474 + */
2475 +int dpdmai_create(struct fsl_mc_io *mc_io,
2476 + uint32_t cmd_flags,
2477 + const struct dpdmai_cfg *cfg,
2478 + uint16_t *token);
2479 +
2480 +/**
2481 + * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
2482 + * @mc_io: Pointer to MC portal's I/O object
2483 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2484 + * @token: Token of DPDMAI object
2485 + *
2486 + * Return: '0' on Success; error code otherwise.
2487 + */
2488 +int dpdmai_destroy(struct fsl_mc_io *mc_io,
2489 + uint32_t cmd_flags,
2490 + uint16_t token);
2491 +
2492 +/**
2493 + * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
2494 + * @mc_io: Pointer to MC portal's I/O object
2495 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2496 + * @token: Token of DPDMAI object
2497 + *
2498 + * Return: '0' on Success; Error code otherwise.
2499 + */
2500 +int dpdmai_enable(struct fsl_mc_io *mc_io,
2501 + uint32_t cmd_flags,
2502 + uint16_t token);
2503 +
2504 +/**
2505 + * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames.
2506 + * @mc_io: Pointer to MC portal's I/O object
2507 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2508 + * @token: Token of DPDMAI object
2509 + *
2510 + * Return: '0' on Success; Error code otherwise.
2511 + */
2512 +int dpdmai_disable(struct fsl_mc_io *mc_io,
2513 + uint32_t cmd_flags,
2514 + uint16_t token);
2515 +
2516 +/**
2517 + * dpdmai_is_enabled() - Check if the DPDMAI is enabled.
2518 + * @mc_io: Pointer to MC portal's I/O object
2519 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2520 + * @token: Token of DPDMAI object
2521 + * @en: Returns '1' if object is enabled; '0' otherwise
2522 + *
2523 + * Return: '0' on Success; Error code otherwise.
2524 + */
2525 +int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
2526 + uint32_t cmd_flags,
2527 + uint16_t token,
2528 + int *en);
2529 +
2530 +/**
2531 + * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state.
2532 + * @mc_io: Pointer to MC portal's I/O object
2533 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2534 + * @token: Token of DPDMAI object
2535 + *
2536 + * Return: '0' on Success; Error code otherwise.
2537 + */
2538 +int dpdmai_reset(struct fsl_mc_io *mc_io,
2539 + uint32_t cmd_flags,
2540 + uint16_t token);
2541 +
2542 +/**
2543 + * struct dpdmai_irq_cfg - IRQ configuration
2544 + * @addr: Address that must be written to signal a message-based interrupt
2545 + * @val: Value to write into irq_addr address
2546 + * @irq_num: A user defined number associated with this IRQ
2547 + */
2548 +struct dpdmai_irq_cfg {
2549 + uint64_t addr;
2550 + uint32_t val;
2551 + int irq_num;
2552 +};
2553 +
2554 +/**
2555 + * dpdmai_set_irq() - Set IRQ information for the DPDMAI to trigger an interrupt.
2556 + * @mc_io: Pointer to MC portal's I/O object
2557 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2558 + * @token: Token of DPDMAI object
2559 + * @irq_index: Identifies the interrupt index to configure
2560 + * @irq_cfg: IRQ configuration
2561 + *
2562 + * Return: '0' on Success; Error code otherwise.
2563 + */
2564 +int dpdmai_set_irq(struct fsl_mc_io *mc_io,
2565 + uint32_t cmd_flags,
2566 + uint16_t token,
2567 + uint8_t irq_index,
2568 + struct dpdmai_irq_cfg *irq_cfg);
2569 +
2570 +/**
2571 + * dpdmai_get_irq() - Get IRQ information from the DPDMAI
2572 + *
2573 + * @mc_io: Pointer to MC portal's I/O object
2574 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2575 + * @token: Token of DPDMAI object
2576 + * @irq_index: The interrupt index to configure
2577 + * @type: Interrupt type: 0 represents message interrupt
2578 + * type (both irq_addr and irq_val are valid)
2579 + * @irq_cfg: IRQ attributes
2580 + *
2581 + * Return: '0' on Success; Error code otherwise.
2582 + */
2583 +int dpdmai_get_irq(struct fsl_mc_io *mc_io,
2584 + uint32_t cmd_flags,
2585 + uint16_t token,
2586 + uint8_t irq_index,
2587 + int *type,
2588 + struct dpdmai_irq_cfg *irq_cfg);
2589 +
2590 +/**
2591 + * dpdmai_set_irq_enable() - Set overall interrupt state.
2592 + * @mc_io: Pointer to MC portal's I/O object
2593 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2594 + * @token: Token of DPDMAI object
2595 + * @irq_index: The interrupt index to configure
2596 + * @en: Interrupt state - enable = 1, disable = 0
2597 + *
2598 + * Allows GPP software to control when interrupts are generated.
2599 + * Each interrupt can have up to 32 causes. The enable/disable controls the
2600 + * overall interrupt state. If the interrupt is disabled, no causes will
2601 + * trigger an interrupt.
2602 + *
2603 + * Return: '0' on Success; Error code otherwise.
2604 + */
2605 +int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
2606 + uint32_t cmd_flags,
2607 + uint16_t token,
2608 + uint8_t irq_index,
2609 + uint8_t en);
2610 +
2611 +/**
2612 + * dpdmai_get_irq_enable() - Get overall interrupt state
2613 + * @mc_io: Pointer to MC portal's I/O object
2614 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2615 + * @token: Token of DPDMAI object
2616 + * @irq_index: The interrupt index to configure
2617 + * @en: Returned Interrupt state - enable = 1, disable = 0
2618 + *
2619 + * Return: '0' on Success; Error code otherwise.
2620 + */
2621 +int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
2622 + uint32_t cmd_flags,
2623 + uint16_t token,
2624 + uint8_t irq_index,
2625 + uint8_t *en);
2626 +
2627 +/**
2628 + * dpdmai_set_irq_mask() - Set interrupt mask.
2629 + * @mc_io: Pointer to MC portal's I/O object
2630 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2631 + * @token: Token of DPDMAI object
2632 + * @irq_index: The interrupt index to configure
2633 + * @mask: event mask to trigger interrupt;
2634 + * each bit:
2635 + * 0 = ignore event
2636 + * 1 = consider event for asserting IRQ
2637 + *
2638 + * Every interrupt can have up to 32 causes and the interrupt model supports
2639 + * masking/unmasking each cause independently
2640 + *
2641 + * Return: '0' on Success; Error code otherwise.
2642 + */
2643 +int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
2644 + uint32_t cmd_flags,
2645 + uint16_t token,
2646 + uint8_t irq_index,
2647 + uint32_t mask);
2648 +
2649 +/**
2650 + * dpdmai_get_irq_mask() - Get interrupt mask.
2651 + * @mc_io: Pointer to MC portal's I/O object
2652 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2653 + * @token: Token of DPDMAI object
2654 + * @irq_index: The interrupt index to configure
2655 + * @mask: Returned event mask to trigger interrupt
2656 + *
2657 + * Every interrupt can have up to 32 causes and the interrupt model supports
2658 + * masking/unmasking each cause independently
2659 + *
2660 + * Return: '0' on Success; Error code otherwise.
2661 + */
2662 +int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
2663 + uint32_t cmd_flags,
2664 + uint16_t token,
2665 + uint8_t irq_index,
2666 + uint32_t *mask);
2667 +
2668 +/**
2669 + * dpdmai_get_irq_status() - Get the current status of any pending interrupts
2670 + * @mc_io: Pointer to MC portal's I/O object
2671 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2672 + * @token: Token of DPDMAI object
2673 + * @irq_index: The interrupt index to configure
2674 + * @status: Returned interrupts status - one bit per cause:
2675 + * 0 = no interrupt pending
2676 + * 1 = interrupt pending
2677 + *
2678 + * Return: '0' on Success; Error code otherwise.
2679 + */
2680 +int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
2681 + uint32_t cmd_flags,
2682 + uint16_t token,
2683 + uint8_t irq_index,
2684 + uint32_t *status);
2685 +
2686 +/**
2687 + * dpdmai_clear_irq_status() - Clear a pending interrupt's status
2688 + * @mc_io: Pointer to MC portal's I/O object
2689 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2690 + * @token: Token of DPDMAI object
2691 + * @irq_index: The interrupt index to configure
2692 + * @status: bits to clear (W1C) - one bit per cause:
2693 + * 0 = don't change
2694 + * 1 = clear status bit
2695 + *
2696 + * Return: '0' on Success; Error code otherwise.
2697 + */
2698 +int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
2699 + uint32_t cmd_flags,
2700 + uint16_t token,
2701 + uint8_t irq_index,
2702 + uint32_t status);
2703 +
2704 +/**
2705 + * struct dpdmai_attr - Structure representing DPDMAI attributes
2706 + * @id: DPDMAI object ID
2707 + * @version: DPDMAI version
2708 + * @num_of_priorities: number of priorities
2709 + */
2710 +struct dpdmai_attr {
2711 + int id;
2712 + /**
2713 + * struct version - DPDMAI version
2714 + * @major: DPDMAI major version
2715 + * @minor: DPDMAI minor version
2716 + */
2717 + struct {
2718 + uint16_t major;
2719 + uint16_t minor;
2720 + } version;
2721 + uint8_t num_of_priorities;
2722 +};
2723 +
2724 +/**
2725 + * dpdmai_get_attributes() - Retrieve DPDMAI attributes.
2726 + * @mc_io: Pointer to MC portal's I/O object
2727 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2728 + * @token: Token of DPDMAI object
2729 + * @attr: Returned object's attributes
2730 + *
2731 + * Return: '0' on Success; Error code otherwise.
2732 + */
2733 +int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
2734 + uint32_t cmd_flags,
2735 + uint16_t token,
2736 + struct dpdmai_attr *attr);
2737 +
2738 +/**
2739 + * enum dpdmai_dest - DPDMAI destination types
2740 + * @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode
2741 + * and does not generate FQDAN notifications; user is expected to dequeue
2742 + * from the queue based on polling or other user-defined method
2743 + * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
2744 + * notifications to the specified DPIO; user is expected to dequeue
2745 + * from the queue only after notification is received
2746 + * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate
2747 + * FQDAN notifications, but is connected to the specified DPCON object;
2748 + * user is expected to dequeue from the DPCON channel
2749 + */
2750 +enum dpdmai_dest {
2751 + DPDMAI_DEST_NONE = 0,
2752 + DPDMAI_DEST_DPIO = 1,
2753 + DPDMAI_DEST_DPCON = 2
2754 +};
2755 +
2756 +/**
2757 + * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters
2758 + * @dest_type: Destination type
2759 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
2760 + * @priority: Priority selection within the DPIO or DPCON channel; valid values
2761 + * are 0-1 or 0-7, depending on the number of priorities in that
2762 + * channel; not relevant for 'DPDMAI_DEST_NONE' option
2763 + */
2764 +struct dpdmai_dest_cfg {
2765 + enum dpdmai_dest dest_type;
2766 + int dest_id;
2767 + uint8_t priority;
2768 +};
2769 +
2770 +/* DPDMAI queue modification options */
2771 +
2772 +/**
2773 + * Select to modify the user's context associated with the queue
2774 + */
2775 +#define DPDMAI_QUEUE_OPT_USER_CTX 0x00000001
2776 +
2777 +/**
2778 + * Select to modify the queue's destination
2779 + */
2780 +#define DPDMAI_QUEUE_OPT_DEST 0x00000002
2781 +
2782 +/**
2783 + * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration
2784 + * @options: Flags representing the suggested modifications to the queue;
2785 + * Use any combination of 'DPDMAI_QUEUE_OPT_<X>' flags
2786 + * @user_ctx: User context value provided in the frame descriptor of each
2787 + * dequeued frame;
2788 + * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained in 'options'
2789 + * @dest_cfg: Queue destination parameters;
2790 + * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options'
2791 + */
2792 +struct dpdmai_rx_queue_cfg {
2793 + uint32_t options;
2794 + uint64_t user_ctx;
2795 + struct dpdmai_dest_cfg dest_cfg;
2797 +};
2798 +
2799 +/**
2800 + * dpdmai_set_rx_queue() - Set Rx queue configuration
2801 + * @mc_io: Pointer to MC portal's I/O object
2802 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2803 + * @token: Token of DPDMAI object
2804 + * @priority:	Select the queue relative to the number of
2805 + * priorities configured at DPDMAI creation; use
2806 + * DPDMAI_ALL_QUEUES to configure all Rx queues
2807 + * identically.
2808 + * @cfg: Rx queue configuration
2809 + *
2810 + * Return: '0' on Success; Error code otherwise.
2811 + */
2812 +int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
2813 + uint32_t cmd_flags,
2814 + uint16_t token,
2815 + uint8_t priority,
2816 + const struct dpdmai_rx_queue_cfg *cfg);
2817 +
2818 +/**
2819 + * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues
2820 + * @user_ctx: User context value provided in the frame descriptor of each
2821 + * dequeued frame
2822 + * @dest_cfg: Queue destination configuration
2823 + * @fqid: Virtual FQID value to be used for dequeue operations
2824 + */
2825 +struct dpdmai_rx_queue_attr {
2826 + uint64_t user_ctx;
2827 + struct dpdmai_dest_cfg dest_cfg;
2828 + uint32_t fqid;
2829 +};
2830 +
2831 +/**
2832 + * dpdmai_get_rx_queue() - Retrieve Rx queue attributes.
2833 + * @mc_io: Pointer to MC portal's I/O object
2834 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2835 + * @token: Token of DPDMAI object
2836 + * @priority:	Select the queue relative to the number of
2837 + * priorities configured at DPDMAI creation
2838 + * @attr: Returned Rx queue attributes
2839 + *
2840 + * Return: '0' on Success; Error code otherwise.
2841 + */
2842 +int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
2843 + uint32_t cmd_flags,
2844 + uint16_t token,
2845 + uint8_t priority,
2846 + struct dpdmai_rx_queue_attr *attr);
2847 +
2848 +/**
2849 + * struct dpdmai_tx_queue_attr - Structure representing attributes of Tx queues
2850 + * @fqid: Virtual FQID to be used for sending frames to DMA hardware
2851 + */
2853 +struct dpdmai_tx_queue_attr {
2854 + uint32_t fqid;
2855 +};
2856 +
2857 +/**
2858 + * dpdmai_get_tx_queue() - Retrieve Tx queue attributes.
2859 + * @mc_io: Pointer to MC portal's I/O object
2860 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2861 + * @token: Token of DPDMAI object
2862 + * @priority:	Select the queue relative to the number of
2863 + * priorities configured at DPDMAI creation
2864 + * @attr: Returned Tx queue attributes
2865 + *
2866 + * Return: '0' on Success; Error code otherwise.
2867 + */
2868 +int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
2869 + uint32_t cmd_flags,
2870 + uint16_t token,
2871 + uint8_t priority,
2872 + struct dpdmai_tx_queue_attr *attr);
2873 +
2874 +#endif /* __FSL_DPDMAI_H */
2875 --- /dev/null
2876 +++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
2877 @@ -0,0 +1,222 @@
2878 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
2879 + *
2880 + * Redistribution and use in source and binary forms, with or without
2881 + * modification, are permitted provided that the following conditions are met:
2882 + * * Redistributions of source code must retain the above copyright
2883 + * notice, this list of conditions and the following disclaimer.
2884 + * * Redistributions in binary form must reproduce the above copyright
2885 + * notice, this list of conditions and the following disclaimer in the
2886 + * documentation and/or other materials provided with the distribution.
2887 + * * Neither the name of the above-listed copyright holders nor the
2888 + * names of any contributors may be used to endorse or promote products
2889 + * derived from this software without specific prior written permission.
2890 + *
2891 + *
2892 + * ALTERNATIVELY, this software may be distributed under the terms of the
2893 + * GNU General Public License ("GPL") as published by the Free Software
2894 + * Foundation, either version 2 of that License or (at your option) any
2895 + * later version.
2896 + *
2897 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
2898 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2899 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2900 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
2901 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2902 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2903 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2904 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
2905 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
2906 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
2907 + * POSSIBILITY OF SUCH DAMAGE.
2908 + */
2909 +#ifndef _FSL_DPDMAI_CMD_H
2910 +#define _FSL_DPDMAI_CMD_H
2911 +
2912 +/* DPDMAI Version */
2913 +#define DPDMAI_VER_MAJOR 2
2914 +#define DPDMAI_VER_MINOR 2
2915 +
2916 +#define DPDMAI_CMD_BASE_VERSION 0
2917 +#define DPDMAI_CMD_ID_OFFSET 4
2918 +
2919 +/* Command IDs */
2920 +#define DPDMAI_CMDID_CLOSE ((0x800 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2921 +#define DPDMAI_CMDID_OPEN ((0x80E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2922 +#define DPDMAI_CMDID_CREATE ((0x90E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2923 +#define DPDMAI_CMDID_DESTROY ((0x900 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2924 +
2925 +#define DPDMAI_CMDID_ENABLE ((0x002 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2926 +#define DPDMAI_CMDID_DISABLE ((0x003 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2927 +#define DPDMAI_CMDID_GET_ATTR ((0x004 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2928 +#define DPDMAI_CMDID_RESET ((0x005 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2929 +#define DPDMAI_CMDID_IS_ENABLED ((0x006 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2930 +
2931 +#define DPDMAI_CMDID_SET_IRQ ((0x010 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2932 +#define DPDMAI_CMDID_GET_IRQ ((0x011 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2933 +#define DPDMAI_CMDID_SET_IRQ_ENABLE ((0x012 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2934 +#define DPDMAI_CMDID_GET_IRQ_ENABLE ((0x013 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2935 +#define DPDMAI_CMDID_SET_IRQ_MASK ((0x014 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2936 +#define DPDMAI_CMDID_GET_IRQ_MASK ((0x015 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2937 +#define DPDMAI_CMDID_GET_IRQ_STATUS ((0x016 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2938 +#define DPDMAI_CMDID_CLEAR_IRQ_STATUS ((0x017 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2939 +
2940 +#define DPDMAI_CMDID_SET_RX_QUEUE ((0x1A0 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2941 +#define DPDMAI_CMDID_GET_RX_QUEUE ((0x1A1 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2942 +#define DPDMAI_CMDID_GET_TX_QUEUE ((0x1A2 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2943 +
2944 +
2945 +#define MC_CMD_HDR_TOKEN_O 32 /* Token field offset */
2946 +#define MC_CMD_HDR_TOKEN_S 16 /* Token field size */
2947 +
2948 +
2949 +#define MAKE_UMASK64(_width) \
2950 + ((uint64_t)((_width) < 64 ? ((uint64_t)1 << (_width)) - 1 : \
2951 + (uint64_t)-1))
2952 +
2953 +static inline uint64_t mc_enc(int lsoffset, int width, uint64_t val)
2954 +{
2955 + return (uint64_t)(((uint64_t)val & MAKE_UMASK64(width)) << lsoffset);
2956 +}
2957 +
2958 +static inline uint64_t mc_dec(uint64_t val, int lsoffset, int width)
2959 +{
2960 + return (uint64_t)((val >> lsoffset) & MAKE_UMASK64(width));
2961 +}
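To make the packing concrete: mc_enc() masks a value to width bits and shifts it up to lsoffset; mc_dec() extracts it again. Two worked values, plus the expansion of the DPDMAI_CMD_OPEN macro defined below:

	/* mc_enc(32, 8, 0xAB) == 0x000000AB00000000ULL (8-bit field at bit 32) */
	/* mc_dec(0x000000AB00000000ULL, 32, 8) == 0xAB */
	/*
	 * DPDMAI_CMD_OPEN(cmd, id) expands to
	 *	cmd.params[0] |= mc_enc(0, 32, id);
	 * i.e. the 32-bit object id lands in the low word of params[0].
	 */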
2962 +
2963 +#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
2964 + ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
2965 +
2966 +#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \
2967 + (_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width)))
2968 +
2969 +#define MC_CMD_HDR_READ_TOKEN(_hdr) \
2970 + ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S))
2971 +
2972 +/* cmd, param, offset, width, type, arg_name */
2973 +#define DPDMAI_CMD_OPEN(cmd, dpdmai_id) \
2974 + MC_CMD_OP(cmd, 0, 0, 32, int, dpdmai_id)
2975 +
2976 +/* cmd, param, offset, width, type, arg_name */
2977 +#define DPDMAI_CMD_CREATE(cmd, cfg) \
2978 +do { \
2979 + MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->priorities[0]);\
2980 + MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priorities[1]);\
2981 +} while (0)
2982 +
2983 +/* cmd, param, offset, width, type, arg_name */
2984 +#define DPDMAI_RSP_IS_ENABLED(cmd, en) \
2985 + MC_RSP_OP(cmd, 0, 0, 1, int, en)
2986 +
2987 +/* cmd, param, offset, width, type, arg_name */
2988 +#define DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
2989 +do { \
2990 + MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
2991 + MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
2992 + MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
2993 + MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
2994 +} while (0)
2995 +
2996 +/* cmd, param, offset, width, type, arg_name */
2997 +#define DPDMAI_CMD_GET_IRQ(cmd, irq_index) \
2998 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
2999 +
3000 +/* cmd, param, offset, width, type, arg_name */
3001 +#define DPDMAI_RSP_GET_IRQ(cmd, type, irq_cfg) \
3002 +do { \
3003 + MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
3004 + MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
3005 + MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
3006 + MC_RSP_OP(cmd, 2, 32, 32, int, type); \
3007 +} while (0)
3008 +
3009 +/* cmd, param, offset, width, type, arg_name */
3010 +#define DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \
3011 +do { \
3012 + MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \
3013 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
3014 +} while (0)
3015 +
3016 +/* cmd, param, offset, width, type, arg_name */
3017 +#define DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
3018 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
3019 +
3020 +/* cmd, param, offset, width, type, arg_name */
3021 +#define DPDMAI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \
3022 + MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state)
3023 +
3024 +/* cmd, param, offset, width, type, arg_name */
3025 +#define DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
3026 +do { \
3027 + MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
3028 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
3029 +} while (0)
3030 +
3031 +/* cmd, param, offset, width, type, arg_name */
3032 +#define DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index) \
3033 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
3034 +
3035 +/* cmd, param, offset, width, type, arg_name */
3036 +#define DPDMAI_RSP_GET_IRQ_MASK(cmd, mask) \
3037 + MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
3038 +
3039 +/* cmd, param, offset, width, type, arg_name */
3040 +#define DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
3041 +do { \
3042 + MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
3043 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
3044 +} while (0)
3045 +
3046 +/* cmd, param, offset, width, type, arg_name */
3047 +#define DPDMAI_RSP_GET_IRQ_STATUS(cmd, status) \
3048 + MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
3049 +
3050 +/* cmd, param, offset, width, type, arg_name */
3051 +#define DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
3052 +do { \
3053 + MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
3054 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
3055 +} while (0)
3056 +
3057 +/* cmd, param, offset, width, type, arg_name */
3058 +#define DPDMAI_RSP_GET_ATTR(cmd, attr) \
3059 +do { \
3060 + MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \
3061 + MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->num_of_priorities); \
3062 + MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
3063 + MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
3064 +} while (0)
3065 +
3066 +/* cmd, param, offset, width, type, arg_name */
3067 +#define DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg) \
3068 +do { \
3069 + MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
3070 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \
3071 + MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority); \
3072 + MC_CMD_OP(cmd, 0, 48, 4, enum dpdmai_dest, cfg->dest_cfg.dest_type); \
3073 + MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
3074 + MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
3075 +} while (0)
3076 +
3077 +/* cmd, param, offset, width, type, arg_name */
3078 +#define DPDMAI_CMD_GET_RX_QUEUE(cmd, priority) \
3079 + MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
3080 +
3081 +/* cmd, param, offset, width, type, arg_name */
3082 +#define DPDMAI_RSP_GET_RX_QUEUE(cmd, attr) \
3083 +do { \
3084 + MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\
3085 + MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
3086 + MC_RSP_OP(cmd, 0, 48, 4, enum dpdmai_dest, attr->dest_cfg.dest_type);\
3087 + MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx);\
3088 + MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\
3089 +} while (0)
3090 +
3091 +/* cmd, param, offset, width, type, arg_name */
3092 +#define DPDMAI_CMD_GET_TX_QUEUE(cmd, priority) \
3093 + MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
3094 +
3095 +/* cmd, param, offset, width, type, arg_name */
3096 +#define DPDMAI_RSP_GET_TX_QUEUE(cmd, attr) \
3097 + MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->fqid)
3098 +
3099 +#endif /* _FSL_DPDMAI_CMD_H */
3100 --- /dev/null
3101 +++ b/drivers/dma/fsl-qdma.c
3102 @@ -0,0 +1,1278 @@
3103 +/*
3104 + * Driver for NXP Layerscape Queue direct memory access controller (qDMA)
3105 + *
3106 + * Copyright 2017 NXP
3107 + *
3108 + * Author:
3109 + * Jiaheng Fan <jiaheng.fan@nxp.com>
3110 + * Wen He <wen.he_1@nxp.com>
3111 + *
3112 + * SPDX-License-Identifier: GPL-2.0+
3113 + */
3114 +
3115 +#include <linux/interrupt.h>
3116 +#include <linux/module.h>
3117 +#include <linux/delay.h>
3118 +#include <linux/of_irq.h>
3119 +#include <linux/of_address.h>
3120 +#include <linux/of_platform.h>
3121 +#include <linux/of_dma.h>
3122 +#include <linux/dma-mapping.h>
3123 +#include <linux/dmapool.h>
3124 +#include <linux/dmaengine.h>
3125 +#include <linux/slab.h>
3126 +#include <linux/spinlock.h>
3127 +
3128 +#include "virt-dma.h"
3129 +
3130 +#define FSL_QDMA_DMR 0x0
3131 +#define FSL_QDMA_DSR 0x4
3132 +#define FSL_QDMA_DEIER 0xe00
3133 +#define FSL_QDMA_DEDR 0xe04
3134 +#define FSL_QDMA_DECFDW0R 0xe10
3135 +#define FSL_QDMA_DECFDW1R 0xe14
3136 +#define FSL_QDMA_DECFDW2R 0xe18
3137 +#define FSL_QDMA_DECFDW3R 0xe1c
3138 +#define FSL_QDMA_DECFQIDR 0xe30
3139 +#define FSL_QDMA_DECBR 0xe34
3140 +
3141 +#define FSL_QDMA_BCQMR(x) (0xc0 + 0x100 * (x))
3142 +#define FSL_QDMA_BCQSR(x) (0xc4 + 0x100 * (x))
3143 +#define FSL_QDMA_BCQEDPA_SADDR(x) (0xc8 + 0x100 * (x))
3144 +#define FSL_QDMA_BCQDPA_SADDR(x) (0xcc + 0x100 * (x))
3145 +#define FSL_QDMA_BCQEEPA_SADDR(x) (0xd0 + 0x100 * (x))
3146 +#define FSL_QDMA_BCQEPA_SADDR(x) (0xd4 + 0x100 * (x))
3147 +#define FSL_QDMA_BCQIER(x) (0xe0 + 0x100 * (x))
3148 +#define FSL_QDMA_BCQIDR(x) (0xe4 + 0x100 * (x))
3149 +
3150 +#define FSL_QDMA_SQDPAR 0x80c
3151 +#define FSL_QDMA_SQEPAR 0x814
3152 +#define FSL_QDMA_BSQMR 0x800
3153 +#define FSL_QDMA_BSQSR 0x804
3154 +#define FSL_QDMA_BSQICR 0x828
3155 +#define FSL_QDMA_CQMR 0xa00
3156 +#define FSL_QDMA_CQDSCR1 0xa08
3157 +#define FSL_QDMA_CQDSCR2 0xa0c
3158 +#define FSL_QDMA_CQIER 0xa10
3159 +#define FSL_QDMA_CQEDR 0xa14
3160 +#define FSL_QDMA_SQCCMR 0xa20
3161 +
3162 +#define FSL_QDMA_SQICR_ICEN
3163 +
3164 +#define FSL_QDMA_CQIDR_CQT 0xff000000
3165 +#define FSL_QDMA_CQIDR_SQPE 0x800000
3166 +#define FSL_QDMA_CQIDR_SQT 0x8000
3167 +
3168 +#define FSL_QDMA_BCQIER_CQTIE 0x8000
3169 +#define FSL_QDMA_BCQIER_CQPEIE 0x800000
3170 +#define FSL_QDMA_BSQICR_ICEN 0x80000000
3171 +#define FSL_QDMA_BSQICR_ICST(x) ((x) << 16)
3172 +#define FSL_QDMA_CQIER_MEIE 0x80000000
3173 +#define FSL_QDMA_CQIER_TEIE 0x1
3174 +#define FSL_QDMA_SQCCMR_ENTER_WM 0x200000
3175 +
3176 +#define FSL_QDMA_QUEUE_MAX 8
3177 +
3178 +#define FSL_QDMA_BCQMR_EN 0x80000000
3179 +#define FSL_QDMA_BCQMR_EI 0x40000000
3180 +#define FSL_QDMA_BCQMR_CD_THLD(x) ((x) << 20)
3181 +#define FSL_QDMA_BCQMR_CQ_SIZE(x) ((x) << 16)
3182 +
3183 +#define FSL_QDMA_BCQSR_QF 0x10000
3184 +#define FSL_QDMA_BCQSR_XOFF 0x1
3185 +
3186 +#define FSL_QDMA_BSQMR_EN 0x80000000
3187 +#define FSL_QDMA_BSQMR_DI 0x40000000
3188 +#define FSL_QDMA_BSQMR_CQ_SIZE(x) ((x) << 16)
3189 +
3190 +#define FSL_QDMA_BSQSR_QE 0x20000
3191 +
3192 +#define FSL_QDMA_DMR_DQD 0x40000000
3193 +#define FSL_QDMA_DSR_DB 0x80000000
3194 +
3195 +#define FSL_QDMA_COMMAND_BUFFER_SIZE 64
3196 +#define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE 32
3197 +#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN 64
3198 +#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX 16384
3199 +#define FSL_QDMA_QUEUE_NUM_MAX 8
3200 +
3201 +#define FSL_QDMA_CMD_RWTTYPE 0x4
3202 +#define FSL_QDMA_CMD_LWC 0x2
3203 +
3204 +#define FSL_QDMA_CMD_RWTTYPE_OFFSET 28
3205 +#define FSL_QDMA_CMD_NS_OFFSET 27
3206 +#define FSL_QDMA_CMD_DQOS_OFFSET 24
3207 +#define FSL_QDMA_CMD_WTHROTL_OFFSET 20
3208 +#define FSL_QDMA_CMD_DSEN_OFFSET 19
3209 +#define FSL_QDMA_CMD_LWC_OFFSET 16
3210 +
3211 +#define QDMA_CCDF_STATUS 20
3212 +#define QDMA_CCDF_OFFSET 20
3213 +#define QDMA_CCDF_MASK GENMASK(28, 20)
3214 +#define QDMA_CCDF_FOTMAT BIT(29)
3215 +#define QDMA_CCDF_SER BIT(30)
3216 +
3217 +#define QDMA_SG_FIN BIT(30)
3218 +#define QDMA_SG_EXT BIT(31)
3219 +#define QDMA_SG_LEN_MASK GENMASK(29, 0)
3220 +
3221 +#define QDMA_BIG_ENDIAN 0x00000001
3222 +#define COMP_TIMEOUT 1000
3223 +#define COMMAND_QUEUE_OVERFLLOW 10
3224 +
3225 +#define QDMA_IN(fsl_qdma_engine, addr) \
3226 + (((fsl_qdma_engine)->big_endian & QDMA_BIG_ENDIAN) ? \
3227 + ioread32be(addr) : ioread32(addr))
3228 +#define QDMA_OUT(fsl_qdma_engine, addr, val) \
3229 + (((fsl_qdma_engine)->big_endian & QDMA_BIG_ENDIAN) ? \
3230 + iowrite32be(val, addr) : iowrite32(val, addr))
3231 +
3232 +#define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x) \
3233 + (((fsl_qdma_engine)->block_offset) * (x))
3234 +
3235 +static DEFINE_PER_CPU(u64, pre_addr);
3236 +static DEFINE_PER_CPU(u64, pre_queue);
3237 +
3238 +/* qDMA Command Descriptor Formats */
3239 +
3240 +struct fsl_qdma_format {
3241 + __le32 status; /* ser, status */
3242 + __le32 cfg; /* format, offset */
3243 + union {
3244 + struct {
3245 + __le32 addr_lo; /* low 32-bits of 40-bit address */
3246 + u8 addr_hi; /* high 8-bits of 40-bit address */
3247 + u8 __reserved1[2];
3248 + u8 cfg8b_w1; /* dd, queue */
3249 + } __packed;
3250 + __le64 data;
3251 + };
3252 +} __packed;
3253 +
3254 +static inline u64
3255 +qdma_ccdf_addr_get64(const struct fsl_qdma_format *ccdf)
3256 +{
3257 + return le64_to_cpu(ccdf->data) & 0xffffffffffLLU;
3258 +}
3259 +
3260 +static inline void
3261 +qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
3262 +{
3263 + ccdf->addr_hi = upper_32_bits(addr);
3264 + ccdf->addr_lo = cpu_to_le32(lower_32_bits(addr));
3265 +}
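A worked example of the 40-bit address split these two helpers implement, with an illustrative value:

	/*
	 * For addr = 0x1234567890 (40 bits):
	 *	qdma_desc_addr_set64() stores addr_lo = 0x34567890 (as le32) and
	 *	addr_hi = 0x12 (upper_32_bits() truncated into the u8 field);
	 *	qdma_ccdf_addr_get64() recovers 0x1234567890 by masking the
	 *	overlaid 64-bit word with 0xffffffffff.
	 */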
3266 +
3267 +static inline u64
3268 +qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
3269 +{
3270 + return ccdf->cfg8b_w1 & 0xff;
3271 +}
3272 +
3273 +static inline int
3274 +qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
3275 +{
3276 + return (le32_to_cpu(ccdf->cfg) & QDMA_CCDF_MASK) >> QDMA_CCDF_OFFSET;
3277 +}
3278 +
3279 +static inline void
3280 +qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
3281 +{
3282 + ccdf->cfg = cpu_to_le32(QDMA_CCDF_FORMAT | offset);
3283 +}
3284 +
3285 +static inline int
3286 +qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
3287 +{
3288 + return (le32_to_cpu(ccdf->status) & QDMA_CCDF_MASK) >> QDMA_CCDF_STATUS;
3289 +}
3290 +
3291 +static inline void
3292 +qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
3293 +{
3294 + ccdf->status = cpu_to_le32(QDMA_CCDF_SER | status);
3295 +}
3296 +
3297 +static inline void qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
3298 +{
3299 + csgf->cfg = cpu_to_le32(len & QDMA_SG_LEN_MASK);
3300 +}
3301 +
3302 +static inline void qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
3303 +{
3304 + csgf->cfg = cpu_to_le32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
3305 +}
3306 +
3307 +static inline void qdma_csgf_set_e(struct fsl_qdma_format *csgf, int len)
3308 +{
3309 + csgf->cfg = cpu_to_le32(QDMA_SG_EXT | (len & QDMA_SG_LEN_MASK));
3310 +}
3311 +
3312 +/* qDMA Source Descriptor Format */
3313 +struct fsl_qdma_sdf {
3314 + __le32 rev3;
3315 + __le32 cfg; /* rev4, bit[0-11] - ssd, bit[12-23] - sss */
3316 + __le32 rev5;
3317 + __le32 cmd;
3318 +} __packed;
3319 +
3320 +/* qDMA Destination Descriptor Format */
3321 +struct fsl_qdma_ddf {
3322 + __le32 rev1;
3323 + __le32 cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
3324 + __le32 rev3;
3325 + __le32 cmd;
3326 +} __packed;
3327 +
3328 +struct fsl_qdma_chan {
3329 + struct virt_dma_chan vchan;
3330 + struct virt_dma_desc vdesc;
3331 + enum dma_status status;
3332 + struct fsl_qdma_engine *qdma;
3333 + struct fsl_qdma_queue *queue;
3334 +};
3335 +
3336 +struct fsl_qdma_queue {
3337 + struct fsl_qdma_format *virt_head;
3338 + struct fsl_qdma_format *virt_tail;
3339 + struct list_head comp_used;
3340 + struct list_head comp_free;
3341 + struct dma_pool *comp_pool;
3342 + struct dma_pool *desc_pool;
3343 + spinlock_t queue_lock;
3344 + dma_addr_t bus_addr;
3345 + u32 n_cq;
3346 + u32 id;
3347 + struct fsl_qdma_format *cq;
3348 + void __iomem *block_base;
3349 +};
3350 +
3351 +struct fsl_qdma_comp {
3352 + dma_addr_t bus_addr;
3353 + dma_addr_t desc_bus_addr;
3354 + void *virt_addr;
3355 + void *desc_virt_addr;
3356 + struct fsl_qdma_chan *qchan;
3357 + struct virt_dma_desc vdesc;
3358 + struct list_head list;
3359 +};
3360 +
3361 +struct fsl_qdma_engine {
3362 + struct dma_device dma_dev;
3363 + void __iomem *ctrl_base;
3364 + void __iomem *status_base;
3365 + void __iomem *block_base;
3366 + u32 n_chans;
3367 + u32 n_queues;
3368 + struct mutex fsl_qdma_mutex;
3369 + int error_irq;
3370 + int *queue_irq;
3371 + bool big_endian;
3372 + struct fsl_qdma_queue *queue;
3373 + struct fsl_qdma_queue **status;
3374 + struct fsl_qdma_chan *chans;
3375 + int block_number;
3376 + int block_offset;
3377 + int irq_base;
3378 + int desc_allocated;
3380 +};
3381 +
3382 +static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
3383 +{
3384 + return QDMA_IN(qdma, addr);
3385 +}
3386 +
3387 +static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
3388 + void __iomem *addr)
3389 +{
3390 + QDMA_OUT(qdma, addr, val);
3391 +}
3392 +
3393 +static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
3394 +{
3395 + return container_of(chan, struct fsl_qdma_chan, vchan.chan);
3396 +}
3397 +
3398 +static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
3399 +{
3400 + return container_of(vd, struct fsl_qdma_comp, vdesc);
3401 +}
3402 +
3403 +static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
3404 +{
3405 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
3406 + struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
3407 + struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
3408 + struct fsl_qdma_comp *comp_temp, *_comp_temp;
3409 + unsigned long flags;
3410 + LIST_HEAD(head);
3411 +
3412 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
3413 + vchan_get_all_descriptors(&fsl_chan->vchan, &head);
3414 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
3415 +
3416 + vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
3417 +
3418 + if (!fsl_queue->comp_pool && !fsl_queue->desc_pool)
3419 + return;
3420 +
3421 + list_for_each_entry_safe(comp_temp, _comp_temp,
3422 + &fsl_queue->comp_used, list) {
3423 + dma_pool_free(fsl_queue->comp_pool,
3424 + comp_temp->virt_addr,
3425 + comp_temp->bus_addr);
3426 + dma_pool_free(fsl_queue->desc_pool,
3427 + comp_temp->desc_virt_addr,
3428 + comp_temp->desc_bus_addr);
3429 + list_del(&comp_temp->list);
3430 + kfree(comp_temp);
3431 + }
3432 +
3433 + list_for_each_entry_safe(comp_temp, _comp_temp,
3434 + &fsl_queue->comp_free, list) {
3435 + dma_pool_free(fsl_queue->comp_pool,
3436 + comp_temp->virt_addr,
3437 + comp_temp->bus_addr);
3438 + dma_pool_free(fsl_queue->desc_pool,
3439 + comp_temp->desc_virt_addr,
3440 + comp_temp->desc_bus_addr);
3441 + list_del(&comp_temp->list);
3442 + kfree(comp_temp);
3443 + }
3444 +
3445 + dma_pool_destroy(fsl_queue->comp_pool);
3446 + dma_pool_destroy(fsl_queue->desc_pool);
3447 +
3448 + fsl_qdma->desc_allocated--;
3449 + fsl_queue->comp_pool = NULL;
3450 + fsl_queue->desc_pool = NULL;
3451 +}
3452 +
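+/*
+ * Lay out one memcpy transaction across the two pre-allocated buffers.
+ * The 64-byte command buffer holds four 16-byte fsl_qdma_format
+ * entries: the head CCDF followed by a compound S/G table (descriptor,
+ * source, destination).  The head CCDF points 16 bytes into the same
+ * buffer, i.e. at that table, and the table's first entry points at
+ * the 32-byte SDF/DDF pair in the descriptor buffer.
+ */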
3453 +static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
3454 + dma_addr_t dst, dma_addr_t src, u32 len)
3455 +{
3456 + struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
3457 + struct fsl_qdma_sdf *sdf;
3458 + struct fsl_qdma_ddf *ddf;
3459 +
3460 + ccdf = (struct fsl_qdma_format *)fsl_comp->virt_addr;
3461 + csgf_desc = (struct fsl_qdma_format *)fsl_comp->virt_addr + 1;
3462 + csgf_src = (struct fsl_qdma_format *)fsl_comp->virt_addr + 2;
3463 + csgf_dest = (struct fsl_qdma_format *)fsl_comp->virt_addr + 3;
3464 + sdf = (struct fsl_qdma_sdf *)fsl_comp->desc_virt_addr;
3465 + ddf = (struct fsl_qdma_ddf *)fsl_comp->desc_virt_addr + 1;
3466 +
3467 + memset(fsl_comp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
3468 + memset(fsl_comp->desc_virt_addr, 0, FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
3469 + /* Head Command Descriptor (Frame Descriptor) */
3470 + qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
3471 + qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
3472 + qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
3473 + /* Status notification is enqueued to status queue. */
3474 + /* Compound Command Descriptor (Frame List Table) */
3475 + qdma_desc_addr_set64(csgf_desc, fsl_comp->desc_bus_addr);
3476 + /* The S/G entry for the descriptor buffer must have length 32 (SDF + DDF). */
3477 + qdma_csgf_set_len(csgf_desc, 32);
3478 + qdma_desc_addr_set64(csgf_src, src);
3479 + qdma_csgf_set_len(csgf_src, len);
3480 + qdma_desc_addr_set64(csgf_dest, dst);
3481 + qdma_csgf_set_len(csgf_dest, len);
3482 + /* This entry is the last entry. */
3483 + qdma_csgf_set_f(csgf_dest, len);
3484 + /* Descriptor Buffer */
3485 + sdf->cmd = cpu_to_le32(
3486 + FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
3487 + ddf->cmd = cpu_to_le32(
3488 + FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
3489 + ddf->cmd |= cpu_to_le32(
3490 + FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
3491 +}
3492 +
3493 +/*
3494 + * Pre-request command descriptor and compound S/G for enqueue.
3495 + */
3496 +static int fsl_qdma_pre_request_enqueue_comp_desc(struct fsl_qdma_queue *queue)
3497 +{
3498 + struct fsl_qdma_comp *comp_temp;
3499 + int i;
3500 +
3501 + for (i = 0; i < queue->n_cq + COMMAND_QUEUE_OVERFLOW; i++) {
3502 + comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
3503 + if (!comp_temp)
3504 + return -ENOMEM;
3505 + comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
3506 + GFP_KERNEL,
3507 + &comp_temp->bus_addr);
3508 +
3509 + if (!comp_temp->virt_addr) {
3510 + kfree(comp_temp);
3511 + return -ENOMEM;
3512 + }
3513 +
3514 + list_add_tail(&comp_temp->list, &queue->comp_free);
3515 + }
3516 +
3517 + return 0;
3518 +}
3519 +
3520 +/*
3521 + * Pre-request source and destination descriptor for enqueue.
3522 + */
3523 +static int fsl_qdma_pre_request_enqueue_sd_desc(struct fsl_qdma_queue *queue)
3524 +{
3525 + struct fsl_qdma_comp *comp_temp, *_comp_temp;
3526 +
3527 + list_for_each_entry_safe(comp_temp, _comp_temp,
3528 + &queue->comp_free, list) {
3529 + comp_temp->desc_virt_addr = dma_pool_alloc(queue->desc_pool,
3530 + GFP_KERNEL,
3531 + &comp_temp->desc_bus_addr);
3532 + if (!comp_temp->desc_virt_addr)
3533 + return -ENOMEM;
3534 + }
3535 +
3536 + return 0;
3537 +}
3538 +
3539 +/*
3540 + * Request a command descriptor for enqueue.
3541 + */
3542 +static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc(
3543 + struct fsl_qdma_chan *fsl_chan)
3544 +{
3545 + struct fsl_qdma_comp *comp_temp;
3546 + struct fsl_qdma_queue *queue = fsl_chan->queue;
3547 + unsigned long flags;
3548 + int timeout = COMP_TIMEOUT;
3549 +
3550 + while (timeout) {
3551 + spin_lock_irqsave(&queue->queue_lock, flags);
3552 + if (!list_empty(&queue->comp_free)) {
3553 + comp_temp = list_first_entry(&queue->comp_free,
3554 + struct fsl_qdma_comp,
3555 + list);
3556 + list_del(&comp_temp->list);
3557 +
3558 + spin_unlock_irqrestore(&queue->queue_lock, flags);
3559 + comp_temp->qchan = fsl_chan;
3560 + return comp_temp;
3561 + }
3562 + spin_unlock_irqrestore(&queue->queue_lock, flags);
3563 + udelay(1);
3564 + timeout--;
3565 + }
3566 +
3567 + return NULL;
3568 +}
3569 +
3570 +static struct fsl_qdma_queue *fsl_qdma_alloc_queue_resources(
3571 + struct platform_device *pdev,
3572 + struct fsl_qdma_engine *fsl_qdma)
3573 +{
3574 + struct fsl_qdma_queue *queue_head, *queue_temp;
3575 + int ret, len, i, j;
3576 + unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
3577 + int queue_num;
3578 + int block_number;
3579 +
3580 + queue_num = fsl_qdma->n_queues;
3581 + block_number = fsl_qdma->block_number;
3582 +
3583 + if (queue_num > FSL_QDMA_QUEUE_MAX)
3584 + queue_num = FSL_QDMA_QUEUE_MAX;
3585 + len = sizeof(*queue_head) * queue_num * block_number;
3586 + queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
3587 + if (!queue_head)
3588 + return NULL;
3589 +
3590 + ret = device_property_read_u32_array(&pdev->dev, "queue-sizes",
3591 + queue_size, queue_num);
3592 + if (ret) {
3593 + dev_err(&pdev->dev, "Can't get queue-sizes.\n");
3594 + return NULL;
3595 + }
3596 + for (j = 0; j < block_number; j++) {
3597 + for (i = 0; i < queue_num; i++) {
3598 + if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
3599 + queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
3600 + dev_err(&pdev->dev,
3601 + "Get wrong queue-sizes.\n");
3602 + return NULL;
3603 + }
3604 + queue_temp = queue_head + i + (j * queue_num);
3605 +
3606 + queue_temp->cq =
3607 + dma_alloc_coherent(&pdev->dev,
3608 + sizeof(struct fsl_qdma_format) *
3609 + queue_size[i],
3610 + &queue_temp->bus_addr,
3611 + GFP_KERNEL);
3612 + if (!queue_temp->cq)
3613 + return NULL;
3614 + queue_temp->block_base = fsl_qdma->block_base +
3615 + FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
3616 + queue_temp->n_cq = queue_size[i];
3617 + queue_temp->id = i;
3618 + queue_temp->virt_head = queue_temp->cq;
3619 + queue_temp->virt_tail = queue_temp->cq;
3620 + /*
3621 + * List for queue command buffer
3622 + */
3623 + INIT_LIST_HEAD(&queue_temp->comp_used);
3624 + spin_lock_init(&queue_temp->queue_lock);
3625 + }
3626 + }
3627 + return queue_head;
3628 +}
3629 +
3630 +static struct fsl_qdma_queue *fsl_qdma_prep_status_queue(
3631 + struct platform_device *pdev)
3632 +{
3633 + struct device_node *np = pdev->dev.of_node;
3634 + struct fsl_qdma_queue *status_head;
3635 + unsigned int status_size;
3636 + int ret;
3637 +
3638 + ret = of_property_read_u32(np, "status-sizes", &status_size);
3639 + if (ret) {
3640 + dev_err(&pdev->dev, "Can't get status-sizes.\n");
3641 + return NULL;
3642 + }
3643 + if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
3644 + status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
3645 + dev_err(&pdev->dev, "Get wrong status_size.\n");
3646 + return NULL;
3647 + }
3648 + status_head = devm_kzalloc(&pdev->dev, sizeof(*status_head),
3649 + GFP_KERNEL);
3650 + if (!status_head)
3651 + return NULL;
3652 +
3653 + /*
3654 + * Buffer for queue command
3655 + */
3656 + status_head->cq = dma_alloc_coherent(&pdev->dev,
3657 + sizeof(struct fsl_qdma_format) *
3658 + status_size,
3659 + &status_head->bus_addr,
3660 + GFP_KERNEL);
3661 + if (!status_head->cq)
3662 + return NULL;
3663 + status_head->n_cq = status_size;
3664 + status_head->virt_head = status_head->cq;
3665 + status_head->virt_tail = status_head->cq;
3666 + status_head->comp_pool = NULL;
3667 +
3668 + return status_head;
3669 +}
3670 +
3671 +static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
3672 +{
3673 + void __iomem *ctrl = fsl_qdma->ctrl_base;
3674 + void __iomem *block;
3675 + int i, count = 5;
3676 + int j;
3677 + u32 reg;
3678 +
3679 + /* Disable the command queue and wait for idle state. */
3680 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
3681 + reg |= FSL_QDMA_DMR_DQD;
3682 + qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
3683 + for (j = 0; j < fsl_qdma->block_number; j++) {
3684 + block = fsl_qdma->block_base +
3685 + FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
3686 + for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
3687 + qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
3688 + }
3689 + while (1) {
3690 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
3691 + if (!(reg & FSL_QDMA_DSR_DB))
3692 + break;
3693 + if (count-- < 0)
3694 + return -EBUSY;
3695 + udelay(100);
3696 + }
3697 +
3698 + for (j = 0; j < fsl_qdma->block_number; j++) {
3700 + block = fsl_qdma->block_base +
3701 + FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
3702 +
3703 + /* Disable status queue. */
3704 + qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);
3705 +
3706 + /*
3707 + * clear the command queue interrupt detect register for
3708 + * all queues.
3709 + */
3710 + qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
3711 + }
3712 +
3713 + return 0;
3714 +}
3715 +
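+/*
+ * Drain the per-block status queue: pop each completed entry, match it
+ * to the oldest descriptor on the owning command queue's comp_used
+ * list, acknowledge it with BSQMR_DI and complete its cookie.  An
+ * entry repeating the previously recorded (queue, address) pair is a
+ * duplicate and is only acknowledged.
+ */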
3716 +static int fsl_qdma_queue_transfer_complete(
3717 + struct fsl_qdma_engine *fsl_qdma,
3718 + void *block,
3719 + int id)
3720 +{
3721 + struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
3722 + struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];
3723 + struct fsl_qdma_queue *temp_queue;
3724 + struct fsl_qdma_format *status_addr;
3725 + struct fsl_qdma_comp *fsl_comp = NULL;
3726 + u32 reg, i;
3727 + bool duplicate, duplicate_handle;
3728 +
3729 + while (1) {
3730 + duplicate = 0;
3731 + duplicate_handle = 0;
3732 + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
3733 + if (reg & FSL_QDMA_BSQSR_QE)
3734 + return 0;
3735 +
3736 + status_addr = fsl_status->virt_head;
3737 +
3738 + if (qdma_ccdf_get_queue(status_addr) ==
3739 + __this_cpu_read(pre_queue) &&
3740 + qdma_ccdf_addr_get64(status_addr) ==
3741 + __this_cpu_read(pre_addr))
3742 + duplicate = 1;
3743 + i = qdma_ccdf_get_queue(status_addr) +
3744 + id * fsl_qdma->n_queues;
3745 + __this_cpu_write(pre_addr, qdma_ccdf_addr_get64(status_addr));
3746 + __this_cpu_write(pre_queue, qdma_ccdf_get_queue(status_addr));
3747 + temp_queue = fsl_queue + i;
3748 +
3749 + spin_lock(&temp_queue->queue_lock);
3750 + if (list_empty(&temp_queue->comp_used)) {
3751 + if (duplicate)
3752 + duplicate_handle = 1;
3753 + else {
3754 + spin_unlock(&temp_queue->queue_lock);
3755 + return -1;
3756 + }
3757 + } else {
3758 + fsl_comp = list_first_entry(&temp_queue->comp_used,
3759 + struct fsl_qdma_comp,
3760 + list);
3761 + if (fsl_comp->bus_addr + 16 !=
3762 + __this_cpu_read(pre_addr)) {
3763 + if (duplicate)
3764 + duplicate_handle = 1;
3765 + else {
3766 + spin_unlock(&temp_queue->queue_lock);
3767 + return -1;
3768 + }
3769 + }
3770 +
3771 + }
3772 +
3773 + if (duplicate_handle) {
3774 + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
3775 + reg |= FSL_QDMA_BSQMR_DI;
3776 + qdma_desc_addr_set64(status_addr, 0x0);
3777 + fsl_status->virt_head++;
3778 + if (fsl_status->virt_head == fsl_status->cq
3779 + + fsl_status->n_cq)
3780 + fsl_status->virt_head = fsl_status->cq;
3781 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
3782 + spin_unlock(&temp_queue->queue_lock);
3783 + continue;
3784 + }
3785 + list_del(&fsl_comp->list);
3786 +
3787 + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
3788 + reg |= FSL_QDMA_BSQMR_DI;
3789 + qdma_desc_addr_set64(status_addr, 0x0);
3790 + fsl_status->virt_head++;
3791 + if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
3792 + fsl_status->virt_head = fsl_status->cq;
3793 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
3794 + spin_unlock(&temp_queue->queue_lock);
3795 +
3796 + spin_lock(&fsl_comp->qchan->vchan.lock);
3797 + vchan_cookie_complete(&fsl_comp->vdesc);
3798 + fsl_comp->qchan->status = DMA_COMPLETE;
3799 + spin_unlock(&fsl_comp->qchan->vchan.lock);
3800 + }
3801 + return 0;
3802 +}
3803 +
3804 +static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
3805 +{
3806 + struct fsl_qdma_engine *fsl_qdma = dev_id;
3807 + unsigned int intr;
3808 + void __iomem *status = fsl_qdma->status_base;
3809 +
3810 + intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
3811 +
3812 + if (intr)
3813 + dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");
3814 +
3815 + qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
3816 + return IRQ_HANDLED;
3817 +}
3818 +
3819 +static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
3820 +{
3821 + struct fsl_qdma_engine *fsl_qdma = dev_id;
3822 + unsigned int intr, reg;
3823 + void __iomem *ctrl = fsl_qdma->ctrl_base;
3824 + void __iomem *block;
3825 + int id;
3826 +
3827 + id = irq - fsl_qdma->irq_base;
+	if (id < 0 || id >= fsl_qdma->block_number) {
+		dev_err(fsl_qdma->dma_dev.dev,
+			"irq %d is wrong, irq_base is %d\n",
+			irq, fsl_qdma->irq_base);
+		return IRQ_NONE;
+	}
3833 +
3834 + block = fsl_qdma->block_base +
3835 + FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
3836 +
3837 + intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));
3838 +
3839 + if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
3840 + intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id);
3841 +
3842 + if (intr != 0) {
3843 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
3844 + reg |= FSL_QDMA_DMR_DQD;
3845 + qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
3846 + qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
3847 + dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
3848 + }
3849 +
3850 + qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
3851 +
3852 + return IRQ_HANDLED;
3853 +}
3854 +
3855 +static int
3856 +fsl_qdma_irq_init(struct platform_device *pdev,
3857 + struct fsl_qdma_engine *fsl_qdma)
3858 +{
3859 + char irq_name[20];
3860 + int i;
3861 + int cpu;
3862 + int ret;
3863 +
3864 + fsl_qdma->error_irq = platform_get_irq_byname(pdev,
3865 + "qdma-error");
3866 + if (fsl_qdma->error_irq < 0) {
3867 + dev_err(&pdev->dev, "Can't get qdma controller irq.\n");
3868 + return fsl_qdma->error_irq;
3869 + }
3870 +
3871 + ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
3872 + fsl_qdma_error_handler, 0, "qDMA error", fsl_qdma);
3873 + if (ret) {
3874 + dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
3875 + return ret;
3876 + }
3877 +
3878 + for (i = 0; i < fsl_qdma->block_number; i++) {
3879 + sprintf(irq_name, "qdma-queue%d", i);
3880 + fsl_qdma->queue_irq[i] = platform_get_irq_byname(pdev,
3881 + irq_name);
3882 +
3883 + if (fsl_qdma->queue_irq[i] < 0) {
3884 + dev_err(&pdev->dev,
3885 + "Can't get qdma queue %d irq.\n",
3886 + i);
3887 + return fsl_qdma->queue_irq[i];
3888 + }
3889 +
3890 + ret = devm_request_irq(&pdev->dev,
3891 + fsl_qdma->queue_irq[i],
3892 + fsl_qdma_queue_handler,
3893 + 0,
3894 + "qDMA queue",
3895 + fsl_qdma);
3896 + if (ret) {
3897 + dev_err(&pdev->dev,
3898 + "Can't register qDMA queue IRQ.\n");
3899 + return ret;
3900 + }
3901 +
3902 + cpu = i % num_online_cpus();
3903 + ret = irq_set_affinity_hint(fsl_qdma->queue_irq[i],
3904 + get_cpu_mask(cpu));
3905 + if (ret) {
3906 + dev_err(&pdev->dev,
3907 + "Can't set cpu %d affinity to IRQ %d.\n",
3908 + cpu,
3909 + fsl_qdma->queue_irq[i]);
3910 + return ret;
3911 + }
3912 +
3913 + }
3914 +
3915 + return 0;
3916 +}
3917 +
+static void fsl_qdma_irq_exit(
+	struct platform_device *pdev, struct fsl_qdma_engine *fsl_qdma)
+{
+	int i;
+
+	/* Free every per-block queue IRQ, then the error IRQ if distinct. */
+	for (i = 0; i < fsl_qdma->block_number; i++)
+		devm_free_irq(&pdev->dev, fsl_qdma->queue_irq[i], fsl_qdma);
+	if (fsl_qdma->queue_irq[0] != fsl_qdma->error_irq)
+		devm_free_irq(&pdev->dev, fsl_qdma->error_irq, fsl_qdma);
+}
3928 +
3929 +static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
3930 +{
3931 + struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
3932 + struct fsl_qdma_queue *temp;
3933 + void __iomem *ctrl = fsl_qdma->ctrl_base;
3934 + void __iomem *status = fsl_qdma->status_base;
3935 + void __iomem *block;
3936 + int i, j, ret;
3937 + u32 reg;
3938 +
3939 + /* Try to halt the qDMA engine first. */
3940 + ret = fsl_qdma_halt(fsl_qdma);
3941 + if (ret) {
3942 + dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!\n");
3943 + return ret;
3944 + }
3945 +
3946 + for (i = 0; i < fsl_qdma->block_number; i++) {
3947 + /*
3948 + * Clear the command queue interrupt detect register for
3949 + * all queues.
3950 + */
3951 +
3952 + block = fsl_qdma->block_base +
3953 + FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, i);
3954 + qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
3955 + }
3956 +
3957 + for (j = 0; j < fsl_qdma->block_number; j++) {
3958 + block = fsl_qdma->block_base +
3959 + FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
3960 + for (i = 0; i < fsl_qdma->n_queues; i++) {
3961 + temp = fsl_queue + i + (j * fsl_qdma->n_queues);
+			/*
+			 * Initialize Command Queue registers to point to the
+			 * first command descriptor in memory (Dequeue and
+			 * Enqueue Pointer Address Registers).
+			 */
3969 +
3970 + qdma_writel(fsl_qdma, temp->bus_addr,
3971 + block + FSL_QDMA_BCQDPA_SADDR(i));
3972 + qdma_writel(fsl_qdma, temp->bus_addr,
3973 + block + FSL_QDMA_BCQEPA_SADDR(i));
3974 +
3975 + /* Initialize the queue mode. */
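+			/*
+			 * CQ_SIZE encodes the ring size as ilog2(n_cq) - 6,
+			 * so the valid 64..16384 entry range maps to codes
+			 * 0..8; CD_THLD is programmed from the same
+			 * logarithm (ilog2(n_cq) - 4).
+			 */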
3976 + reg = FSL_QDMA_BCQMR_EN;
3977 + reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
3978 + reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
3979 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
3980 + }
3981 +
3982 + /*
3983 + * Workaround for erratum ERR010812: XOFF must be enabled
3984 + * to avoid enqueue rejections.
3985 + * Setting SQCCMR ENTER_WM to 0x20.
3986 + */
3987 +
3988 + qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
3989 + block + FSL_QDMA_SQCCMR);
3990 +
3991 + /*
3992 + * Initialize status queue registers to point to the first
3993 + * command descriptor in memory.
3994 + * Dequeue Pointer Address Registers
3995 + * Enqueue Pointer Address Registers
3996 + */
3997 +
3998 + qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
3999 + block + FSL_QDMA_SQEPAR);
4000 + qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
4001 + block + FSL_QDMA_SQDPAR);
4002 + /* Initialize status queue interrupt. */
4003 + qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
4004 + block + FSL_QDMA_BCQIER(0));
4005 + qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN |
4006 + FSL_QDMA_BSQICR_ICST(5) | 0x8000,
4007 + block + FSL_QDMA_BSQICR);
4008 + qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE |
4009 + FSL_QDMA_CQIER_TEIE,
4010 + block + FSL_QDMA_CQIER);
4011 +
4012 + /* Initialize the status queue mode. */
4013 + reg = FSL_QDMA_BSQMR_EN;
4014 + reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2(
4015 + fsl_qdma->status[j]->n_cq) - 6);
4016 +
4017 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
4018 + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
4019 +
4020 + }
4021 +
4022 + /* Initialize controller interrupt register. */
4023 + qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
4024 + qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEIER);
4025 +
4026 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
4027 + reg &= ~FSL_QDMA_DMR_DQD;
4028 + qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
4029 +
4030 + return 0;
4031 +}
4032 +
4033 +static struct dma_async_tx_descriptor *
4034 +fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
4035 + dma_addr_t src, size_t len, unsigned long flags)
4036 +{
4037 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
4038 + struct fsl_qdma_comp *fsl_comp;
4039 +
4040 + fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan);
4041 +
4042 + if (!fsl_comp)
4043 + return NULL;
4044 +
4045 + fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
4046 +
4047 + return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
4048 +}
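+
+/*
+ * Usage sketch (illustrative, not part of this driver): a client
+ * reaches the prep callback above through the generic dmaengine API,
+ * roughly:
+ *
+ *	chan = dma_request_channel(mask, NULL, NULL); // mask has DMA_MEMCPY
+ *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
+ *				       DMA_PREP_INTERRUPT);
+ *	cookie = dmaengine_submit(tx);
+ *	dma_async_issue_pending(chan); // ends up in fsl_qdma_issue_pending()
+ *
+ * Completion is signalled when fsl_qdma_queue_transfer_complete()
+ * calls vchan_cookie_complete(), observable via
+ * dma_async_is_tx_complete() or a tx->callback.
+ */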
4049 +
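+/*
+ * Copy the 16-byte head CCDF of the next pending descriptor into the
+ * command ring at virt_head (wrapping after n_cq entries), move the
+ * descriptor to comp_used, and set BCQMR_EI to signal the enqueue to
+ * the hardware.  Nothing is enqueued while the queue reports full (QF)
+ * or XOFF.
+ */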
4050 +static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
4051 +{
4052 + struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
4053 + struct fsl_qdma_comp *fsl_comp;
4054 + struct virt_dma_desc *vdesc;
4055 + void __iomem *block = fsl_queue->block_base;
4056 + u32 reg;
4057 +
4058 + reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
4059 + if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
4060 + return;
4061 + vdesc = vchan_next_desc(&fsl_chan->vchan);
4062 + if (!vdesc)
4063 + return;
4064 + list_del(&vdesc->node);
4065 + fsl_comp = to_fsl_qdma_comp(vdesc);
4066 +
4067 + memcpy(fsl_queue->virt_head++, fsl_comp->virt_addr, 16);
4068 + if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
4069 + fsl_queue->virt_head = fsl_queue->cq;
4070 +
4071 + list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
4072 + barrier();
4073 + reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
4074 + reg |= FSL_QDMA_BCQMR_EI;
4075 + qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
4076 + fsl_chan->status = DMA_IN_PROGRESS;
4077 +}
4078 +
4079 +static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan,
4080 + dma_cookie_t cookie, struct dma_tx_state *txstate)
4081 +{
4082 + return dma_cookie_status(chan, cookie, txstate);
4083 +}
4084 +
4085 +static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
4086 +{
4087 + struct fsl_qdma_comp *fsl_comp;
4088 + struct fsl_qdma_queue *fsl_queue;
4089 + unsigned long flags;
4090 +
4091 + fsl_comp = to_fsl_qdma_comp(vdesc);
4092 + fsl_queue = fsl_comp->qchan->queue;
4093 +
4094 + spin_lock_irqsave(&fsl_queue->queue_lock, flags);
4095 + list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
4096 + spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
4097 +}
4098 +
4099 +static void fsl_qdma_issue_pending(struct dma_chan *chan)
4100 +{
4101 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
4102 + struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
4103 + unsigned long flags;
4104 +
4105 + spin_lock_irqsave(&fsl_queue->queue_lock, flags);
4106 + spin_lock(&fsl_chan->vchan.lock);
4107 + if (vchan_issue_pending(&fsl_chan->vchan))
4108 + fsl_qdma_enqueue_desc(fsl_chan);
4109 + spin_unlock(&fsl_chan->vchan.lock);
4110 + spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
4111 +}
4112 +
4113 +static void fsl_qdma_synchronize(struct dma_chan *chan)
4114 +{
4115 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
4116 +
4117 + vchan_synchronize(&fsl_chan->vchan);
4118 +}
4119 +
4120 +static int fsl_qdma_terminate_all(struct dma_chan *chan)
4121 +{
4122 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
4123 + unsigned long flags;
4124 + LIST_HEAD(head);
4125 +
4126 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
4127 + vchan_get_all_descriptors(&fsl_chan->vchan, &head);
4128 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
4129 + vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
4130 + return 0;
4131 +}
4132 +
4133 +static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
4134 +{
4135 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
4136 + struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
4137 + struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
4138 + int ret;
4139 +
4140 + if (fsl_queue->comp_pool && fsl_queue->desc_pool)
4141 + return fsl_qdma->desc_allocated;
4142 +
4143 + INIT_LIST_HEAD(&fsl_queue->comp_free);
4144 +
4145 + /*
4146 + * The dma pool for queue command buffer
4147 + */
4148 + fsl_queue->comp_pool =
4149 + dma_pool_create("comp_pool",
4150 + chan->device->dev,
4151 + FSL_QDMA_COMMAND_BUFFER_SIZE,
4152 + 64, 0);
4153 + if (!fsl_queue->comp_pool)
4154 + return -ENOMEM;
4155 +
4156 + /*
4157 + * The dma pool for Descriptor(SD/DD) buffer
4158 + */
4159 + fsl_queue->desc_pool =
4160 + dma_pool_create("desc_pool",
4161 + chan->device->dev,
4162 + FSL_QDMA_DESCRIPTOR_BUFFER_SIZE,
4163 + 32, 0);
4164 + if (!fsl_queue->desc_pool)
4165 + goto err_desc_pool;
4166 +
4167 + ret = fsl_qdma_pre_request_enqueue_comp_desc(fsl_queue);
4168 + if (ret) {
4169 + dev_err(chan->device->dev, "failed to alloc dma buffer for "
4170 + "comp S/G descriptor\n");
4171 + goto err_mem;
4172 + }
4173 +
4174 + ret = fsl_qdma_pre_request_enqueue_sd_desc(fsl_queue);
4175 + if (ret) {
4176 + dev_err(chan->device->dev, "failed to alloc dma buffer for "
4177 + "S/D descriptor\n");
4178 + goto err_mem;
4179 + }
4180 +
4181 + fsl_qdma->desc_allocated++;
4182 + return fsl_qdma->desc_allocated;
4183 +
4184 +err_mem:
4185 + dma_pool_destroy(fsl_queue->desc_pool);
4186 +err_desc_pool:
4187 + dma_pool_destroy(fsl_queue->comp_pool);
4188 + return -ENOMEM;
4189 +}
4190 +
4191 +static int fsl_qdma_probe(struct platform_device *pdev)
4192 +{
4193 + struct device_node *np = pdev->dev.of_node;
4194 + struct fsl_qdma_engine *fsl_qdma;
4195 + struct fsl_qdma_chan *fsl_chan;
4196 + struct resource *res;
4197 + unsigned int len, chans, queues;
4198 + int ret, i;
4199 + int blk_num;
4200 + int blk_off;
4201 +
4202 + ret = of_property_read_u32(np, "channels", &chans);
4203 + if (ret) {
4204 + dev_err(&pdev->dev, "Can't get channels.\n");
4205 + return ret;
4206 + }
4207 +
4208 + ret = of_property_read_u32(np, "block-offset", &blk_off);
4209 + if (ret) {
4210 + dev_err(&pdev->dev, "Can't get block-offset.\n");
4211 + return ret;
4212 + }
4213 +
4214 + ret = of_property_read_u32(np, "block-number", &blk_num);
4215 + if (ret) {
4216 + dev_err(&pdev->dev, "Can't get block-number.\n");
4217 + return ret;
4218 + }
4219 +
4220 + blk_num = min_t(int, blk_num, num_online_cpus());
4221 +
4222 + len = sizeof(*fsl_qdma);
4223 + fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
4224 + if (!fsl_qdma)
4225 + return -ENOMEM;
4226 +
4227 + len = sizeof(*fsl_chan) * chans;
4228 + fsl_qdma->chans = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
4229 + if (!fsl_qdma->chans)
4230 + return -ENOMEM;
4231 +
4232 + len = sizeof(struct fsl_qdma_queue *) * blk_num;
4233 + fsl_qdma->status = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
4234 + if (!fsl_qdma->status)
4235 + return -ENOMEM;
4236 +
4237 + len = sizeof(int) * blk_num;
4238 + fsl_qdma->queue_irq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
4239 + if (!fsl_qdma->queue_irq)
4240 + return -ENOMEM;
4241 +
4242 + ret = of_property_read_u32(np, "queues", &queues);
4243 + if (ret) {
4244 + dev_err(&pdev->dev, "Can't get queues.\n");
4245 + return ret;
4246 + }
4247 +
4248 + fsl_qdma->desc_allocated = 0;
4249 + fsl_qdma->n_chans = chans;
4250 + fsl_qdma->n_queues = queues;
4251 + fsl_qdma->block_number = blk_num;
4252 + fsl_qdma->block_offset = blk_off;
4253 +
4254 + mutex_init(&fsl_qdma->fsl_qdma_mutex);
4255 +
4256 + for (i = 0; i < fsl_qdma->block_number; i++) {
4257 + fsl_qdma->status[i] = fsl_qdma_prep_status_queue(pdev);
4258 + if (!fsl_qdma->status[i])
4259 + return -ENOMEM;
4260 + }
4261 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4262 + fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
4263 + if (IS_ERR(fsl_qdma->ctrl_base))
4264 + return PTR_ERR(fsl_qdma->ctrl_base);
4265 +
4266 + res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
4267 + fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
4268 + if (IS_ERR(fsl_qdma->status_base))
4269 + return PTR_ERR(fsl_qdma->status_base);
4270 +
4271 + res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
4272 + fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
4273 + if (IS_ERR(fsl_qdma->block_base))
4274 + return PTR_ERR(fsl_qdma->block_base);
4275 + fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma);
4276 + if (!fsl_qdma->queue)
4277 + return -ENOMEM;
4278 +
4279 + ret = fsl_qdma_irq_init(pdev, fsl_qdma);
4280 + if (ret)
4281 + return ret;
4282 +
4283 + fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
4284 + fsl_qdma->big_endian = of_property_read_bool(np, "big-endian");
4285 + INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
4286 +
4287 + for (i = 0; i < fsl_qdma->n_chans; i++) {
4288 + struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
4289 +
4290 + fsl_chan->qdma = fsl_qdma;
4291 + fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
4292 + fsl_qdma->block_number);
4293 + fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
4294 + vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
4295 + }
4296 +
4297 + dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);
4298 +
4299 + fsl_qdma->dma_dev.dev = &pdev->dev;
4300 + fsl_qdma->dma_dev.device_free_chan_resources
4301 + = fsl_qdma_free_chan_resources;
4302 + fsl_qdma->dma_dev.device_alloc_chan_resources
4303 + = fsl_qdma_alloc_chan_resources;
4304 + fsl_qdma->dma_dev.device_tx_status = fsl_qdma_tx_status;
4305 + fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
4306 + fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
4307 + fsl_qdma->dma_dev.device_synchronize = fsl_qdma_synchronize;
4308 + fsl_qdma->dma_dev.device_terminate_all = fsl_qdma_terminate_all;
4309 +
4310 + dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
4311 +
4312 + platform_set_drvdata(pdev, fsl_qdma);
4313 +
4314 + ret = dma_async_device_register(&fsl_qdma->dma_dev);
4315 + if (ret) {
4316 + dev_err(&pdev->dev,
4317 + "Can't register NXP Layerscape qDMA engine.\n");
4318 + return ret;
4319 + }
4320 +
4321 + ret = fsl_qdma_reg_init(fsl_qdma);
4322 + if (ret) {
4323 + dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
4324 + return ret;
4325 + }
4326 +
4327 + return 0;
4328 +}
4329 +
4330 +static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
4331 +{
4332 + struct fsl_qdma_chan *chan, *_chan;
4333 +
4334 + list_for_each_entry_safe(chan, _chan,
4335 + &dmadev->channels, vchan.chan.device_node) {
4336 + list_del(&chan->vchan.chan.device_node);
4337 + tasklet_kill(&chan->vchan.task);
4338 + }
4339 +}
4340 +
4341 +static int fsl_qdma_remove(struct platform_device *pdev)
4342 +{
4343 + struct device_node *np = pdev->dev.of_node;
4344 + struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
4345 + struct fsl_qdma_queue *status;
4346 + int i;
4347 +
4348 + fsl_qdma_irq_exit(pdev, fsl_qdma);
4349 + fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);
4350 + of_dma_controller_free(np);
4351 + dma_async_device_unregister(&fsl_qdma->dma_dev);
4352 +
4353 + for (i = 0; i < fsl_qdma->block_number; i++) {
4354 + status = fsl_qdma->status[i];
4355 + dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
4356 + status->n_cq, status->cq, status->bus_addr);
4357 + }
4358 + return 0;
4359 +}
4360 +
4361 +static const struct of_device_id fsl_qdma_dt_ids[] = {
4362 + { .compatible = "fsl,ls1021a-qdma", },
4363 + { /* sentinel */ }
4364 +};
4365 +MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);
4366 +
4367 +static struct platform_driver fsl_qdma_driver = {
4368 + .driver = {
4369 + .name = "fsl-qdma",
4370 + .of_match_table = fsl_qdma_dt_ids,
4371 + },
4372 + .probe = fsl_qdma_probe,
4373 + .remove = fsl_qdma_remove,
4374 +};
4375 +
4376 +module_platform_driver(fsl_qdma_driver);
4377 +
4378 +MODULE_ALIAS("platform:fsl-qdma");
4379 +MODULE_DESCRIPTION("NXP Layerscape qDMA engine driver");
4380 +MODULE_LICENSE("GPL v2");