1 From 854c1f0e9574e9b25a55b439608c71e013b34a56 Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Mon, 25 Sep 2017 12:12:20 +0800
4 Subject: [PATCH] dma: support layerscape
5
6 This is an integrated patch for layerscape DMA support.
7
8 Signed-off-by: jiaheng.fan <jiaheng.fan@nxp.com>
9 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
10 ---
11 drivers/dma/Kconfig | 31 +
12 drivers/dma/Makefile | 3 +
13 drivers/dma/caam_dma.c | 563 +++++++++++++++
14 drivers/dma/dpaa2-qdma/Kconfig | 8 +
15 drivers/dma/dpaa2-qdma/Makefile | 8 +
16 drivers/dma/dpaa2-qdma/dpaa2-qdma.c | 986 +++++++++++++++++++++++++
17 drivers/dma/dpaa2-qdma/dpaa2-qdma.h | 262 +++++++
18 drivers/dma/dpaa2-qdma/dpdmai.c | 454 ++++++++++++
19 drivers/dma/dpaa2-qdma/fsl_dpdmai.h | 521 ++++++++++++++
20 drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h | 222 ++++++
21 drivers/dma/fsl-qdma.c | 1201 +++++++++++++++++++++++++++++++
22 11 files changed, 4259 insertions(+)
23 create mode 100644 drivers/dma/caam_dma.c
24 create mode 100644 drivers/dma/dpaa2-qdma/Kconfig
25 create mode 100644 drivers/dma/dpaa2-qdma/Makefile
26 create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.c
27 create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.h
28 create mode 100644 drivers/dma/dpaa2-qdma/dpdmai.c
29 create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai.h
30 create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
31 create mode 100644 drivers/dma/fsl-qdma.c
32
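All three engines added below register with the generic Linux dmaengine framework,
so consumers drive them through the standard API rather than driver-specific calls.
As a minimal sketch (illustrative only, not part of this patch; the helper name is
invented, src/dst are assumed to be DMA-mapped already, and a real user would
normally complete via callback rather than dma_sync_wait()):

    #include <linux/dmaengine.h>

    static int example_dma_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
    {
            struct dma_async_tx_descriptor *tx;
            struct dma_chan *chan;
            dma_cookie_t cookie;
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            dma_cap_set(DMA_MEMCPY, mask);

            /* grab any channel advertising DMA_MEMCPY capability */
            chan = dma_request_channel(mask, NULL, NULL);
            if (!chan)
                    return -ENODEV;

            tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
                                                      DMA_PREP_INTERRUPT);
            if (!tx) {
                    dma_release_channel(chan);
                    return -ENOMEM;
            }

            cookie = dmaengine_submit(tx);  /* queue the descriptor */
            dma_async_issue_pending(chan);  /* kick the engine */

            if (dma_sync_wait(chan, cookie) != DMA_COMPLETE) {
                    dma_release_channel(chan);
                    return -EIO;
            }

            dma_release_channel(chan);
            return 0;
    }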
33 --- a/drivers/dma/Kconfig
34 +++ b/drivers/dma/Kconfig
35 @@ -192,6 +192,20 @@ config FSL_EDMA
36 multiplexing capability for DMA request sources(slot).
37 This module can be found on Freescale Vybrid and LS-1 SoCs.
38
39 +config FSL_QDMA
40 + tristate "Freescale qDMA engine support"
41 + select DMA_ENGINE
42 + select DMA_VIRTUAL_CHANNELS
43 + select DMA_ENGINE_RAID
44 + select ASYNC_TX_ENABLE_CHANNEL_SWITCH
45 + help
46 + Support the Freescale qDMA engine with command queue and legacy mode.
47 + Channel virtualization is supported through enqueuing of DMA jobs to,
48 + or dequeuing DMA jobs from, different work queues.
49 + This module can be found on Freescale LS SoCs.
50 +
51 +source drivers/dma/dpaa2-qdma/Kconfig
52 +
53 config FSL_RAID
54 tristate "Freescale RAID engine Support"
55 depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
56 @@ -564,6 +578,23 @@ config ZX_DMA
57 help
58 Support the DMA engine for ZTE ZX296702 platform devices.
59
60 +config CRYPTO_DEV_FSL_CAAM_DMA
61 + tristate "CAAM DMA engine support"
62 + depends on CRYPTO_DEV_FSL_CAAM_JR
63 + default y
64 + select DMA_ENGINE
65 + select ASYNC_CORE
66 + select ASYNC_TX_ENABLE_CHANNEL_SWITCH
67 + help
68 + Selecting this will offload the DMA operations for users of
69 +	  the scatter/gather memcpy API to the CAAM via job rings. The
70 + CAAM is a hardware module that provides hardware acceleration to
71 + cryptographic operations. It has a built-in DMA controller that can
72 + be programmed to read/write cryptographic data. This module defines
73 + a DMA driver that uses the DMA capabilities of the CAAM.
74 +
75 + To compile this as a module, choose M here: the module
76 + will be called caam_dma.
77
78 # driver files
79 source "drivers/dma/bestcomm/Kconfig"
80 --- a/drivers/dma/Makefile
81 +++ b/drivers/dma/Makefile
82 @@ -29,6 +29,8 @@ obj-$(CONFIG_DW_DMAC_CORE) += dw/
83 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
84 obj-$(CONFIG_FSL_DMA) += fsldma.o
85 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
86 +obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
87 +obj-$(CONFIG_FSL_DPAA2_QDMA) += dpaa2-qdma/
88 obj-$(CONFIG_FSL_RAID) += fsl_raid.o
89 obj-$(CONFIG_HSU_DMA) += hsu/
90 obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
91 @@ -67,6 +69,7 @@ obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-
92 obj-$(CONFIG_TI_EDMA) += edma.o
93 obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
94 obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
95 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_DMA) += caam_dma.o
96
97 obj-y += qcom/
98 obj-y += xilinx/
99 --- /dev/null
100 +++ b/drivers/dma/caam_dma.c
101 @@ -0,0 +1,563 @@
102 +/*
103 + * caam support for SG DMA
104 + *
105 + * Copyright 2016 Freescale Semiconductor, Inc
106 + * Copyright 2017 NXP
107 + */
108 +
109 +#include <linux/module.h>
110 +#include <linux/platform_device.h>
111 +#include <linux/dma-mapping.h>
112 +#include <linux/interrupt.h>
113 +#include <linux/slab.h>
114 +#include <linux/debugfs.h>
115 +
116 +#include <linux/dmaengine.h>
117 +#include "dmaengine.h"
118 +
119 +#include "../crypto/caam/regs.h"
120 +#include "../crypto/caam/jr.h"
121 +#include "../crypto/caam/error.h"
122 +#include "../crypto/caam/intern.h"
123 +#include "../crypto/caam/desc_constr.h"
124 +#include "../crypto/caam/sg_sw_sec4.h"
125 +
126 +#define DESC_DMA_MEMCPY_LEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / \
127 + CAAM_CMD_SZ)
128 +
129 +/* This is the maximum chunk size of a DMA transfer. If a buffer is larger
130 + * than this value, it is internally broken into chunks of at most
131 + * CAAM_DMA_CHUNK_SIZE bytes and a DMA transfer request is issued for each
132 + * chunk. This value is the largest 16-bit number that is a multiple of 256
133 + * bytes, the largest configurable CAAM DMA burst size: 65280 = 255 * 256.
134 + */
135 +#define CAAM_DMA_CHUNK_SIZE 65280
136 +
137 +struct caam_dma_sh_desc {
138 + u32 desc[DESC_DMA_MEMCPY_LEN] ____cacheline_aligned;
139 + dma_addr_t desc_dma;
140 +};
141 +
142 +/* caam dma extended descriptor */
143 +struct caam_dma_edesc {
144 + struct dma_async_tx_descriptor async_tx;
145 + struct list_head node;
146 + struct caam_dma_ctx *ctx;
147 + dma_addr_t src_dma;
148 + dma_addr_t dst_dma;
149 + unsigned int src_len;
150 + unsigned int dst_len;
151 + struct sec4_sg_entry *sec4_sg;
152 + u32 jd[] ____cacheline_aligned;
153 +};
154 +
155 +/*
156 + * caam_dma_ctx - per jr/channel context
157 + * @chan: dma channel used by async_tx API
158 + * @node: list_head used to attach to the global dma_ctx_list
159 + * @jrdev: Job Ring device
160 + * @submit_q: queue of pending (submitted, but not enqueued) jobs
161 + * @done_not_acked: jobs that have been completed by jr, but maybe not acked
162 + * @edesc_lock: protects the submit_q and done_not_acked lists
163 + */
164 +struct caam_dma_ctx {
165 + struct dma_chan chan;
166 + struct list_head node;
167 + struct device *jrdev;
168 + struct list_head submit_q;
169 + struct list_head done_not_acked;
170 + spinlock_t edesc_lock;
171 +};
172 +
173 +static struct dma_device *dma_dev;
174 +static struct caam_dma_sh_desc *dma_sh_desc;
175 +static LIST_HEAD(dma_ctx_list);
176 +
177 +static dma_cookie_t caam_dma_tx_submit(struct dma_async_tx_descriptor *tx)
178 +{
179 + struct caam_dma_edesc *edesc = NULL;
180 + struct caam_dma_ctx *ctx = NULL;
181 + dma_cookie_t cookie;
182 +
183 + edesc = container_of(tx, struct caam_dma_edesc, async_tx);
184 + ctx = container_of(tx->chan, struct caam_dma_ctx, chan);
185 +
186 + spin_lock_bh(&ctx->edesc_lock);
187 +
188 + cookie = dma_cookie_assign(tx);
189 + list_add_tail(&edesc->node, &ctx->submit_q);
190 +
191 + spin_unlock_bh(&ctx->edesc_lock);
192 +
193 + return cookie;
194 +}
195 +
196 +static unsigned int caam_dma_sg_dma_len(struct scatterlist *sg,
197 + unsigned int nents)
198 +{
199 + unsigned int len;
200 +
201 + for (len = 0; sg && nents; sg = sg_next(sg), nents--)
202 + len += sg_dma_len(sg);
203 +
204 + return len;
205 +}
206 +
207 +static struct caam_dma_edesc *
208 +caam_dma_sg_edesc_alloc(struct dma_chan *chan,
209 + struct scatterlist *dst_sg, unsigned int dst_nents,
210 + struct scatterlist *src_sg, unsigned int src_nents,
211 + unsigned long flags)
212 +{
213 + struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
214 + chan);
215 + struct device *jrdev = ctx->jrdev;
216 + struct caam_dma_edesc *edesc;
217 + struct sec4_sg_entry *sec4_sg;
218 + dma_addr_t sec4_sg_dma_src;
219 + unsigned int sec4_sg_bytes;
220 +
221 + if (!dst_sg || !src_sg || !dst_nents || !src_nents)
222 + return NULL;
223 +
224 + sec4_sg_bytes = (src_nents + dst_nents) * sizeof(*sec4_sg);
225 +
226 + edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
227 + GFP_DMA | GFP_NOWAIT);
228 + if (!edesc)
229 + return ERR_PTR(-ENOMEM);
230 +
231 + edesc->src_len = caam_dma_sg_dma_len(src_sg, src_nents);
232 + edesc->dst_len = caam_dma_sg_dma_len(dst_sg, dst_nents);
233 + if (edesc->src_len != edesc->dst_len) {
234 + dev_err(jrdev, "%s: src(%u) and dst(%u) len mismatch.\n",
235 + __func__, edesc->src_len, edesc->dst_len);
236 + kfree(edesc);
237 + return ERR_PTR(-EINVAL);
238 + }
239 +
240 + dma_async_tx_descriptor_init(&edesc->async_tx, chan);
241 + edesc->async_tx.tx_submit = caam_dma_tx_submit;
242 + edesc->async_tx.flags = flags;
243 + edesc->async_tx.cookie = -EBUSY;
244 +
245 + /* Prepare SEC SGs */
246 + edesc->sec4_sg = (void *)edesc + offsetof(struct caam_dma_edesc, jd) +
247 + DESC_JOB_IO_LEN;
248 +
249 + sec4_sg = edesc->sec4_sg;
250 + sg_to_sec4_sg_last(src_sg, src_nents, sec4_sg, 0);
251 +
252 + sec4_sg += src_nents;
253 + sg_to_sec4_sg_last(dst_sg, dst_nents, sec4_sg, 0);
254 +
255 + sec4_sg_dma_src = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes,
256 + DMA_TO_DEVICE);
257 + if (dma_mapping_error(jrdev, sec4_sg_dma_src)) {
258 + dev_err(jrdev, "error mapping segments to device\n");
259 + kfree(edesc);
260 + return ERR_PTR(-ENOMEM);
261 + }
262 +
263 + edesc->src_dma = sec4_sg_dma_src;
264 + edesc->dst_dma = sec4_sg_dma_src + src_nents * sizeof(*sec4_sg);
265 + edesc->ctx = ctx;
266 +
267 + return edesc;
268 +}
269 +
270 +static void caam_jr_chan_free_edesc(struct caam_dma_edesc *edesc)
271 +{
272 + struct caam_dma_ctx *ctx = edesc->ctx;
273 + struct caam_dma_edesc *_edesc = NULL;
274 +
275 + spin_lock_bh(&ctx->edesc_lock);
276 +
277 + list_add_tail(&edesc->node, &ctx->done_not_acked);
278 + list_for_each_entry_safe(edesc, _edesc, &ctx->done_not_acked, node) {
279 + if (async_tx_test_ack(&edesc->async_tx)) {
280 + list_del(&edesc->node);
281 + kfree(edesc);
282 + }
283 + }
284 +
285 + spin_unlock_bh(&ctx->edesc_lock);
286 +}
287 +
288 +static void caam_dma_done(struct device *dev, u32 *hwdesc, u32 err,
289 + void *context)
290 +{
291 + struct caam_dma_edesc *edesc = context;
292 + struct caam_dma_ctx *ctx = edesc->ctx;
293 + dma_async_tx_callback callback;
294 + void *callback_param;
295 +
296 + if (err)
297 + caam_jr_strstatus(ctx->jrdev, err);
298 +
299 + dma_run_dependencies(&edesc->async_tx);
300 +
301 + spin_lock_bh(&ctx->edesc_lock);
302 + dma_cookie_complete(&edesc->async_tx);
303 + spin_unlock_bh(&ctx->edesc_lock);
304 +
305 + callback = edesc->async_tx.callback;
306 + callback_param = edesc->async_tx.callback_param;
307 +
308 + dma_descriptor_unmap(&edesc->async_tx);
309 +
310 + caam_jr_chan_free_edesc(edesc);
311 +
312 + if (callback)
313 + callback(callback_param);
314 +}
315 +
316 +static void caam_dma_sg_init_job_desc(struct caam_dma_edesc *edesc)
317 +{
318 + u32 *jd = edesc->jd;
319 + u32 *sh_desc = dma_sh_desc->desc;
320 + dma_addr_t desc_dma = dma_sh_desc->desc_dma;
321 +
322 + /* init the job descriptor */
323 + init_job_desc_shared(jd, desc_dma, desc_len(sh_desc), HDR_REVERSE);
324 +
325 + /* set SEQIN PTR */
326 + append_seq_in_ptr(jd, edesc->src_dma, edesc->src_len, LDST_SGF);
327 +
328 + /* set SEQOUT PTR */
329 + append_seq_out_ptr(jd, edesc->dst_dma, edesc->dst_len, LDST_SGF);
330 +
331 +#ifdef DEBUG
332 + print_hex_dump(KERN_ERR, "caam dma desc@" __stringify(__LINE__) ": ",
333 + DUMP_PREFIX_ADDRESS, 16, 4, jd, desc_bytes(jd), 1);
334 +#endif
335 +}
336 +
337 +/* This function can be called from an interrupt context */
338 +static struct dma_async_tx_descriptor *
339 +caam_dma_prep_sg(struct dma_chan *chan, struct scatterlist *dst_sg,
340 + unsigned int dst_nents, struct scatterlist *src_sg,
341 + unsigned int src_nents, unsigned long flags)
342 +{
343 + struct caam_dma_edesc *edesc;
344 +
345 + /* allocate extended descriptor */
346 + edesc = caam_dma_sg_edesc_alloc(chan, dst_sg, dst_nents, src_sg,
347 + src_nents, flags);
348 + if (IS_ERR_OR_NULL(edesc))
349 + return ERR_CAST(edesc);
350 +
351 + /* Initialize job descriptor */
352 + caam_dma_sg_init_job_desc(edesc);
353 +
354 + return &edesc->async_tx;
355 +}
356 +
357 +static void caam_dma_memcpy_init_job_desc(struct caam_dma_edesc *edesc)
358 +{
359 + u32 *jd = edesc->jd;
360 + u32 *sh_desc = dma_sh_desc->desc;
361 + dma_addr_t desc_dma = dma_sh_desc->desc_dma;
362 +
363 + /* init the job descriptor */
364 + init_job_desc_shared(jd, desc_dma, desc_len(sh_desc), HDR_REVERSE);
365 +
366 + /* set SEQIN PTR */
367 + append_seq_in_ptr(jd, edesc->src_dma, edesc->src_len, 0);
368 +
369 + /* set SEQOUT PTR */
370 + append_seq_out_ptr(jd, edesc->dst_dma, edesc->dst_len, 0);
371 +
372 +#ifdef DEBUG
373 + print_hex_dump(KERN_ERR, "caam dma desc@" __stringify(__LINE__) ": ",
374 + DUMP_PREFIX_ADDRESS, 16, 4, jd, desc_bytes(jd), 1);
375 +#endif
376 +}
377 +
378 +static struct dma_async_tx_descriptor *
379 +caam_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
380 + size_t len, unsigned long flags)
381 +{
382 + struct caam_dma_edesc *edesc;
383 + struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
384 + chan);
385 +
386 + edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | GFP_NOWAIT);
387 + if (!edesc)
388 + return ERR_PTR(-ENOMEM);
389 +
390 + dma_async_tx_descriptor_init(&edesc->async_tx, chan);
391 + edesc->async_tx.tx_submit = caam_dma_tx_submit;
392 + edesc->async_tx.flags = flags;
393 + edesc->async_tx.cookie = -EBUSY;
394 +
395 + edesc->src_dma = src;
396 + edesc->src_len = len;
397 + edesc->dst_dma = dst;
398 + edesc->dst_len = len;
399 + edesc->ctx = ctx;
400 +
401 + caam_dma_memcpy_init_job_desc(edesc);
402 +
403 + return &edesc->async_tx;
404 +}
405 +
406 +/* This function can be called in an interrupt context */
407 +static void caam_dma_issue_pending(struct dma_chan *chan)
408 +{
409 + struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
410 + chan);
411 + struct caam_dma_edesc *edesc, *_edesc;
412 +
413 + spin_lock_bh(&ctx->edesc_lock);
414 + list_for_each_entry_safe(edesc, _edesc, &ctx->submit_q, node) {
415 + if (caam_jr_enqueue(ctx->jrdev, edesc->jd,
416 + caam_dma_done, edesc) < 0)
417 + break;
418 + list_del(&edesc->node);
419 + }
420 + spin_unlock_bh(&ctx->edesc_lock);
421 +}
422 +
423 +static void caam_dma_free_chan_resources(struct dma_chan *chan)
424 +{
425 + struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
426 + chan);
427 + struct caam_dma_edesc *edesc, *_edesc;
428 +
429 + spin_lock_bh(&ctx->edesc_lock);
430 + list_for_each_entry_safe(edesc, _edesc, &ctx->submit_q, node) {
431 + list_del(&edesc->node);
432 + kfree(edesc);
433 + }
434 + list_for_each_entry_safe(edesc, _edesc, &ctx->done_not_acked, node) {
435 + list_del(&edesc->node);
436 + kfree(edesc);
437 + }
438 + spin_unlock_bh(&ctx->edesc_lock);
439 +}
440 +
441 +static int caam_dma_jr_chan_bind(void)
442 +{
443 + struct device *jrdev;
444 + struct caam_dma_ctx *ctx;
445 + int bonds = 0;
446 + int i;
447 +
448 + for (i = 0; i < caam_jr_driver_probed(); i++) {
449 + jrdev = caam_jridx_alloc(i);
450 + if (IS_ERR(jrdev)) {
451 + pr_err("job ring device %d allocation failed\n", i);
452 + continue;
453 + }
454 +
455 + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
456 + if (!ctx) {
457 + caam_jr_free(jrdev);
458 + continue;
459 + }
460 +
461 + ctx->chan.device = dma_dev;
462 + ctx->chan.private = ctx;
463 +
464 + ctx->jrdev = jrdev;
465 +
466 + INIT_LIST_HEAD(&ctx->submit_q);
467 + INIT_LIST_HEAD(&ctx->done_not_acked);
468 + INIT_LIST_HEAD(&ctx->node);
469 + spin_lock_init(&ctx->edesc_lock);
470 +
471 + dma_cookie_init(&ctx->chan);
472 +
473 + /* add the context of this channel to the context list */
474 + list_add_tail(&ctx->node, &dma_ctx_list);
475 +
476 + /* add this channel to the device chan list */
477 + list_add_tail(&ctx->chan.device_node, &dma_dev->channels);
478 +
479 + bonds++;
480 + }
481 +
482 + return bonds;
483 +}
484 +
485 +static inline void caam_jr_dma_free(struct dma_chan *chan)
486 +{
487 + struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
488 + chan);
489 +
490 + list_del(&ctx->node);
491 + list_del(&chan->device_node);
492 + caam_jr_free(ctx->jrdev);
493 + kfree(ctx);
494 +}
495 +
496 +static void set_caam_dma_desc(u32 *desc)
497 +{
498 + u32 *jmp_cmd;
499 +
500 + /* dma shared descriptor */
501 + init_sh_desc(desc, HDR_SHARE_NEVER | (1 << HDR_START_IDX_SHIFT));
502 +
503 + /* REG1 = CAAM_DMA_CHUNK_SIZE */
504 + append_math_add_imm_u32(desc, REG1, ZERO, IMM, CAAM_DMA_CHUNK_SIZE);
505 +
506 + /* REG0 = SEQINLEN - CAAM_DMA_CHUNK_SIZE */
507 + append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, CAAM_DMA_CHUNK_SIZE);
508 +
509 + /* if (REG0 > 0)
510 + * jmp to LABEL1
511 + */
512 + jmp_cmd = append_jump(desc, JUMP_TEST_INVALL | JUMP_COND_MATH_N |
513 + JUMP_COND_MATH_Z);
514 +
515 + /* REG1 = SEQINLEN */
516 + append_math_sub(desc, REG1, SEQINLEN, ZERO, CAAM_CMD_SZ);
517 +
518 + /* LABEL1 */
519 + set_jump_tgt_here(desc, jmp_cmd);
520 +
521 + /* VARSEQINLEN = REG1 */
522 + append_math_add(desc, VARSEQINLEN, REG1, ZERO, CAAM_CMD_SZ);
523 +
524 + /* VARSEQOUTLEN = REG1 */
525 + append_math_add(desc, VARSEQOUTLEN, REG1, ZERO, CAAM_CMD_SZ);
526 +
527 + /* do FIFO STORE */
528 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_METADATA | LDST_VLF);
529 +
530 + /* do FIFO LOAD */
531 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
532 + FIFOLD_TYPE_IFIFO | LDST_VLF);
533 +
534 + /* if (REG0 > 0)
535 + * jmp 0xF8 (after shared desc header)
536 + */
537 + append_jump(desc, JUMP_TEST_INVALL | JUMP_COND_MATH_N |
538 + JUMP_COND_MATH_Z | 0xF8);
539 +
540 +#ifdef DEBUG
541 + print_hex_dump(KERN_ERR, "caam dma shdesc@" __stringify(__LINE__) ": ",
542 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
543 +#endif
544 +}
545 +
546 +static int __init caam_dma_probe(struct platform_device *pdev)
547 +{
548 + struct device *dev = &pdev->dev;
549 + struct device *ctrldev = dev->parent;
550 + struct dma_chan *chan, *_chan;
551 + u32 *sh_desc;
552 + int err = -ENOMEM;
553 + int bonds;
554 +
555 + if (!caam_jr_driver_probed()) {
556 + dev_info(dev, "Defer probing after JR driver probing\n");
557 + return -EPROBE_DEFER;
558 + }
559 +
560 + dma_dev = kzalloc(sizeof(*dma_dev), GFP_KERNEL);
561 + if (!dma_dev)
562 + return -ENOMEM;
563 +
564 + dma_sh_desc = kzalloc(sizeof(*dma_sh_desc), GFP_KERNEL | GFP_DMA);
565 + if (!dma_sh_desc)
566 + goto desc_err;
567 +
568 + sh_desc = dma_sh_desc->desc;
569 + set_caam_dma_desc(sh_desc);
570 + dma_sh_desc->desc_dma = dma_map_single(ctrldev, sh_desc,
571 + desc_bytes(sh_desc),
572 + DMA_TO_DEVICE);
573 + if (dma_mapping_error(ctrldev, dma_sh_desc->desc_dma)) {
574 + dev_err(dev, "unable to map dma descriptor\n");
575 + goto map_err;
576 + }
577 +
578 + INIT_LIST_HEAD(&dma_dev->channels);
579 +
580 + bonds = caam_dma_jr_chan_bind();
581 + if (!bonds) {
582 + err = -ENODEV;
583 + goto jr_bind_err;
584 + }
585 +
586 + dma_dev->dev = dev;
587 + dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
588 + dma_cap_set(DMA_SG, dma_dev->cap_mask);
589 + dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
590 + dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
591 + dma_dev->device_tx_status = dma_cookie_status;
592 + dma_dev->device_issue_pending = caam_dma_issue_pending;
593 + dma_dev->device_prep_dma_sg = caam_dma_prep_sg;
594 + dma_dev->device_prep_dma_memcpy = caam_dma_prep_memcpy;
595 + dma_dev->device_free_chan_resources = caam_dma_free_chan_resources;
596 +
597 + err = dma_async_device_register(dma_dev);
598 + if (err) {
599 + dev_err(dev, "Failed to register CAAM DMA engine\n");
600 + goto jr_bind_err;
601 + }
602 +
603 + dev_info(dev, "caam dma support with %d job rings\n", bonds);
604 +
605 + return err;
606 +
607 +jr_bind_err:
608 + list_for_each_entry_safe(chan, _chan, &dma_dev->channels, device_node)
609 + caam_jr_dma_free(chan);
610 +
611 + dma_unmap_single(ctrldev, dma_sh_desc->desc_dma, desc_bytes(sh_desc),
612 + DMA_TO_DEVICE);
613 +map_err:
614 + kfree(dma_sh_desc);
615 +desc_err:
616 + kfree(dma_dev);
617 + return err;
618 +}
619 +
620 +static int caam_dma_remove(struct platform_device *pdev)
621 +{
622 + struct device *dev = &pdev->dev;
623 + struct device *ctrldev = dev->parent;
624 + struct caam_dma_ctx *ctx, *_ctx;
625 +
626 + dma_async_device_unregister(dma_dev);
627 +
628 + list_for_each_entry_safe(ctx, _ctx, &dma_ctx_list, node) {
629 + list_del(&ctx->node);
630 + caam_jr_free(ctx->jrdev);
631 + kfree(ctx);
632 + }
633 +
634 + dma_unmap_single(ctrldev, dma_sh_desc->desc_dma,
635 + desc_bytes(dma_sh_desc->desc), DMA_TO_DEVICE);
636 +
637 + kfree(dma_sh_desc);
638 + kfree(dma_dev);
639 +
640 + dev_info(dev, "caam dma support disabled\n");
641 + return 0;
642 +}
643 +
644 +static const struct of_device_id caam_dma_match[] = {
645 + { .compatible = "fsl,sec-v5.4-dma", },
646 + { .compatible = "fsl,sec-v5.0-dma", },
647 + { .compatible = "fsl,sec-v4.0-dma", },
648 + {},
649 +};
650 +MODULE_DEVICE_TABLE(of, caam_dma_match);
651 +
652 +static struct platform_driver caam_dma_driver = {
653 + .driver = {
654 + .name = "caam-dma",
655 + .of_match_table = caam_dma_match,
656 + },
657 + .probe = caam_dma_probe,
658 + .remove = caam_dma_remove,
659 +};
660 +module_platform_driver(caam_dma_driver);
661 +
662 +MODULE_LICENSE("Dual BSD/GPL");
663 +MODULE_DESCRIPTION("NXP CAAM support for SG DMA");
664 +MODULE_AUTHOR("NXP Semiconductors");
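A worked example of the chunking performed by the shared descriptor built in
set_caam_dma_desc() above (derived from the descriptor logic; the figures are
not stated elsewhere in this patch): the descriptor consumes the input sequence
in CAAM_DMA_CHUNK_SIZE (65280-byte) passes, falling back to the remaining
SEQINLEN on the final, possibly short, pass, and the trailing backward jump
re-enters the loop while input remains. A 200 KiB (204800-byte) memcpy is thus
executed as three full 65280-byte FIFO store/load passes plus one final pass of
204800 - 3 * 65280 = 8960 bytes.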
665 --- /dev/null
666 +++ b/drivers/dma/dpaa2-qdma/Kconfig
667 @@ -0,0 +1,8 @@
668 +menuconfig FSL_DPAA2_QDMA
669 + tristate "NXP DPAA2 QDMA"
670 + depends on FSL_MC_BUS && FSL_MC_DPIO
671 + select DMA_ENGINE
672 + select DMA_VIRTUAL_CHANNELS
673 + ---help---
674 + NXP Data Path Acceleration Architecture 2 QDMA driver,
675 + using the NXP MC bus driver.
676 --- /dev/null
677 +++ b/drivers/dma/dpaa2-qdma/Makefile
678 @@ -0,0 +1,8 @@
679 +#
680 +# Makefile for the NXP DPAA2 qDMA driver
681 +#
682 +ccflags-y += -DVERSION=\"\"
683 +
684 +obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma.o
685 +
686 +fsl-dpaa2-qdma-objs := dpaa2-qdma.o dpdmai.o
687 --- /dev/null
688 +++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.c
689 @@ -0,0 +1,986 @@
690 +/*
691 + * drivers/dma/dpaa2-qdma/dpaa2-qdma.c
692 + *
693 + * Copyright 2015-2017 NXP Semiconductor, Inc.
694 + * Author: Changming Huang <jerry.huang@nxp.com>
695 + *
696 + * Driver for the NXP QDMA engine with QMan mode.
697 + * Channel virtualization is supported through enqueuing of DMA jobs to,
698 + * or dequeuing DMA jobs from, different work queues with the QMan portal.
699 + * This module can be found on NXP LS2 SoCs.
700 + *
701 + * This program is free software; you can redistribute it and/or modify it
702 + * under the terms of the GNU General Public License as published by the
703 + * Free Software Foundation; either version 2 of the License, or (at your
704 + * option) any later version.
705 + */
706 +
707 +#include <linux/init.h>
708 +#include <linux/module.h>
709 +#include <linux/interrupt.h>
710 +#include <linux/clk.h>
711 +#include <linux/dma-mapping.h>
712 +#include <linux/dmapool.h>
713 +#include <linux/slab.h>
714 +#include <linux/spinlock.h>
715 +#include <linux/of.h>
716 +#include <linux/of_device.h>
717 +#include <linux/of_address.h>
718 +#include <linux/of_irq.h>
719 +#include <linux/of_dma.h>
720 +#include <linux/types.h>
721 +#include <linux/delay.h>
722 +#include <linux/iommu.h>
723 +
724 +#include "../virt-dma.h"
725 +
726 +#include "../../../drivers/staging/fsl-mc/include/mc.h"
727 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
728 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
729 +#include "fsl_dpdmai_cmd.h"
730 +#include "fsl_dpdmai.h"
731 +#include "dpaa2-qdma.h"
732 +
733 +static bool smmu_disable = true;
734 +
735 +static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
736 +{
737 + return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
738 +}
739 +
740 +static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
741 +{
742 + return container_of(vd, struct dpaa2_qdma_comp, vdesc);
743 +}
744 +
745 +static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
746 +{
747 + return 0;
748 +}
749 +
750 +static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
751 +{
752 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
753 + unsigned long flags;
754 + LIST_HEAD(head);
755 +
756 + spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
757 + vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
758 + spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);
759 +
760 + vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);
761 +}
762 +
763 +/*
764 + * Request a command descriptor for enqueue.
765 + */
766 +static struct dpaa2_qdma_comp *
767 +dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
768 +{
769 + struct dpaa2_qdma_comp *comp_temp = NULL;
770 + unsigned long flags;
771 +
772 + spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
773 + if (list_empty(&dpaa2_chan->comp_free)) {
774 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
775 + comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
776 + if (!comp_temp)
777 + goto err;
778 + comp_temp->fd_virt_addr = dma_pool_alloc(dpaa2_chan->fd_pool,
779 + GFP_NOWAIT, &comp_temp->fd_bus_addr);
780 + if (!comp_temp->fd_virt_addr)
781 + goto err;
782 +
783 + comp_temp->fl_virt_addr =
784 + (void *)((struct dpaa2_fd *)
785 + comp_temp->fd_virt_addr + 1);
786 + comp_temp->fl_bus_addr = comp_temp->fd_bus_addr +
787 + sizeof(struct dpaa2_fd);
788 + comp_temp->desc_virt_addr =
789 + (void *)((struct dpaa2_frame_list *)
790 + comp_temp->fl_virt_addr + 3);
791 + comp_temp->desc_bus_addr = comp_temp->fl_bus_addr +
792 + sizeof(struct dpaa2_frame_list) * 3;
793 +
794 + comp_temp->qchan = dpaa2_chan;
795 + comp_temp->sg_blk_num = 0;
796 + INIT_LIST_HEAD(&comp_temp->sg_src_head);
797 + INIT_LIST_HEAD(&comp_temp->sg_dst_head);
798 + return comp_temp;
799 + }
800 + comp_temp = list_first_entry(&dpaa2_chan->comp_free,
801 + struct dpaa2_qdma_comp, list);
802 + list_del(&comp_temp->list);
803 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
804 +
805 + comp_temp->qchan = dpaa2_chan;
806 +err:
807 + return comp_temp;
808 +}
809 +
810 +static void dpaa2_qdma_populate_fd(uint32_t format,
811 + struct dpaa2_qdma_comp *dpaa2_comp)
812 +{
813 + struct dpaa2_fd *fd;
814 +
815 + fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
816 + memset(fd, 0, sizeof(struct dpaa2_fd));
817 +
818 +	/* populate the FD */
819 + fd->simple.addr = dpaa2_comp->fl_bus_addr;
820 + /* Bypass memory translation, Frame list format, short length disable */
821 + /* we need to disable BMT if fsl-mc use iova addr */
822 + if (smmu_disable)
823 + fd->simple.bpid = QMAN_FD_BMT_ENABLE;
824 + fd->simple.format_offset = QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE;
825 +
826 + fd->simple.frc = format | QDMA_SER_CTX;
827 +}
828 +
829 +/* first frame list for descriptor buffer */
830 +static void dpaa2_qdma_populate_first_framel(
831 + struct dpaa2_frame_list *f_list,
832 + struct dpaa2_qdma_comp *dpaa2_comp)
833 +{
834 + struct dpaa2_qdma_sd_d *sdd;
835 +
836 + sdd = (struct dpaa2_qdma_sd_d *)dpaa2_comp->desc_virt_addr;
837 + memset(sdd, 0, 2 * (sizeof(*sdd)));
838 + /* source and destination descriptor */
839 + sdd->cmd = QDMA_SD_CMD_RDTTYPE_COHERENT; /* source descriptor CMD */
840 + sdd++;
841 + sdd->cmd = QDMA_DD_CMD_WRTTYPE_COHERENT; /* dest descriptor CMD */
842 +
843 + memset(f_list, 0, sizeof(struct dpaa2_frame_list));
844 + /* first frame list to source descriptor */
845 + f_list->addr_lo = dpaa2_comp->desc_bus_addr;
846 + f_list->addr_hi = (dpaa2_comp->desc_bus_addr >> 32);
847 + f_list->data_len.data_len_sl0 = 0x20; /* source/destination desc len */
848 + f_list->fmt = QDMA_FL_FMT_SBF; /* single buffer frame */
849 + if (smmu_disable)
850 + f_list->bmt = QDMA_FL_BMT_ENABLE; /* bypass memory translation */
851 + f_list->sl = QDMA_FL_SL_LONG; /* long length */
852 + f_list->f = 0; /* not the last frame list */
853 +}
854 +
855 +/* source and destination frame list */
856 +static void dpaa2_qdma_populate_frames(struct dpaa2_frame_list *f_list,
857 + dma_addr_t dst, dma_addr_t src, size_t len, uint8_t fmt)
858 +{
859 + /* source frame list to source buffer */
860 + memset(f_list, 0, sizeof(struct dpaa2_frame_list));
861 + f_list->addr_lo = src;
862 + f_list->addr_hi = (src >> 32);
863 + f_list->data_len.data_len_sl0 = len;
864 + f_list->fmt = fmt; /* single buffer frame or scatter gather frame */
865 + if (smmu_disable)
866 + f_list->bmt = QDMA_FL_BMT_ENABLE; /* bypass memory translation */
867 + f_list->sl = QDMA_FL_SL_LONG; /* long length */
868 + f_list->f = 0; /* not the last frame list */
869 +
870 + f_list++;
871 + /* destination frame list to destination buffer */
872 + memset(f_list, 0, sizeof(struct dpaa2_frame_list));
873 + f_list->addr_lo = dst;
874 + f_list->addr_hi = (dst >> 32);
875 + f_list->data_len.data_len_sl0 = len;
876 + f_list->fmt = fmt; /* single buffer frame or scatter gather frame */
877 + if (smmu_disable)
878 + f_list->bmt = QDMA_FL_BMT_ENABLE; /* bypass memory translation */
879 + f_list->sl = QDMA_FL_SL_LONG; /* long length */
880 + f_list->f = QDMA_FL_F; /* Final bit: 1, for last frame list */
881 +}
882 +
883 +static struct dma_async_tx_descriptor *dpaa2_qdma_prep_memcpy(
884 + struct dma_chan *chan, dma_addr_t dst,
885 + dma_addr_t src, size_t len, unsigned long flags)
886 +{
887 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
888 + struct dpaa2_qdma_comp *dpaa2_comp;
889 + struct dpaa2_frame_list *f_list;
890 + uint32_t format;
891 +
892 + dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
893 +
894 +#ifdef LONG_FORMAT
895 + format = QDMA_FD_LONG_FORMAT;
896 +#else
897 + format = QDMA_FD_SHORT_FORMAT;
898 +#endif
899 + /* populate Frame descriptor */
900 + dpaa2_qdma_populate_fd(format, dpaa2_comp);
901 +
902 + f_list = (struct dpaa2_frame_list *)dpaa2_comp->fl_virt_addr;
903 +
904 +#ifdef LONG_FORMAT
905 +	/* first frame list for the descriptor buffer (long format) */
906 + dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp);
907 +
908 + f_list++;
909 +#endif
910 +
911 + dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);
912 +
913 + return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
914 +}
915 +
916 +static struct qdma_sg_blk *dpaa2_qdma_get_sg_blk(
917 + struct dpaa2_qdma_comp *dpaa2_comp,
918 + struct dpaa2_qdma_chan *dpaa2_chan)
919 +{
920 + struct qdma_sg_blk *sg_blk = NULL;
921 + dma_addr_t phy_sgb;
922 + unsigned long flags;
923 +
924 + spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
925 + if (list_empty(&dpaa2_chan->sgb_free)) {
926 + sg_blk = (struct qdma_sg_blk *)dma_pool_alloc(
927 + dpaa2_chan->sg_blk_pool,
928 + GFP_NOWAIT, &phy_sgb);
929 + if (!sg_blk) {
930 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
931 + return sg_blk;
932 + }
933 + sg_blk->blk_virt_addr = (void *)(sg_blk + 1);
934 + sg_blk->blk_bus_addr = phy_sgb + sizeof(*sg_blk);
935 + } else {
936 + sg_blk = list_first_entry(&dpaa2_chan->sgb_free,
937 + struct qdma_sg_blk, list);
938 + list_del(&sg_blk->list);
939 + }
940 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
941 +
942 + return sg_blk;
943 +}
944 +
945 +static uint32_t dpaa2_qdma_populate_sg(struct device *dev,
946 + struct dpaa2_qdma_chan *dpaa2_chan,
947 + struct dpaa2_qdma_comp *dpaa2_comp,
948 + struct scatterlist *dst_sg, u32 dst_nents,
949 + struct scatterlist *src_sg, u32 src_nents)
950 +{
951 + struct dpaa2_qdma_sg *src_sge;
952 + struct dpaa2_qdma_sg *dst_sge;
953 + struct qdma_sg_blk *sg_blk;
954 + struct qdma_sg_blk *sg_blk_dst;
955 + dma_addr_t src;
956 + dma_addr_t dst;
957 + uint32_t num;
958 + uint32_t blocks;
959 + uint32_t len = 0;
960 + uint32_t total_len = 0;
961 + int i, j = 0;
962 +
963 + num = min(dst_nents, src_nents);
964 + blocks = num / (NUM_SG_PER_BLK - 1);
965 + if (num % (NUM_SG_PER_BLK - 1))
966 + blocks += 1;
967 + if (dpaa2_comp->sg_blk_num < blocks) {
968 + len = blocks - dpaa2_comp->sg_blk_num;
969 + for (i = 0; i < len; i++) {
970 + /* source sg blocks */
971 + sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan);
972 + if (!sg_blk)
973 + return 0;
974 + list_add_tail(&sg_blk->list, &dpaa2_comp->sg_src_head);
975 + /* destination sg blocks */
976 + sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan);
977 + if (!sg_blk)
978 + return 0;
979 + list_add_tail(&sg_blk->list, &dpaa2_comp->sg_dst_head);
980 + }
981 + } else {
982 + len = dpaa2_comp->sg_blk_num - blocks;
983 + for (i = 0; i < len; i++) {
984 + spin_lock(&dpaa2_chan->queue_lock);
985 + /* handle source sg blocks */
986 + sg_blk = list_first_entry(&dpaa2_comp->sg_src_head,
987 + struct qdma_sg_blk, list);
988 + list_del(&sg_blk->list);
989 + list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free);
990 + /* handle destination sg blocks */
991 + sg_blk = list_first_entry(&dpaa2_comp->sg_dst_head,
992 + struct qdma_sg_blk, list);
993 + list_del(&sg_blk->list);
994 + list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free);
995 + spin_unlock(&dpaa2_chan->queue_lock);
996 + }
997 + }
998 + dpaa2_comp->sg_blk_num = blocks;
999 +
1000 + /* get the first source sg phy address */
1001 + sg_blk = list_first_entry(&dpaa2_comp->sg_src_head,
1002 + struct qdma_sg_blk, list);
1003 + dpaa2_comp->sge_src_bus_addr = sg_blk->blk_bus_addr;
1004 +	/* get the first destination sg phy address */
1005 + sg_blk_dst = list_first_entry(&dpaa2_comp->sg_dst_head,
1006 + struct qdma_sg_blk, list);
1007 + dpaa2_comp->sge_dst_bus_addr = sg_blk_dst->blk_bus_addr;
1008 +
1009 + for (i = 0; i < blocks; i++) {
1010 + src_sge = (struct dpaa2_qdma_sg *)sg_blk->blk_virt_addr;
1011 + dst_sge = (struct dpaa2_qdma_sg *)sg_blk_dst->blk_virt_addr;
1012 +
1013 + for (j = 0; j < (NUM_SG_PER_BLK - 1); j++) {
1014 + len = min(sg_dma_len(dst_sg), sg_dma_len(src_sg));
1015 + if (0 == len)
1016 + goto fetch;
1017 + total_len += len;
1018 + src = sg_dma_address(src_sg);
1019 + dst = sg_dma_address(dst_sg);
1020 +
1021 + /* source SG */
1022 + src_sge->addr_lo = src;
1023 + src_sge->addr_hi = (src >> 32);
1024 + src_sge->data_len.data_len_sl0 = len;
1025 + src_sge->ctrl.sl = QDMA_SG_SL_LONG;
1026 + src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
1027 + /* destination SG */
1028 + dst_sge->addr_lo = dst;
1029 + dst_sge->addr_hi = (dst >> 32);
1030 + dst_sge->data_len.data_len_sl0 = len;
1031 + dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
1032 + dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
1033 +fetch:
1034 + num--;
1035 + if (0 == num) {
1036 + src_sge->ctrl.f = QDMA_SG_F;
1037 + dst_sge->ctrl.f = QDMA_SG_F;
1038 + goto end;
1039 + }
1040 + dst_sg = sg_next(dst_sg);
1041 + src_sg = sg_next(src_sg);
1042 + src_sge++;
1043 + dst_sge++;
1044 + if (j == (NUM_SG_PER_BLK - 2)) {
1045 + /* for next blocks, extension */
1046 + sg_blk = list_next_entry(sg_blk, list);
1047 + sg_blk_dst = list_next_entry(sg_blk_dst, list);
1048 + src_sge->addr_lo = sg_blk->blk_bus_addr;
1049 + src_sge->addr_hi = sg_blk->blk_bus_addr >> 32;
1050 + src_sge->ctrl.sl = QDMA_SG_SL_LONG;
1051 + src_sge->ctrl.fmt = QDMA_SG_FMT_SGTE;
1052 + dst_sge->addr_lo = sg_blk_dst->blk_bus_addr;
1053 + dst_sge->addr_hi =
1054 + sg_blk_dst->blk_bus_addr >> 32;
1055 + dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
1056 + dst_sge->ctrl.fmt = QDMA_SG_FMT_SGTE;
1057 + }
1058 + }
1059 + }
1060 +
1061 +end:
1062 + return total_len;
1063 +}
1064 +
1065 +static struct dma_async_tx_descriptor *dpaa2_qdma_prep_sg(
1066 + struct dma_chan *chan,
1067 + struct scatterlist *dst_sg, u32 dst_nents,
1068 + struct scatterlist *src_sg, u32 src_nents,
1069 + unsigned long flags)
1070 +{
1071 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
1072 + struct dpaa2_qdma_comp *dpaa2_comp;
1073 + struct dpaa2_frame_list *f_list;
1074 + struct device *dev = dpaa2_chan->qdma->priv->dev;
1075 + uint32_t total_len = 0;
1076 +
1077 + /* basic sanity checks */
1078 + if (dst_nents == 0 || src_nents == 0)
1079 + return NULL;
1080 +
1081 + if (dst_sg == NULL || src_sg == NULL)
1082 + return NULL;
1083 +
1084 + /* get the descriptors required */
1085 + dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
1086 +
1087 + /* populate Frame descriptor */
1088 + dpaa2_qdma_populate_fd(QDMA_FD_LONG_FORMAT, dpaa2_comp);
1089 +
1090 + /* prepare Scatter gather entry for source and destination */
1091 + total_len = dpaa2_qdma_populate_sg(dev, dpaa2_chan,
1092 + dpaa2_comp, dst_sg, dst_nents, src_sg, src_nents);
1093 +
1094 + f_list = (struct dpaa2_frame_list *)dpaa2_comp->fl_virt_addr;
1095 + /* first frame list for descriptor buffer */
1096 + dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp);
1097 + f_list++;
1098 + /* prepare Scatter gather entry for source and destination */
1099 + /* populate source and destination frame list table */
1100 + dpaa2_qdma_populate_frames(f_list, dpaa2_comp->sge_dst_bus_addr,
1101 + dpaa2_comp->sge_src_bus_addr,
1102 + total_len, QDMA_FL_FMT_SGE);
1103 +
1104 + return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
1105 +}
1106 +
1107 +static enum dma_status dpaa2_qdma_tx_status(struct dma_chan *chan,
1108 + dma_cookie_t cookie, struct dma_tx_state *txstate)
1109 +{
1110 + return dma_cookie_status(chan, cookie, txstate);
1111 +}
1112 +
1113 +static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
1114 +{
1115 +}
1116 +
1117 +static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
1118 +{
1119 + struct dpaa2_qdma_comp *dpaa2_comp;
1120 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
1121 + struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
1122 + struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
1123 + struct virt_dma_desc *vdesc;
1124 + struct dpaa2_fd *fd;
1125 + int err;
1126 + unsigned long flags;
1127 +
1128 + spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
1129 + spin_lock(&dpaa2_chan->vchan.lock);
1130 + if (vchan_issue_pending(&dpaa2_chan->vchan)) {
1131 + vdesc = vchan_next_desc(&dpaa2_chan->vchan);
1132 + if (!vdesc)
1133 + goto err_enqueue;
1134 + dpaa2_comp = to_fsl_qdma_comp(vdesc);
1135 +
1136 + fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
1137 +
1138 + list_del(&vdesc->node);
1139 + list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);
1140 +
1141 +		/* TODO: priority is hard-coded to zero */
1142 + err = dpaa2_io_service_enqueue_fq(NULL,
1143 + priv->tx_queue_attr[0].fqid, fd);
1144 + if (err) {
1145 + list_del(&dpaa2_comp->list);
1146 + list_add_tail(&dpaa2_comp->list,
1147 + &dpaa2_chan->comp_free);
1148 + }
1149 +
1150 + }
1151 +err_enqueue:
1152 + spin_unlock(&dpaa2_chan->vchan.lock);
1153 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
1154 +}
1155 +
1156 +static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
1157 +{
1158 + struct device *dev = &ls_dev->dev;
1159 + struct dpaa2_qdma_priv *priv;
1160 + struct dpaa2_qdma_priv_per_prio *ppriv;
1161 + uint8_t prio_def = DPDMAI_PRIO_NUM;
1162 + int err;
1163 + int i;
1164 +
1165 + priv = dev_get_drvdata(dev);
1166 +
1167 + priv->dev = dev;
1168 + priv->dpqdma_id = ls_dev->obj_desc.id;
1169 +
1170 +	/* Get the handle for the DPDMAI this interface is associated with */
1171 + err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
1172 + if (err) {
1173 + dev_err(dev, "dpdmai_open() failed\n");
1174 + return err;
1175 + }
1176 + dev_info(dev, "Opened dpdmai object successfully\n");
1177 +
1178 + err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
1179 + &priv->dpdmai_attr);
1180 + if (err) {
1181 + dev_err(dev, "dpdmai_get_attributes() failed\n");
1182 + return err;
1183 + }
1184 +
1185 + if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
1186 + dev_err(dev, "DPDMAI major version mismatch\n"
1187 + "Found %u.%u, supported version is %u.%u\n",
1188 + priv->dpdmai_attr.version.major,
1189 + priv->dpdmai_attr.version.minor,
1190 + DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
1191 + }
1192 +
1193 + if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
1194 + dev_err(dev, "DPDMAI minor version mismatch\n"
1195 + "Found %u.%u, supported version is %u.%u\n",
1196 + priv->dpdmai_attr.version.major,
1197 + priv->dpdmai_attr.version.minor,
1198 + DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
1199 + }
1200 +
1201 + priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
1202 + ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
1203 + if (!ppriv) {
1204 + dev_err(dev, "kzalloc for ppriv failed\n");
1205 + return -1;
1206 + }
1207 + priv->ppriv = ppriv;
1208 +
1209 + for (i = 0; i < priv->num_pairs; i++) {
1210 + err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
1211 + i, &priv->rx_queue_attr[i]);
1212 + if (err) {
1213 + dev_err(dev, "dpdmai_get_rx_queue() failed\n");
1214 + return err;
1215 + }
1216 + ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
1217 +
1218 + err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
1219 + i, &priv->tx_queue_attr[i]);
1220 + if (err) {
1221 + dev_err(dev, "dpdmai_get_tx_queue() failed\n");
1222 + return err;
1223 + }
1224 + ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
1225 + ppriv->prio = i;
1226 + ppriv->priv = priv;
1227 + ppriv++;
1228 + }
1229 +
1230 + return 0;
1231 +}
1232 +
1233 +static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
1234 +{
1235 + struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
1236 + struct dpaa2_qdma_priv_per_prio, nctx);
1237 + struct dpaa2_qdma_priv *priv = ppriv->priv;
1238 + struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
1239 + struct dpaa2_qdma_chan *qchan;
1240 + const struct dpaa2_fd *fd;
1241 + const struct dpaa2_fd *fd_eq;
1242 + struct dpaa2_dq *dq;
1243 + int err;
1244 + int is_last = 0;
1245 + uint8_t status;
1246 + int i;
1247 + int found;
1248 + uint32_t n_chans = priv->dpaa2_qdma->n_chans;
1249 +
1250 + do {
1251 + err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
1252 + ppriv->store);
1253 + } while (err);
1254 +
1255 + while (!is_last) {
1256 + do {
1257 + dq = dpaa2_io_store_next(ppriv->store, &is_last);
1258 + } while (!is_last && !dq);
1259 + if (!dq) {
1260 + dev_err(priv->dev, "FQID returned no valid frames!\n");
1261 + continue;
1262 + }
1263 +
1264 + /* obtain FD and process the error */
1265 + fd = dpaa2_dq_fd(dq);
1266 + status = fd->simple.ctrl & 0xff;
1267 + if (status)
1268 + dev_err(priv->dev, "FD error occurred\n");
1269 + found = 0;
1270 + for (i = 0; i < n_chans; i++) {
1271 + qchan = &priv->dpaa2_qdma->chans[i];
1272 + spin_lock(&qchan->queue_lock);
1273 + if (list_empty(&qchan->comp_used)) {
1274 + spin_unlock(&qchan->queue_lock);
1275 + continue;
1276 + }
1277 + list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
1278 + &qchan->comp_used, list) {
1279 + fd_eq = (struct dpaa2_fd *)
1280 + dpaa2_comp->fd_virt_addr;
1281 +
1282 + if (fd_eq->simple.addr ==
1283 + fd->simple.addr) {
1284 +
1285 + list_del(&dpaa2_comp->list);
1286 + list_add_tail(&dpaa2_comp->list,
1287 + &qchan->comp_free);
1288 +
1289 + spin_lock(&qchan->vchan.lock);
1290 + vchan_cookie_complete(
1291 + &dpaa2_comp->vdesc);
1292 + spin_unlock(&qchan->vchan.lock);
1293 + found = 1;
1294 + break;
1295 + }
1296 + }
1297 + spin_unlock(&qchan->queue_lock);
1298 + if (found)
1299 + break;
1300 + }
1301 + }
1302 +
1303 + dpaa2_io_service_rearm(NULL, ctx);
1304 +}
1305 +
1306 +static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
1307 +{
1308 + int err, i, num;
1309 + struct device *dev = priv->dev;
1310 + struct dpaa2_qdma_priv_per_prio *ppriv;
1311 +
1312 + num = priv->num_pairs;
1313 + ppriv = priv->ppriv;
1314 + for (i = 0; i < num; i++) {
1315 + ppriv->nctx.is_cdan = 0;
1316 + ppriv->nctx.desired_cpu = 1;
1317 + ppriv->nctx.id = ppriv->rsp_fqid;
1318 + ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
1319 + err = dpaa2_io_service_register(NULL, &ppriv->nctx);
1320 + if (err) {
1321 + dev_err(dev, "Notification register failed\n");
1322 + goto err_service;
1323 + }
1324 +
1325 + ppriv->store = dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE,
1326 + dev);
1327 + if (!ppriv->store) {
1328 + dev_err(dev, "dpaa2_io_store_create() failed\n");
1329 + goto err_store;
1330 + }
1331 +
1332 + ppriv++;
1333 + }
1334 + return 0;
1335 +
1336 +err_store:
1337 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
1338 +err_service:
1339 + ppriv--;
1340 + while (ppriv >= priv->ppriv) {
1341 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
1342 + dpaa2_io_store_destroy(ppriv->store);
1343 + ppriv--;
1344 + }
1345 + return -1;
1346 +}
1347 +
1348 +static void __cold dpaa2_dpmai_store_free(struct dpaa2_qdma_priv *priv)
1349 +{
1350 + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
1351 + int i;
1352 +
1353 + for (i = 0; i < priv->num_pairs; i++) {
1354 + dpaa2_io_store_destroy(ppriv->store);
1355 + ppriv++;
1356 + }
1357 +}
1358 +
1359 +static void __cold dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
1360 +{
1361 + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
1362 + int i;
1363 +
1364 + for (i = 0; i < priv->num_pairs; i++) {
1365 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
1366 + ppriv++;
1367 + }
1368 +}
1369 +
1370 +static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
1371 +{
1372 + int err;
1373 + struct dpdmai_rx_queue_cfg rx_queue_cfg;
1374 + struct device *dev = priv->dev;
1375 + struct dpaa2_qdma_priv_per_prio *ppriv;
1376 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
1377 + int i, num;
1378 +
1379 + num = priv->num_pairs;
1380 + ppriv = priv->ppriv;
1381 + for (i = 0; i < num; i++) {
1382 + rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
1383 + DPDMAI_QUEUE_OPT_DEST;
1384 + rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
1385 + rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
1386 + rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
1387 + rx_queue_cfg.dest_cfg.priority = ppriv->prio;
1388 + err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
1389 + rx_queue_cfg.dest_cfg.priority, &rx_queue_cfg);
1390 + if (err) {
1391 + dev_err(dev, "dpdmai_set_rx_queue() failed\n");
1392 + return err;
1393 + }
1394 +
1395 + ppriv++;
1396 + }
1397 +
1398 + return 0;
1399 +}
1400 +
1401 +static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
1402 +{
1403 + int err = 0;
1404 + struct device *dev = priv->dev;
1405 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
1406 + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
1407 + int i;
1408 +
1409 + for (i = 0; i < priv->num_pairs; i++) {
1410 + ppriv->nctx.qman64 = 0;
1411 + ppriv->nctx.dpio_id = 0;
1412 + ppriv++;
1413 + }
1414 +
1415 + err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
1416 + if (err)
1417 + dev_err(dev, "dpdmai_reset() failed\n");
1418 +
1419 + return err;
1420 +}
1421 +
1422 +static void __cold dpaa2_dpdmai_free_pool(struct dpaa2_qdma_chan *qchan,
1423 + struct list_head *head)
1424 +{
1425 + struct qdma_sg_blk *sgb_tmp, *_sgb_tmp;
1426 + /* free the QDMA SG pool block */
1427 + list_for_each_entry_safe(sgb_tmp, _sgb_tmp, head, list) {
1428 + sgb_tmp->blk_virt_addr = (void *)((struct qdma_sg_blk *)
1429 + sgb_tmp->blk_virt_addr - 1);
1430 + sgb_tmp->blk_bus_addr = sgb_tmp->blk_bus_addr
1431 + - sizeof(*sgb_tmp);
1432 + dma_pool_free(qchan->sg_blk_pool, sgb_tmp->blk_virt_addr,
1433 + sgb_tmp->blk_bus_addr);
1434 + }
1435 +
1436 +}
1437 +
1438 +static void __cold dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
1439 + struct list_head *head)
1440 +{
1441 + struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
1442 + /* free the QDMA comp resource */
1443 + list_for_each_entry_safe(comp_tmp, _comp_tmp,
1444 + head, list) {
1445 + dma_pool_free(qchan->fd_pool,
1446 + comp_tmp->fd_virt_addr,
1447 + comp_tmp->fd_bus_addr);
1448 + /* free the SG source block on comp */
1449 + dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_src_head);
1450 + /* free the SG destination block on comp */
1451 + dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_dst_head);
1452 + list_del(&comp_tmp->list);
1453 + kfree(comp_tmp);
1454 + }
1455 +
1456 +}
1457 +
1458 +static void __cold dpaa2_dpdmai_free_channels(
1459 + struct dpaa2_qdma_engine *dpaa2_qdma)
1460 +{
1461 + struct dpaa2_qdma_chan *qchan;
1462 + int num, i;
1463 +
1464 + num = dpaa2_qdma->n_chans;
1465 + for (i = 0; i < num; i++) {
1466 + qchan = &dpaa2_qdma->chans[i];
1467 + dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
1468 + dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
1469 + dpaa2_dpdmai_free_pool(qchan, &qchan->sgb_free);
1470 + dma_pool_destroy(qchan->fd_pool);
1471 + dma_pool_destroy(qchan->sg_blk_pool);
1472 + }
1473 +}
1474 +
1475 +static int dpaa2_dpdmai_alloc_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
1476 +{
1477 + struct dpaa2_qdma_chan *dpaa2_chan;
1478 + struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;
1479 + int i;
1480 +
1481 + INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
1482 + for (i = 0; i < dpaa2_qdma->n_chans; i++) {
1483 + dpaa2_chan = &dpaa2_qdma->chans[i];
1484 + dpaa2_chan->qdma = dpaa2_qdma;
1485 + dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
1486 + vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
1487 +
1488 + dpaa2_chan->fd_pool = dma_pool_create("fd_pool",
1489 + dev, FD_POOL_SIZE, 32, 0);
1490 + if (!dpaa2_chan->fd_pool)
1491 + return -1;
1492 + dpaa2_chan->sg_blk_pool = dma_pool_create("sg_blk_pool",
1493 + dev, SG_POOL_SIZE, 32, 0);
1494 + if (!dpaa2_chan->sg_blk_pool)
1495 + return -1;
1496 +
1497 + spin_lock_init(&dpaa2_chan->queue_lock);
1498 + INIT_LIST_HEAD(&dpaa2_chan->comp_used);
1499 + INIT_LIST_HEAD(&dpaa2_chan->comp_free);
1500 + INIT_LIST_HEAD(&dpaa2_chan->sgb_free);
1501 + }
1502 + return 0;
1503 +}
1504 +
1505 +static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
1506 +{
1507 + struct dpaa2_qdma_priv *priv;
1508 + struct device *dev = &dpdmai_dev->dev;
1509 + struct dpaa2_qdma_engine *dpaa2_qdma;
1510 + int err;
1511 +
1512 + priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1513 + if (!priv)
1514 + return -ENOMEM;
1515 + dev_set_drvdata(dev, priv);
1516 + priv->dpdmai_dev = dpdmai_dev;
1517 +
1518 + priv->iommu_domain = iommu_get_domain_for_dev(dev);
1519 + if (priv->iommu_domain)
1520 + smmu_disable = false;
1521 +
1522 + /* obtain a MC portal */
1523 + err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
1524 + if (err) {
1525 + dev_err(dev, "MC portal allocation failed\n");
1526 + goto err_mcportal;
1527 + }
1528 +
1529 + /* DPDMAI initialization */
1530 + err = dpaa2_qdma_setup(dpdmai_dev);
1531 + if (err) {
1532 + dev_err(dev, "dpaa2_dpdmai_setup() failed\n");
1533 + goto err_dpdmai_setup;
1534 + }
1535 +
1536 + /* DPIO */
1537 + err = dpaa2_qdma_dpio_setup(priv);
1538 + if (err) {
1539 + dev_err(dev, "dpaa2_dpdmai_dpio_setup() failed\n");
1540 + goto err_dpio_setup;
1541 + }
1542 +
1543 + /* DPDMAI binding to DPIO */
1544 + err = dpaa2_dpdmai_bind(priv);
1545 + if (err) {
1546 + dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
1547 + goto err_bind;
1548 + }
1549 +
1550 + /* DPDMAI enable */
1551 + err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
1552 + if (err) {
1553 + dev_err(dev, "dpdmai_enable() faile\n");
1554 + goto err_enable;
1555 + }
1556 +
1557 + dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
1558 + if (!dpaa2_qdma) {
1559 + err = -ENOMEM;
1560 + goto err_eng;
1561 + }
1562 +
1563 + priv->dpaa2_qdma = dpaa2_qdma;
1564 + dpaa2_qdma->priv = priv;
1565 +
1566 + dpaa2_qdma->n_chans = NUM_CH;
1567 +
1568 + err = dpaa2_dpdmai_alloc_channels(dpaa2_qdma);
1569 + if (err) {
1570 + dev_err(dev, "QDMA alloc channels faile\n");
1571 + goto err_reg;
1572 + }
1573 +
1574 + dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
1575 + dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
1576 + dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);
1577 + dma_cap_set(DMA_SG, dpaa2_qdma->dma_dev.cap_mask);
1578 +
1579 + dpaa2_qdma->dma_dev.dev = dev;
1580 + dpaa2_qdma->dma_dev.device_alloc_chan_resources
1581 + = dpaa2_qdma_alloc_chan_resources;
1582 + dpaa2_qdma->dma_dev.device_free_chan_resources
1583 + = dpaa2_qdma_free_chan_resources;
1584 + dpaa2_qdma->dma_dev.device_tx_status = dpaa2_qdma_tx_status;
1585 + dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
1586 + dpaa2_qdma->dma_dev.device_prep_dma_sg = dpaa2_qdma_prep_sg;
1587 + dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;
1588 +
1589 + err = dma_async_device_register(&dpaa2_qdma->dma_dev);
1590 + if (err) {
1591 + dev_err(dev, "Can't register NXP QDMA engine.\n");
1592 + goto err_reg;
1593 + }
1594 +
1595 + return 0;
1596 +
1597 +err_reg:
1598 + dpaa2_dpdmai_free_channels(dpaa2_qdma);
1599 + kfree(dpaa2_qdma);
1600 +err_eng:
1601 + dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
1602 +err_enable:
1603 + dpaa2_dpdmai_dpio_unbind(priv);
1604 +err_bind:
1605 + dpaa2_dpmai_store_free(priv);
1606 + dpaa2_dpdmai_dpio_free(priv);
1607 +err_dpio_setup:
1608 + dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
1609 +err_dpdmai_setup:
1610 + fsl_mc_portal_free(priv->mc_io);
1611 +err_mcportal:
1612 + kfree(priv->ppriv);
1613 + kfree(priv);
1614 + dev_set_drvdata(dev, NULL);
1615 + return err;
1616 +}
1617 +
1618 +static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
1619 +{
1620 + struct device *dev;
1621 + struct dpaa2_qdma_priv *priv;
1622 + struct dpaa2_qdma_engine *dpaa2_qdma;
1623 +
1624 + dev = &ls_dev->dev;
1625 + priv = dev_get_drvdata(dev);
1626 + dpaa2_qdma = priv->dpaa2_qdma;
1627 +
1628 + dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
1629 + dpaa2_dpdmai_dpio_unbind(priv);
1630 + dpaa2_dpmai_store_free(priv);
1631 + dpaa2_dpdmai_dpio_free(priv);
1632 + dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
1633 + fsl_mc_portal_free(priv->mc_io);
1634 + dev_set_drvdata(dev, NULL);
1635 + dpaa2_dpdmai_free_channels(dpaa2_qdma);
1636 +
1637 + dma_async_device_unregister(&dpaa2_qdma->dma_dev);
1638 + kfree(priv);
1639 + kfree(dpaa2_qdma);
1640 +
1641 + return 0;
1642 +}
1643 +
1644 +static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
1645 + {
1646 + .vendor = FSL_MC_VENDOR_FREESCALE,
1647 + .obj_type = "dpdmai",
1648 + },
1649 + { .vendor = 0x0 }
1650 +};
1651 +
1652 +static struct fsl_mc_driver dpaa2_qdma_driver = {
1653 + .driver = {
1654 + .name = "dpaa2-qdma",
1655 + .owner = THIS_MODULE,
1656 + },
1657 + .probe = dpaa2_qdma_probe,
1658 + .remove = dpaa2_qdma_remove,
1659 + .match_id_table = dpaa2_qdma_id_table
1660 +};
1661 +
1662 +static int __init dpaa2_qdma_driver_init(void)
1663 +{
1664 + return fsl_mc_driver_register(&(dpaa2_qdma_driver));
1665 +}
1666 +late_initcall(dpaa2_qdma_driver_init);
1667 +
1668 +static void __exit fsl_qdma_exit(void)
1669 +{
1670 + fsl_mc_driver_unregister(&(dpaa2_qdma_driver));
1671 +}
1672 +module_exit(fsl_qdma_exit);
1673 +
1674 +MODULE_DESCRIPTION("NXP DPAA2 qDMA driver");
1675 +MODULE_LICENSE("Dual BSD/GPL");
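For reference, dpaa2_qdma_request_desc() above carves each fd_pool allocation
into one frame descriptor followed by its frame list and source/destination
command descriptors, which the hardware walks in order. A sketch of the block,
with offsets following the pointer arithmetic in that function (FD_POOL_SIZE in
dpaa2-qdma.h is assumed to cover the whole block):

    fd_bus_addr                      struct dpaa2_fd           (1 entry)
    fd_bus_addr + sizeof(fd)         struct dpaa2_frame_list   (3 entries:
                                     descriptor, source, destination)
    fl_bus_addr + 3 * sizeof(fl)     struct dpaa2_qdma_sd_d    (2 entries:
                                     source and destination commands)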
1676 --- /dev/null
1677 +++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.h
1678 @@ -0,0 +1,262 @@
1679 +/* Copyright 2015 NXP Semiconductor Inc.
1680 + *
1681 + * Redistribution and use in source and binary forms, with or without
1682 + * modification, are permitted provided that the following conditions are met:
1683 + * * Redistributions of source code must retain the above copyright
1684 + * notice, this list of conditions and the following disclaimer.
1685 + * * Redistributions in binary form must reproduce the above copyright
1686 + * notice, this list of conditions and the following disclaimer in the
1687 + * documentation and/or other materials provided with the distribution.
1688 + * * Neither the name of NXP Semiconductor nor the
1689 + * names of its contributors may be used to endorse or promote products
1690 + * derived from this software without specific prior written permission.
1691 + *
1692 + *
1693 + * ALTERNATIVELY, this software may be distributed under the terms of the
1694 + * GNU General Public License ("GPL") as published by the Free Software
1695 + * Foundation, either version 2 of that License or (at your option) any
1696 + * later version.
1697 + *
1698 + * THIS SOFTWARE IS PROVIDED BY NXP Semiconductor ``AS IS'' AND ANY
1699 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1700 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1701 + * DISCLAIMED. IN NO EVENT SHALL NXP Semiconductor BE LIABLE FOR ANY
1702 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1703 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1704 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1705 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1706 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1707 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1708 + */
1709 +
1710 +#ifndef __DPAA2_QDMA_H
1711 +#define __DPAA2_QDMA_H
1712 +
1713 +#define LONG_FORMAT 1
1714 +
1715 +#define DPAA2_QDMA_STORE_SIZE 16
1716 +#define NUM_CH 8
1717 +#define NUM_SG_PER_BLK 16
1718 +
1719 +#define QDMA_DMR_OFFSET 0x0
1720 +#define QDMA_DQ_EN (0 << 30)
1721 +#define QDMA_DQ_DIS (1 << 30)
1722 +
1723 +#define QDMA_DSR_M_OFFSET 0x10004
1724 +
1725 +struct dpaa2_qdma_sd_d {
1726 + uint32_t rsv:32;
1727 + union {
1728 + struct {
1729 +			uint32_t ssd:12; /* source stride distance */
1730 +			uint32_t sss:12; /* source stride size */
1731 + uint32_t rsv1:8;
1732 + } sdf;
1733 + struct {
1734 + uint32_t dsd:12; /* Destination stride distance */
1735 + uint32_t dss:12; /* Destination stride size */
1736 + uint32_t rsv2:8;
1737 + } ddf;
1738 + } df;
1739 + uint32_t rbpcmd; /* Route-by-port command */
1740 + uint32_t cmd;
1741 +} __attribute__((__packed__));
1742 +/* Source descriptor command read transaction type for RBP=0:
1743 + * coherent copy of cacheable memory */
1744 +#define QDMA_SD_CMD_RDTTYPE_COHERENT (0xb << 28)
1745 +/* Destination descriptor command write transaction type for RBP=0:
1746 + * coherent copy of cacheable memory */
1747 +#define QDMA_DD_CMD_WRTTYPE_COHERENT (0x6 << 28)
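+
+/*
+ * Illustrative sketch (not taken from this driver; 'sd' and 'dd' are
+ * hypothetical pointers to the source/destination dpaa2_qdma_sd_d
+ * entries of one frame descriptor): a plain coherent copy with RBP=0
+ * would compose the descriptor command words from the transaction
+ * types above:
+ *
+ *	sd->cmd = QDMA_SD_CMD_RDTTYPE_COHERENT;
+ *	dd->cmd = QDMA_DD_CMD_WRTTYPE_COHERENT;
+ */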
1748 +
1749 +#define QDMA_SG_FMT_SDB 0x0 /* single data buffer */
1750 +#define QDMA_SG_FMT_FDS 0x1 /* frame data section */
1751 +#define QDMA_SG_FMT_SGTE 0x2 /* SGT extension */
1752 +#define QDMA_SG_SL_SHORT 0x1 /* short length */
1753 +#define QDMA_SG_SL_LONG	0x0 /* long length */
1754 +#define QDMA_SG_F 0x1 /* last sg entry */
1755 +struct dpaa2_qdma_sg {
1756 + uint32_t addr_lo; /* address 0:31 */
1757 + uint32_t addr_hi:17; /* address 32:48 */
1758 + uint32_t rsv:15;
1759 + union {
1760 + uint32_t data_len_sl0; /* SL=0, the long format */
1761 + struct {
1762 + uint32_t len:17; /* SL=1, the short format */
1763 + uint32_t reserve:3;
1764 + uint32_t sf:1;
1765 + uint32_t sr:1;
1766 + uint32_t size:10; /* buff size */
1767 + } data_len_sl1;
1768 + } data_len; /* AVAIL_LENGTH */
1769 + struct {
1770 + uint32_t bpid:14;
1771 + uint32_t ivp:1;
1772 + uint32_t mbt:1;
1773 + uint32_t offset:12;
1774 + uint32_t fmt:2;
1775 + uint32_t sl:1;
1776 + uint32_t f:1;
1777 + } ctrl;
1778 +} __attribute__((__packed__));
1779 +
1780 +#define QMAN_FD_FMT_ENABLE (1 << 12) /* frame list table enable */
1781 +#define QMAN_FD_BMT_ENABLE (1 << 15) /* bypass memory translation */
1782 +#define QMAN_FD_BMT_DISABLE (0 << 15) /* bypass memory translation */
1783 +#define QMAN_FD_SL_DISABLE (0 << 14) /* short length disabled */
1784 +#define QMAN_FD_SL_ENABLE (1 << 14) /* short length enabled */
1785 +
1786 +#define QDMA_SB_FRAME (0 << 28) /* single frame */
1787 +#define QDMA_SG_FRAME (2 << 28) /* scatter gather frames */
1788 +#define QDMA_FINAL_BIT_DISABLE (0 << 31) /* final bit disable */
1789 +#define QDMA_FINAL_BIT_ENABLE (1 << 31) /* final bit enable */
1790 +
1791 +#define QDMA_FD_SHORT_FORMAT (1 << 11) /* short format */
1792 +#define QDMA_FD_LONG_FORMAT (0 << 11) /* long format */
1793 +#define QDMA_SER_DISABLE (0 << 8) /* no notification */
1794 +#define QDMA_SER_CTX (1 << 8) /* notification by FQD_CTX[fqid] */
1795 +#define QDMA_SER_DEST (2 << 8) /* notification by destination desc */
1796 +#define QDMA_SER_BOTH (3 << 8) /* source and dest notification */
1797 +#define QDMA_FD_SPF_ENALBE (1 << 30) /* source prefetch enable */
1798 +
1799 +#define QMAN_FD_VA_ENABLE (1 << 14) /* Address used is virtual address */
1800 +#define QMAN_FD_VA_DISABLE (0 << 14) /* Address used is a real address */
1801 +#define QMAN_FD_CBMT_ENABLE (1 << 15) /* Flow Context: 49bit physical address */
1802 +#define QMAN_FD_CBMT_DISABLE (0 << 15) /* Flow Context: 64bit virtual address */
1803 +#define QMAN_FD_SC_DISABLE (0 << 27) /* stashing control */
1804 +
1805 +#define QDMA_FL_FMT_SBF 0x0 /* Single buffer frame */
1806 +#define QDMA_FL_FMT_SGE 0x2 /* Scatter gather frame */
1807 +#define QDMA_FL_BMT_ENABLE 0x1 /* enable bypass memory translation */
1808 +#define QDMA_FL_BMT_DISABLE 0x0 /* disable bypass memory translation */
1809 +#define QDMA_FL_SL_LONG 0x0 /* long length */
1810 +#define QDMA_FL_SL_SHORT 0x1 /* short length */
1811 +#define QDMA_FL_F 0x1 /* last frame list bit */
1812 +/* Description of the frame list table structure */
1813 +struct dpaa2_frame_list {
1814 + uint32_t addr_lo; /* lower 32 bits of address */
1815 + uint32_t addr_hi:17; /* upper 17 bits of address */
1816 + uint32_t resrvd:15;
1817 + union {
1818 +		uint32_t data_len_sl0;	/* If SL=0, data length is 32 bits */
1819 +		struct {
1820 +			uint32_t data_len:18; /* If SL=1, data length is 18 bits */
1821 + uint32_t resrvd:2;
1822 + uint32_t mem:12; /* Valid only when SL=1 */
1823 + } data_len_sl1;
1824 + } data_len;
1825 + /* word 4 */
1826 + uint32_t bpid:14; /* Frame buffer pool ID */
1827 + uint32_t ivp:1; /* Invalid Pool ID. */
1828 + uint32_t bmt:1; /* Bypass Memory Translation */
1829 + uint32_t offset:12; /* Frame offset */
1830 + uint32_t fmt:2; /* Frame Format */
1831 + uint32_t sl:1; /* Short Length */
1832 + uint32_t f:1; /* Final bit */
1833 +
1834 + uint32_t frc; /* Frame Context */
1835 + /* word 6 */
1836 + uint32_t err:8; /* Frame errors */
1837 + uint32_t resrvd0:8;
1838 + uint32_t asal:4; /* accelerator-specific annotation length */
1839 + uint32_t resrvd1:1;
1840 + uint32_t ptv2:1;
1841 + uint32_t ptv1:1;
1842 + uint32_t pta:1; /* pass-through annotation */
1843 + uint32_t resrvd2:8;
1844 +
1845 +	uint32_t flc_lo;	/* lower 32 bits of flow context */
1846 +	uint32_t flc_hi;	/* higher 32 bits of flow context */
1847 +} __attribute__((__packed__));
1848 +
1849 +struct dpaa2_qdma_chan {
1850 + struct virt_dma_chan vchan;
1851 + struct virt_dma_desc vdesc;
1852 + enum dma_status status;
1853 + struct dpaa2_qdma_engine *qdma;
1854 +
1855 + struct mutex dpaa2_queue_mutex;
1856 + spinlock_t queue_lock;
1857 + struct dma_pool *fd_pool;
1858 + struct dma_pool *sg_blk_pool;
1859 +
1860 + struct list_head comp_used;
1861 + struct list_head comp_free;
1862 +
1863 + struct list_head sgb_free;
1864 +};
1865 +
1866 +struct qdma_sg_blk {
1867 + dma_addr_t blk_bus_addr;
1868 + void *blk_virt_addr;
1869 + struct list_head list;
1870 +};
1871 +
1872 +struct dpaa2_qdma_comp {
1873 + dma_addr_t fd_bus_addr;
1874 + dma_addr_t fl_bus_addr;
1875 + dma_addr_t desc_bus_addr;
1876 + dma_addr_t sge_src_bus_addr;
1877 + dma_addr_t sge_dst_bus_addr;
1878 + void *fd_virt_addr;
1879 + void *fl_virt_addr;
1880 + void *desc_virt_addr;
1881 + void *sg_src_virt_addr;
1882 + void *sg_dst_virt_addr;
1883 + struct qdma_sg_blk *sg_blk;
1884 + uint32_t sg_blk_num;
1885 + struct list_head sg_src_head;
1886 + struct list_head sg_dst_head;
1887 + struct dpaa2_qdma_chan *qchan;
1888 + struct virt_dma_desc vdesc;
1889 + struct list_head list;
1890 +};
1891 +
1892 +struct dpaa2_qdma_engine {
1893 + struct dma_device dma_dev;
1894 + u32 n_chans;
1895 + struct dpaa2_qdma_chan chans[NUM_CH];
1896 +
1897 + struct dpaa2_qdma_priv *priv;
1898 +};
1899 +
1900 +/*
1901 + * dpaa2_qdma_priv - driver private data
1902 + */
1903 +struct dpaa2_qdma_priv {
1904 + int dpqdma_id;
1905 +
1906 + struct iommu_domain *iommu_domain;
1907 + struct dpdmai_attr dpdmai_attr;
1908 + struct device *dev;
1909 + struct fsl_mc_io *mc_io;
1910 + struct fsl_mc_device *dpdmai_dev;
1911 +
1912 + struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM];
1913 + struct dpdmai_tx_queue_attr tx_queue_attr[DPDMAI_PRIO_NUM];
1914 +
1915 + uint8_t num_pairs;
1916 +
1917 + struct dpaa2_qdma_engine *dpaa2_qdma;
1918 + struct dpaa2_qdma_priv_per_prio *ppriv;
1919 +};
1920 +
1921 +struct dpaa2_qdma_priv_per_prio {
1922 + int req_fqid;
1923 + int rsp_fqid;
1924 + int prio;
1925 +
1926 + struct dpaa2_io_store *store;
1927 + struct dpaa2_io_notification_ctx nctx;
1928 +
1929 + struct dpaa2_qdma_priv *priv;
1930 +};
1931 +
1932 +/* FD pool size: one FD + 3 frame list entries + 2 source/destination descriptors */
1933 +#define FD_POOL_SIZE (sizeof(struct dpaa2_fd) + \
1934 + sizeof(struct dpaa2_frame_list) * 3 + \
1935 + sizeof(struct dpaa2_qdma_sd_d) * 2)
1936 +
1937 +/* qdma_sg_blk + 16 SGs */
1938 +#define SG_POOL_SIZE (sizeof(struct qdma_sg_blk) +\
1939 + sizeof(struct dpaa2_qdma_sg) * NUM_SG_PER_BLK)
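+
+/*
+ * Usage sketch (hypothetical; the pool names, device pointer and
+ * 32-byte alignment are assumptions, not taken from this patch): the
+ * per-channel DMA pools are expected to be sized with the two macros
+ * above, e.g.:
+ *
+ *	chan->fd_pool = dma_pool_create("fd_pool", dev,
+ *					FD_POOL_SIZE, 32, 0);
+ *	chan->sg_blk_pool = dma_pool_create("sg_blk_pool", dev,
+ *					    SG_POOL_SIZE, 32, 0);
+ */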
1940 +#endif /* __DPAA2_QDMA_H */
1941 --- /dev/null
1942 +++ b/drivers/dma/dpaa2-qdma/dpdmai.c
1943 @@ -0,0 +1,454 @@
1944 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
1945 + *
1946 + * Redistribution and use in source and binary forms, with or without
1947 + * modification, are permitted provided that the following conditions are met:
1948 + * * Redistributions of source code must retain the above copyright
1949 + * notice, this list of conditions and the following disclaimer.
1950 + * * Redistributions in binary form must reproduce the above copyright
1951 + * notice, this list of conditions and the following disclaimer in the
1952 + * documentation and/or other materials provided with the distribution.
1953 + * * Neither the name of the above-listed copyright holders nor the
1954 + * names of any contributors may be used to endorse or promote products
1955 + * derived from this software without specific prior written permission.
1956 + *
1957 + *
1958 + * ALTERNATIVELY, this software may be distributed under the terms of the
1959 + * GNU General Public License ("GPL") as published by the Free Software
1960 + * Foundation, either version 2 of that License or (at your option) any
1961 + * later version.
1962 + *
1963 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
1964 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1965 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1966 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
1967 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1968 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
1969 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
1970 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
1971 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
1972 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1973 + * POSSIBILITY OF SUCH DAMAGE.
1974 + */
1975 +#include <linux/types.h>
1976 +#include <linux/io.h>
1977 +#include "fsl_dpdmai.h"
1978 +#include "fsl_dpdmai_cmd.h"
1979 +#include "../../../drivers/staging/fsl-mc/include/mc-sys.h"
1980 +#include "../../../drivers/staging/fsl-mc/include/mc-cmd.h"
1981 +
1982 +int dpdmai_open(struct fsl_mc_io *mc_io,
1983 + uint32_t cmd_flags,
1984 + int dpdmai_id,
1985 + uint16_t *token)
1986 +{
1987 + struct mc_command cmd = { 0 };
1988 + int err;
1989 +
1990 + /* prepare command */
1991 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
1992 + cmd_flags,
1993 + 0);
1994 + DPDMAI_CMD_OPEN(cmd, dpdmai_id);
1995 +
1996 + /* send command to mc*/
1997 + err = mc_send_command(mc_io, &cmd);
1998 + if (err)
1999 + return err;
2000 +
2001 + /* retrieve response parameters */
2002 + *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
2003 +
2004 + return 0;
2005 +}
2006 +
2007 +int dpdmai_close(struct fsl_mc_io *mc_io,
2008 + uint32_t cmd_flags,
2009 + uint16_t token)
2010 +{
2011 + struct mc_command cmd = { 0 };
2012 +
2013 + /* prepare command */
2014 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE,
2015 + cmd_flags, token);
2016 +
2017 + /* send command to mc*/
2018 + return mc_send_command(mc_io, &cmd);
2019 +}
2020 +
2021 +int dpdmai_create(struct fsl_mc_io *mc_io,
2022 + uint32_t cmd_flags,
2023 + const struct dpdmai_cfg *cfg,
2024 + uint16_t *token)
2025 +{
2026 + struct mc_command cmd = { 0 };
2027 + int err;
2028 +
2029 + /* prepare command */
2030 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
2031 + cmd_flags,
2032 + 0);
2033 + DPDMAI_CMD_CREATE(cmd, cfg);
2034 +
2035 + /* send command to mc*/
2036 + err = mc_send_command(mc_io, &cmd);
2037 + if (err)
2038 + return err;
2039 +
2040 + /* retrieve response parameters */
2041 + *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
2042 +
2043 + return 0;
2044 +}
2045 +
2046 +int dpdmai_destroy(struct fsl_mc_io *mc_io,
2047 + uint32_t cmd_flags,
2048 + uint16_t token)
2049 +{
2050 + struct mc_command cmd = { 0 };
2051 +
2052 + /* prepare command */
2053 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
2054 + cmd_flags,
2055 + token);
2056 +
2057 + /* send command to mc*/
2058 + return mc_send_command(mc_io, &cmd);
2059 +}
2060 +
2061 +int dpdmai_enable(struct fsl_mc_io *mc_io,
2062 + uint32_t cmd_flags,
2063 + uint16_t token)
2064 +{
2065 + struct mc_command cmd = { 0 };
2066 +
2067 + /* prepare command */
2068 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE,
2069 + cmd_flags,
2070 + token);
2071 +
2072 + /* send command to mc*/
2073 + return mc_send_command(mc_io, &cmd);
2074 +}
2075 +
2076 +int dpdmai_disable(struct fsl_mc_io *mc_io,
2077 + uint32_t cmd_flags,
2078 + uint16_t token)
2079 +{
2080 + struct mc_command cmd = { 0 };
2081 +
2082 + /* prepare command */
2083 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE,
2084 + cmd_flags,
2085 + token);
2086 +
2087 + /* send command to mc*/
2088 + return mc_send_command(mc_io, &cmd);
2089 +}
2090 +
2091 +int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
2092 + uint32_t cmd_flags,
2093 + uint16_t token,
2094 + int *en)
2095 +{
2096 + struct mc_command cmd = { 0 };
2097 + int err;
2098 + /* prepare command */
2099 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED,
2100 + cmd_flags,
2101 + token);
2102 +
2103 + /* send command to mc*/
2104 + err = mc_send_command(mc_io, &cmd);
2105 + if (err)
2106 + return err;
2107 +
2108 + /* retrieve response parameters */
2109 + DPDMAI_RSP_IS_ENABLED(cmd, *en);
2110 +
2111 + return 0;
2112 +}
2113 +
2114 +int dpdmai_reset(struct fsl_mc_io *mc_io,
2115 + uint32_t cmd_flags,
2116 + uint16_t token)
2117 +{
2118 + struct mc_command cmd = { 0 };
2119 +
2120 + /* prepare command */
2121 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET,
2122 + cmd_flags,
2123 + token);
2124 +
2125 + /* send command to mc*/
2126 + return mc_send_command(mc_io, &cmd);
2127 +}
2128 +
2129 +int dpdmai_get_irq(struct fsl_mc_io *mc_io,
2130 + uint32_t cmd_flags,
2131 + uint16_t token,
2132 + uint8_t irq_index,
2133 + int *type,
2134 + struct dpdmai_irq_cfg *irq_cfg)
2135 +{
2136 + struct mc_command cmd = { 0 };
2137 + int err;
2138 +
2139 + /* prepare command */
2140 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ,
2141 + cmd_flags,
2142 + token);
2143 + DPDMAI_CMD_GET_IRQ(cmd, irq_index);
2144 +
2145 + /* send command to mc*/
2146 + err = mc_send_command(mc_io, &cmd);
2147 + if (err)
2148 + return err;
2149 +
2150 + /* retrieve response parameters */
2151 + DPDMAI_RSP_GET_IRQ(cmd, *type, irq_cfg);
2152 +
2153 + return 0;
2154 +}
2155 +
2156 +int dpdmai_set_irq(struct fsl_mc_io *mc_io,
2157 + uint32_t cmd_flags,
2158 + uint16_t token,
2159 + uint8_t irq_index,
2160 + struct dpdmai_irq_cfg *irq_cfg)
2161 +{
2162 + struct mc_command cmd = { 0 };
2163 +
2164 + /* prepare command */
2165 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ,
2166 + cmd_flags,
2167 + token);
2168 + DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
2169 +
2170 + /* send command to mc*/
2171 + return mc_send_command(mc_io, &cmd);
2172 +}
2173 +
2174 +int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
2175 + uint32_t cmd_flags,
2176 + uint16_t token,
2177 + uint8_t irq_index,
2178 + uint8_t *en)
2179 +{
2180 + struct mc_command cmd = { 0 };
2181 + int err;
2182 +
2183 + /* prepare command */
2184 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_ENABLE,
2185 + cmd_flags,
2186 + token);
2187 + DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
2188 +
2189 + /* send command to mc*/
2190 + err = mc_send_command(mc_io, &cmd);
2191 + if (err)
2192 + return err;
2193 +
2194 + /* retrieve response parameters */
2195 + DPDMAI_RSP_GET_IRQ_ENABLE(cmd, *en);
2196 +
2197 + return 0;
2198 +}
2199 +
2200 +int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
2201 + uint32_t cmd_flags,
2202 + uint16_t token,
2203 + uint8_t irq_index,
2204 + uint8_t en)
2205 +{
2206 + struct mc_command cmd = { 0 };
2207 +
2208 + /* prepare command */
2209 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_ENABLE,
2210 + cmd_flags,
2211 + token);
2212 + DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
2213 +
2214 + /* send command to mc*/
2215 + return mc_send_command(mc_io, &cmd);
2216 +}
2217 +
2218 +int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
2219 + uint32_t cmd_flags,
2220 + uint16_t token,
2221 + uint8_t irq_index,
2222 + uint32_t *mask)
2223 +{
2224 + struct mc_command cmd = { 0 };
2225 + int err;
2226 +
2227 + /* prepare command */
2228 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_MASK,
2229 + cmd_flags,
2230 + token);
2231 + DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index);
2232 +
2233 + /* send command to mc*/
2234 + err = mc_send_command(mc_io, &cmd);
2235 + if (err)
2236 + return err;
2237 +
2238 + /* retrieve response parameters */
2239 + DPDMAI_RSP_GET_IRQ_MASK(cmd, *mask);
2240 +
2241 + return 0;
2242 +}
2243 +
2244 +int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
2245 + uint32_t cmd_flags,
2246 + uint16_t token,
2247 + uint8_t irq_index,
2248 + uint32_t mask)
2249 +{
2250 + struct mc_command cmd = { 0 };
2251 +
2252 + /* prepare command */
2253 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_MASK,
2254 + cmd_flags,
2255 + token);
2256 + DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
2257 +
2258 + /* send command to mc*/
2259 + return mc_send_command(mc_io, &cmd);
2260 +}
2261 +
2262 +int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
2263 + uint32_t cmd_flags,
2264 + uint16_t token,
2265 + uint8_t irq_index,
2266 + uint32_t *status)
2267 +{
2268 + struct mc_command cmd = { 0 };
2269 + int err;
2270 +
2271 + /* prepare command */
2272 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_STATUS,
2273 + cmd_flags,
2274 + token);
2275 + DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
2276 +
2277 + /* send command to mc*/
2278 + err = mc_send_command(mc_io, &cmd);
2279 + if (err)
2280 + return err;
2281 +
2282 + /* retrieve response parameters */
2283 + DPDMAI_RSP_GET_IRQ_STATUS(cmd, *status);
2284 +
2285 + return 0;
2286 +}
2287 +
2288 +int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
2289 + uint32_t cmd_flags,
2290 + uint16_t token,
2291 + uint8_t irq_index,
2292 + uint32_t status)
2293 +{
2294 + struct mc_command cmd = { 0 };
2295 +
2296 + /* prepare command */
2297 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLEAR_IRQ_STATUS,
2298 + cmd_flags,
2299 + token);
2300 + DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
2301 +
2302 + /* send command to mc*/
2303 + return mc_send_command(mc_io, &cmd);
2304 +}
2305 +
2306 +int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
2307 + uint32_t cmd_flags,
2308 + uint16_t token,
2309 + struct dpdmai_attr *attr)
2310 +{
2311 + struct mc_command cmd = { 0 };
2312 + int err;
2313 +
2314 + /* prepare command */
2315 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR,
2316 + cmd_flags,
2317 + token);
2318 +
2319 + /* send command to mc*/
2320 + err = mc_send_command(mc_io, &cmd);
2321 + if (err)
2322 + return err;
2323 +
2324 + /* retrieve response parameters */
2325 + DPDMAI_RSP_GET_ATTR(cmd, attr);
2326 +
2327 + return 0;
2328 +}
2329 +
2330 +int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
2331 + uint32_t cmd_flags,
2332 + uint16_t token,
2333 + uint8_t priority,
2334 + const struct dpdmai_rx_queue_cfg *cfg)
2335 +{
2336 + struct mc_command cmd = { 0 };
2337 +
2338 + /* prepare command */
2339 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE,
2340 + cmd_flags,
2341 + token);
2342 + DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg);
2343 +
2344 + /* send command to mc*/
2345 + return mc_send_command(mc_io, &cmd);
2346 +}
2347 +
2348 +int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
2349 + uint32_t cmd_flags,
2350 + uint16_t token,
2351 + uint8_t priority, struct dpdmai_rx_queue_attr *attr)
2352 +{
2353 + struct mc_command cmd = { 0 };
2354 + int err;
2355 +
2356 + /* prepare command */
2357 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE,
2358 + cmd_flags,
2359 + token);
2360 + DPDMAI_CMD_GET_RX_QUEUE(cmd, priority);
2361 +
2362 + /* send command to mc*/
2363 + err = mc_send_command(mc_io, &cmd);
2364 + if (err)
2365 + return err;
2366 +
2367 + /* retrieve response parameters */
2368 + DPDMAI_RSP_GET_RX_QUEUE(cmd, attr);
2369 +
2370 + return 0;
2371 +}
2372 +
2373 +int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
2374 + uint32_t cmd_flags,
2375 + uint16_t token,
2376 + uint8_t priority,
2377 + struct dpdmai_tx_queue_attr *attr)
2378 +{
2379 + struct mc_command cmd = { 0 };
2380 + int err;
2381 +
2382 + /* prepare command */
2383 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE,
2384 + cmd_flags,
2385 + token);
2386 + DPDMAI_CMD_GET_TX_QUEUE(cmd, priority);
2387 +
2388 + /* send command to mc*/
2389 + err = mc_send_command(mc_io, &cmd);
2390 + if (err)
2391 + return err;
2392 +
2393 + /* retrieve response parameters */
2394 + DPDMAI_RSP_GET_TX_QUEUE(cmd, attr);
2395 +
2396 + return 0;
2397 +}
2398 --- /dev/null
2399 +++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai.h
2400 @@ -0,0 +1,521 @@
2401 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
2402 + *
2403 + * Redistribution and use in source and binary forms, with or without
2404 + * modification, are permitted provided that the following conditions are met:
2405 + * * Redistributions of source code must retain the above copyright
2406 + * notice, this list of conditions and the following disclaimer.
2407 + * * Redistributions in binary form must reproduce the above copyright
2408 + * notice, this list of conditions and the following disclaimer in the
2409 + * documentation and/or other materials provided with the distribution.
2410 + * * Neither the name of the above-listed copyright holders nor the
2411 + * names of any contributors may be used to endorse or promote products
2412 + * derived from this software without specific prior written permission.
2413 + *
2414 + *
2415 + * ALTERNATIVELY, this software may be distributed under the terms of the
2416 + * GNU General Public License ("GPL") as published by the Free Software
2417 + * Foundation, either version 2 of that License or (at your option) any
2418 + * later version.
2419 + *
2420 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
2421 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2422 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2423 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
2424 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2425 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2426 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2427 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
2428 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
2429 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
2430 + * POSSIBILITY OF SUCH DAMAGE.
2431 + */
2432 +#ifndef __FSL_DPDMAI_H
2433 +#define __FSL_DPDMAI_H
2434 +
2435 +struct fsl_mc_io;
2436 +
2437 +/* Data Path DMA Interface API
2438 + * Contains initialization APIs and runtime control APIs for DPDMAI
2439 + */
2440 +
2441 +/* General DPDMAI macros */
2442 +
2443 +/**
2444 + * Maximum number of Tx/Rx priorities per DPDMAI object
2445 + */
2446 +#define DPDMAI_PRIO_NUM 2
2447 +
2448 +/**
2449 + * All queues considered; see dpdmai_set_rx_queue()
2450 + */
2451 +#define DPDMAI_ALL_QUEUES (uint8_t)(-1)
2452 +
2453 +/**
2454 + * dpdmai_open() - Open a control session for the specified object
2455 + * @mc_io: Pointer to MC portal's I/O object
2456 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2457 + * @dpdmai_id: DPDMAI unique ID
2458 + * @token: Returned token; use in subsequent API calls
2459 + *
2460 + * This function can be used to open a control session for an
2461 + * already created object; an object may have been declared in
2462 + * the DPL or by calling the dpdmai_create() function.
2463 + * This function returns a unique authentication token,
2464 + * associated with the specific object ID and the specific MC
2465 + * portal; this token must be used in all subsequent commands for
2466 + * this specific object.
2467 + *
2468 + * Return: '0' on Success; Error code otherwise.
2469 + */
2470 +int dpdmai_open(struct fsl_mc_io *mc_io,
2471 + uint32_t cmd_flags,
2472 + int dpdmai_id,
2473 + uint16_t *token);
2474 +
2475 +/**
2476 + * dpdmai_close() - Close the control session of the object
2477 + * @mc_io: Pointer to MC portal's I/O object
2478 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2479 + * @token: Token of DPDMAI object
2480 + *
2481 + * After this function is called, no further operations are
2482 + * allowed on the object without opening a new control session.
2483 + *
2484 + * Return: '0' on Success; Error code otherwise.
2485 + */
2486 +int dpdmai_close(struct fsl_mc_io *mc_io,
2487 + uint32_t cmd_flags,
2488 + uint16_t token);
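+
+/*
+ * Minimal usage sketch (the surrounding caller is assumed, not part of
+ * this header): runtime commands are issued against the token returned
+ * by dpdmai_open() and the session is closed afterwards:
+ *
+ *	uint16_t token;
+ *	int err;
+ *
+ *	err = dpdmai_open(mc_io, 0, dpdmai_id, &token);
+ *	if (err)
+ *		return err;
+ *	err = dpdmai_enable(mc_io, 0, token);
+ *	dpdmai_close(mc_io, 0, token);
+ */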
2489 +
2490 +/**
2491 + * struct dpdmai_cfg - Structure representing DPDMAI configuration
2492 + * @priorities: Priorities for the DMA hardware processing; valid priorities are
2493 + *		configured with values 1-8; the entry following the last
2494 + *		valid entry should be set to 0
2495 + */
2496 +struct dpdmai_cfg {
2497 + uint8_t priorities[DPDMAI_PRIO_NUM];
2498 +};
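+
+/*
+ * Example (illustrative values): a DPDMAI using a single priority would
+ * be configured as
+ *
+ *	struct dpdmai_cfg cfg = { .priorities = {1, 0} };
+ *
+ * where the trailing 0 marks the end of the valid entries, per the rule
+ * documented above.
+ */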
2499 +
2500 +/**
2501 + * dpdmai_create() - Create the DPDMAI object
2502 + * @mc_io: Pointer to MC portal's I/O object
2503 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2504 + * @cfg: Configuration structure
2505 + * @token: Returned token; use in subsequent API calls
2506 + *
2507 + * Create the DPDMAI object, allocate required resources and
2508 + * perform required initialization.
2509 + *
2510 + * The object can be created either by declaring it in the
2511 + * DPL file, or by calling this function.
2512 + *
2513 + * This function returns a unique authentication token,
2514 + * associated with the specific object ID and the specific MC
2515 + * portal; this token must be used in all subsequent calls to
2516 + * this specific object. For objects that are created using the
2517 + * DPL file, call dpdmai_open() function to get an authentication
2518 + * token first.
2519 + *
2520 + * Return: '0' on Success; Error code otherwise.
2521 + */
2522 +int dpdmai_create(struct fsl_mc_io *mc_io,
2523 + uint32_t cmd_flags,
2524 + const struct dpdmai_cfg *cfg,
2525 + uint16_t *token);
2526 +
2527 +/**
2528 + * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
2529 + * @mc_io: Pointer to MC portal's I/O object
2530 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2531 + * @token: Token of DPDMAI object
2532 + *
2533 + * Return: '0' on Success; error code otherwise.
2534 + */
2535 +int dpdmai_destroy(struct fsl_mc_io *mc_io,
2536 + uint32_t cmd_flags,
2537 + uint16_t token);
2538 +
2539 +/**
2540 + * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
2541 + * @mc_io: Pointer to MC portal's I/O object
2542 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2543 + * @token: Token of DPDMAI object
2544 + *
2545 + * Return: '0' on Success; Error code otherwise.
2546 + */
2547 +int dpdmai_enable(struct fsl_mc_io *mc_io,
2548 + uint32_t cmd_flags,
2549 + uint16_t token);
2550 +
2551 +/**
2552 + * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames.
2553 + * @mc_io: Pointer to MC portal's I/O object
2554 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2555 + * @token: Token of DPDMAI object
2556 + *
2557 + * Return: '0' on Success; Error code otherwise.
2558 + */
2559 +int dpdmai_disable(struct fsl_mc_io *mc_io,
2560 + uint32_t cmd_flags,
2561 + uint16_t token);
2562 +
2563 +/**
2564 + * dpdmai_is_enabled() - Check if the DPDMAI is enabled.
2565 + * @mc_io: Pointer to MC portal's I/O object
2566 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2567 + * @token: Token of DPDMAI object
2568 + * @en: Returns '1' if object is enabled; '0' otherwise
2569 + *
2570 + * Return: '0' on Success; Error code otherwise.
2571 + */
2572 +int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
2573 + uint32_t cmd_flags,
2574 + uint16_t token,
2575 + int *en);
2576 +
2577 +/**
2578 + * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state.
2579 + * @mc_io: Pointer to MC portal's I/O object
2580 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2581 + * @token: Token of DPDMAI object
2582 + *
2583 + * Return: '0' on Success; Error code otherwise.
2584 + */
2585 +int dpdmai_reset(struct fsl_mc_io *mc_io,
2586 + uint32_t cmd_flags,
2587 + uint16_t token);
2588 +
2589 +/**
2590 + * struct dpdmai_irq_cfg - IRQ configuration
2591 + * @addr: Address that must be written to signal a message-based interrupt
2592 + * @val: Value to write into irq_addr address
2593 + * @irq_num: A user defined number associated with this IRQ
2594 + */
2595 +struct dpdmai_irq_cfg {
2596 + uint64_t addr;
2597 + uint32_t val;
2598 + int irq_num;
2599 +};
2600 +
2601 +/**
2602 + * dpdmai_set_irq() - Set IRQ information for the DPDMAI to trigger an interrupt.
2603 + * @mc_io: Pointer to MC portal's I/O object
2604 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2605 + * @token: Token of DPDMAI object
2606 + * @irq_index: Identifies the interrupt index to configure
2607 + * @irq_cfg: IRQ configuration
2608 + *
2609 + * Return: '0' on Success; Error code otherwise.
2610 + */
2611 +int dpdmai_set_irq(struct fsl_mc_io *mc_io,
2612 + uint32_t cmd_flags,
2613 + uint16_t token,
2614 + uint8_t irq_index,
2615 + struct dpdmai_irq_cfg *irq_cfg);
2616 +
2617 +/**
2618 + * dpdmai_get_irq() - Get IRQ information from the DPDMAI
2620 + * @mc_io: Pointer to MC portal's I/O object
2621 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2622 + * @token: Token of DPDMAI object
2623 + * @irq_index: The interrupt index to configure
2624 + * @type: Interrupt type: 0 represents message interrupt
2625 + * type (both irq_addr and irq_val are valid)
2626 + * @irq_cfg: IRQ attributes
2627 + *
2628 + * Return: '0' on Success; Error code otherwise.
2629 + */
2630 +int dpdmai_get_irq(struct fsl_mc_io *mc_io,
2631 + uint32_t cmd_flags,
2632 + uint16_t token,
2633 + uint8_t irq_index,
2634 + int *type,
2635 + struct dpdmai_irq_cfg *irq_cfg);
2636 +
2637 +/**
2638 + * dpdmai_set_irq_enable() - Set overall interrupt state.
2639 + * @mc_io: Pointer to MC portal's I/O object
2640 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2641 + * @token: Token of DPDMAI object
2642 + * @irq_index: The interrupt index to configure
2643 + * @en: Interrupt state - enable = 1, disable = 0
2644 + *
2645 + * Allows GPP software to control when interrupts are generated.
2646 + * Each interrupt can have up to 32 causes. The enable/disable setting
2647 + * controls the overall interrupt state: if the interrupt is disabled,
2648 + * no cause will trigger an interrupt.
2649 + *
2650 + * Return: '0' on Success; Error code otherwise.
2651 + */
2652 +int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
2653 + uint32_t cmd_flags,
2654 + uint16_t token,
2655 + uint8_t irq_index,
2656 + uint8_t en);
2657 +
2658 +/**
2659 + * dpdmai_get_irq_enable() - Get overall interrupt state
2660 + * @mc_io: Pointer to MC portal's I/O object
2661 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2662 + * @token: Token of DPDMAI object
2663 + * @irq_index: The interrupt index to configure
2664 + * @en: Returned Interrupt state - enable = 1, disable = 0
2665 + *
2666 + * Return: '0' on Success; Error code otherwise.
2667 + */
2668 +int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
2669 + uint32_t cmd_flags,
2670 + uint16_t token,
2671 + uint8_t irq_index,
2672 + uint8_t *en);
2673 +
2674 +/**
2675 + * dpdmai_set_irq_mask() - Set interrupt mask.
2676 + * @mc_io: Pointer to MC portal's I/O object
2677 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2678 + * @token: Token of DPDMAI object
2679 + * @irq_index: The interrupt index to configure
2680 + * @mask: event mask to trigger interrupt;
2681 + * each bit:
2682 + * 0 = ignore event
2683 + * 1 = consider event for asserting IRQ
2684 + *
2685 + * Every interrupt can have up to 32 causes and the interrupt model supports
2686 + * masking/unmasking each cause independently
2687 + *
2688 + * Return: '0' on Success; Error code otherwise.
2689 + */
2690 +int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
2691 + uint32_t cmd_flags,
2692 + uint16_t token,
2693 + uint8_t irq_index,
2694 + uint32_t mask);
2695 +
2696 +/**
2697 + * dpdmai_get_irq_mask() - Get interrupt mask.
2698 + * @mc_io: Pointer to MC portal's I/O object
2699 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2700 + * @token: Token of DPDMAI object
2701 + * @irq_index: The interrupt index to configure
2702 + * @mask: Returned event mask to trigger interrupt
2703 + *
2704 + * Every interrupt can have up to 32 causes and the interrupt model supports
2705 + * masking/unmasking each cause independently
2706 + *
2707 + * Return: '0' on Success; Error code otherwise.
2708 + */
2709 +int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
2710 + uint32_t cmd_flags,
2711 + uint16_t token,
2712 + uint8_t irq_index,
2713 + uint32_t *mask);
2714 +
2715 +/**
2716 + * dpdmai_get_irq_status() - Get the current status of any pending interrupts
2717 + * @mc_io: Pointer to MC portal's I/O object
2718 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2719 + * @token: Token of DPDMAI object
2720 + * @irq_index: The interrupt index to configure
2721 + * @status: Returned interrupts status - one bit per cause:
2722 + * 0 = no interrupt pending
2723 + * 1 = interrupt pending
2724 + *
2725 + * Return: '0' on Success; Error code otherwise.
2726 + */
2727 +int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
2728 + uint32_t cmd_flags,
2729 + uint16_t token,
2730 + uint8_t irq_index,
2731 + uint32_t *status);
2732 +
2733 +/**
2734 + * dpdmai_clear_irq_status() - Clear a pending interrupt's status
2735 + * @mc_io: Pointer to MC portal's I/O object
2736 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2737 + * @token: Token of DPDMAI object
2738 + * @irq_index: The interrupt index to configure
2739 + * @status: bits to clear (W1C) - one bit per cause:
2740 + * 0 = don't change
2741 + * 1 = clear status bit
2742 + *
2743 + * Return: '0' on Success; Error code otherwise.
2744 + */
2745 +int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
2746 + uint32_t cmd_flags,
2747 + uint16_t token,
2748 + uint8_t irq_index,
2749 + uint32_t status);
2750 +
2751 +/**
2752 + * struct dpdmai_attr - Structure representing DPDMAI attributes
2753 + * @id: DPDMAI object ID
2754 + * @version: DPDMAI version
2755 + * @num_of_priorities: number of priorities
2756 + */
2757 +struct dpdmai_attr {
2758 + int id;
2759 + /**
2760 + * struct version - DPDMAI version
2761 + * @major: DPDMAI major version
2762 + * @minor: DPDMAI minor version
2763 + */
2764 + struct {
2765 + uint16_t major;
2766 + uint16_t minor;
2767 + } version;
2768 + uint8_t num_of_priorities;
2769 +};
2770 +
2771 +/**
2772 + * dpdmai_get_attributes() - Retrieve DPDMAI attributes.
2773 + * @mc_io: Pointer to MC portal's I/O object
2774 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2775 + * @token: Token of DPDMAI object
2776 + * @attr: Returned object's attributes
2777 + *
2778 + * Return: '0' on Success; Error code otherwise.
2779 + */
2780 +int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
2781 + uint32_t cmd_flags,
2782 + uint16_t token,
2783 + struct dpdmai_attr *attr);
2784 +
2785 +/**
2786 + * enum dpdmai_dest - DPDMAI destination types
2787 + * @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode
2788 + * and does not generate FQDAN notifications; user is expected to dequeue
2789 + * from the queue based on polling or other user-defined method
2790 + * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
2791 + * notifications to the specified DPIO; user is expected to dequeue
2792 + * from the queue only after notification is received
2793 + * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate
2794 + * FQDAN notifications, but is connected to the specified DPCON object;
2795 + * user is expected to dequeue from the DPCON channel
2796 + */
2797 +enum dpdmai_dest {
2798 + DPDMAI_DEST_NONE = 0,
2799 + DPDMAI_DEST_DPIO = 1,
2800 + DPDMAI_DEST_DPCON = 2
2801 +};
2802 +
2803 +/**
2804 + * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters
2805 + * @dest_type: Destination type
2806 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
2807 + * @priority: Priority selection within the DPIO or DPCON channel; valid values
2808 + * are 0-1 or 0-7, depending on the number of priorities in that
2809 + * channel; not relevant for 'DPDMAI_DEST_NONE' option
2810 + */
2811 +struct dpdmai_dest_cfg {
2812 + enum dpdmai_dest dest_type;
2813 + int dest_id;
2814 + uint8_t priority;
2815 +};
2816 +
2817 +/* DPDMAI queue modification options */
2818 +
2819 +/**
2820 + * Select to modify the user's context associated with the queue
2821 + */
2822 +#define DPDMAI_QUEUE_OPT_USER_CTX 0x00000001
2823 +
2824 +/**
2825 + * Select to modify the queue's destination
2826 + */
2827 +#define DPDMAI_QUEUE_OPT_DEST 0x00000002
2828 +
2829 +/**
2830 + * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration
2831 + * @options: Flags representing the suggested modifications to the queue;
2832 + * Use any combination of 'DPDMAI_QUEUE_OPT_<X>' flags
2833 + * @user_ctx: User context value provided in the frame descriptor of each
2834 + * dequeued frame;
2835 + * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained in 'options'
2836 + * @dest_cfg: Queue destination parameters;
2837 + * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options'
2838 + */
2839 +struct dpdmai_rx_queue_cfg {
2840 + uint32_t options;
2841 + uint64_t user_ctx;
2842 + struct dpdmai_dest_cfg dest_cfg;
2844 +};
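+
+/*
+ * Illustrative sketch ('dpio_id' and 'ctx' are assumed caller-side
+ * values): directing an Rx queue to a DPIO with a caller-chosen
+ * context sets both option flags:
+ *
+ *	struct dpdmai_rx_queue_cfg cfg = {
+ *		.options = DPDMAI_QUEUE_OPT_USER_CTX |
+ *			   DPDMAI_QUEUE_OPT_DEST,
+ *		.user_ctx = ctx,
+ *		.dest_cfg = {
+ *			.dest_type = DPDMAI_DEST_DPIO,
+ *			.dest_id = dpio_id,
+ *			.priority = 0,
+ *		},
+ *	};
+ */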
2845 +
2846 +/**
2847 + * dpdmai_set_rx_queue() - Set Rx queue configuration
2848 + * @mc_io: Pointer to MC portal's I/O object
2849 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2850 + * @token: Token of DPDMAI object
2851 + * @priority: Select the queue relative to number of
2852 + * priorities configured at DPDMAI creation; use
2853 + * DPDMAI_ALL_QUEUES to configure all Rx queues
2854 + * identically.
2855 + * @cfg: Rx queue configuration
2856 + *
2857 + * Return: '0' on Success; Error code otherwise.
2858 + */
2859 +int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
2860 + uint32_t cmd_flags,
2861 + uint16_t token,
2862 + uint8_t priority,
2863 + const struct dpdmai_rx_queue_cfg *cfg);
2864 +
2865 +/**
2866 + * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues
2867 + * @user_ctx: User context value provided in the frame descriptor of each
2868 + * dequeued frame
2869 + * @dest_cfg: Queue destination configuration
2870 + * @fqid: Virtual FQID value to be used for dequeue operations
2871 + */
2872 +struct dpdmai_rx_queue_attr {
2873 + uint64_t user_ctx;
2874 + struct dpdmai_dest_cfg dest_cfg;
2875 + uint32_t fqid;
2876 +};
2877 +
2878 +/**
2879 + * dpdmai_get_rx_queue() - Retrieve Rx queue attributes.
2880 + * @mc_io: Pointer to MC portal's I/O object
2881 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2882 + * @token: Token of DPDMAI object
2883 + * @priority: Select the queue relative to number of
2884 + * priorities configured at DPDMAI creation
2885 + * @attr: Returned Rx queue attributes
2886 + *
2887 + * Return: '0' on Success; Error code otherwise.
2888 + */
2889 +int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
2890 + uint32_t cmd_flags,
2891 + uint16_t token,
2892 + uint8_t priority,
2893 + struct dpdmai_rx_queue_attr *attr);
2894 +
2895 +/**
2896 + * struct dpdmai_tx_queue_attr - Structure representing attributes of Tx queues
2897 + * @fqid: Virtual FQID to be used for sending frames to DMA hardware
2898 + */
2900 +struct dpdmai_tx_queue_attr {
2901 + uint32_t fqid;
2902 +};
2903 +
2904 +/**
2905 + * dpdmai_get_tx_queue() - Retrieve Tx queue attributes.
2906 + * @mc_io: Pointer to MC portal's I/O object
2907 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2908 + * @token: Token of DPDMAI object
2909 + * @priority: Select the queue relative to number of
2910 + * priorities configured at DPDMAI creation
2911 + * @attr: Returned Tx queue attributes
2912 + *
2913 + * Return: '0' on Success; Error code otherwise.
2914 + */
2915 +int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
2916 + uint32_t cmd_flags,
2917 + uint16_t token,
2918 + uint8_t priority,
2919 + struct dpdmai_tx_queue_attr *attr);
2920 +
2921 +#endif /* __FSL_DPDMAI_H */
2922 --- /dev/null
2923 +++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
2924 @@ -0,0 +1,222 @@
2925 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
2926 + *
2927 + * Redistribution and use in source and binary forms, with or without
2928 + * modification, are permitted provided that the following conditions are met:
2929 + * * Redistributions of source code must retain the above copyright
2930 + * notice, this list of conditions and the following disclaimer.
2931 + * * Redistributions in binary form must reproduce the above copyright
2932 + * notice, this list of conditions and the following disclaimer in the
2933 + * documentation and/or other materials provided with the distribution.
2934 + * * Neither the name of the above-listed copyright holders nor the
2935 + * names of any contributors may be used to endorse or promote products
2936 + * derived from this software without specific prior written permission.
2937 + *
2938 + *
2939 + * ALTERNATIVELY, this software may be distributed under the terms of the
2940 + * GNU General Public License ("GPL") as published by the Free Software
2941 + * Foundation, either version 2 of that License or (at your option) any
2942 + * later version.
2943 + *
2944 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
2945 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2946 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2947 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
2948 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2949 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2950 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2951 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
2952 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
2953 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
2954 + * POSSIBILITY OF SUCH DAMAGE.
2955 + */
2956 +#ifndef _FSL_DPDMAI_CMD_H
2957 +#define _FSL_DPDMAI_CMD_H
2958 +
2959 +/* DPDMAI Version */
2960 +#define DPDMAI_VER_MAJOR 2
2961 +#define DPDMAI_VER_MINOR 2
2962 +
2963 +#define DPDMAI_CMD_BASE_VERSION 0
2964 +#define DPDMAI_CMD_ID_OFFSET 4
2965 +
2966 +/* Command IDs */
2967 +#define DPDMAI_CMDID_CLOSE ((0x800 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2968 +#define DPDMAI_CMDID_OPEN ((0x80E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2969 +#define DPDMAI_CMDID_CREATE ((0x90E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2970 +#define DPDMAI_CMDID_DESTROY ((0x900 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2971 +
2972 +#define DPDMAI_CMDID_ENABLE ((0x002 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2973 +#define DPDMAI_CMDID_DISABLE ((0x003 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2974 +#define DPDMAI_CMDID_GET_ATTR ((0x004 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2975 +#define DPDMAI_CMDID_RESET ((0x005 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2976 +#define DPDMAI_CMDID_IS_ENABLED ((0x006 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2977 +
2978 +#define DPDMAI_CMDID_SET_IRQ ((0x010 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2979 +#define DPDMAI_CMDID_GET_IRQ ((0x011 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2980 +#define DPDMAI_CMDID_SET_IRQ_ENABLE ((0x012 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2981 +#define DPDMAI_CMDID_GET_IRQ_ENABLE ((0x013 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2982 +#define DPDMAI_CMDID_SET_IRQ_MASK ((0x014 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2983 +#define DPDMAI_CMDID_GET_IRQ_MASK ((0x015 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2984 +#define DPDMAI_CMDID_GET_IRQ_STATUS ((0x016 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2985 +#define DPDMAI_CMDID_CLEAR_IRQ_STATUS ((0x017 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2986 +
2987 +#define DPDMAI_CMDID_SET_RX_QUEUE ((0x1A0 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2988 +#define DPDMAI_CMDID_GET_RX_QUEUE ((0x1A1 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2989 +#define DPDMAI_CMDID_GET_TX_QUEUE ((0x1A2 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2990 +
2992 +#define MC_CMD_HDR_TOKEN_O 32 /* Token field offset */
2993 +#define MC_CMD_HDR_TOKEN_S 16 /* Token field size */
2994 +
2996 +#define MAKE_UMASK64(_width) \
2997 + ((uint64_t)((_width) < 64 ? ((uint64_t)1 << (_width)) - 1 : \
2998 + (uint64_t)-1))
2999 +
3000 +static inline uint64_t mc_enc(int lsoffset, int width, uint64_t val)
3001 +{
3002 + return (uint64_t)(((uint64_t)val & MAKE_UMASK64(width)) << lsoffset);
3003 +}
3004 +
3005 +static inline uint64_t mc_dec(uint64_t val, int lsoffset, int width)
3006 +{
3007 + return (uint64_t)((val >> lsoffset) & MAKE_UMASK64(width));
3008 +}
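+
+/*
+ * For illustration (values assumed): mc_enc(8, 8, 0x5a) places 0x5a in
+ * bits 8..15 of a 64-bit command parameter, and
+ * mc_dec(mc_enc(8, 8, 0x5a), 8, 8) recovers 0x5a. The MC_CMD_OP() and
+ * MC_RSP_OP() macros below are thin wrappers around these helpers.
+ */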
3009 +
3010 +#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
3011 + ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
3012 +
3013 +#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \
3014 + (_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width)))
3015 +
3016 +#define MC_CMD_HDR_READ_TOKEN(_hdr) \
3017 + ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S))
3018 +
3019 +/* cmd, param, offset, width, type, arg_name */
3020 +#define DPDMAI_CMD_OPEN(cmd, dpdmai_id) \
3021 + MC_CMD_OP(cmd, 0, 0, 32, int, dpdmai_id)
3022 +
3023 +/* cmd, param, offset, width, type, arg_name */
3024 +#define DPDMAI_CMD_CREATE(cmd, cfg) \
3025 +do { \
3026 + MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->priorities[0]);\
3027 + MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priorities[1]);\
3028 +} while (0)
3029 +
3030 +/* cmd, param, offset, width, type, arg_name */
3031 +#define DPDMAI_RSP_IS_ENABLED(cmd, en) \
3032 + MC_RSP_OP(cmd, 0, 0, 1, int, en)
3033 +
3034 +/* cmd, param, offset, width, type, arg_name */
3035 +#define DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
3036 +do { \
3037 + MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
3038 + MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
3039 + MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
3040 + MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
3041 +} while (0)
3042 +
3043 +/* cmd, param, offset, width, type, arg_name */
3044 +#define DPDMAI_CMD_GET_IRQ(cmd, irq_index) \
3045 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
3046 +
3047 +/* cmd, param, offset, width, type, arg_name */
3048 +#define DPDMAI_RSP_GET_IRQ(cmd, type, irq_cfg) \
3049 +do { \
3050 + MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
3051 + MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
3052 + MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
3053 + MC_RSP_OP(cmd, 2, 32, 32, int, type); \
3054 +} while (0)
3055 +
3056 +/* cmd, param, offset, width, type, arg_name */
3057 +#define DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \
3058 +do { \
3059 + MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \
3060 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
3061 +} while (0)
3062 +
3063 +/* cmd, param, offset, width, type, arg_name */
3064 +#define DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
3065 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
3066 +
3067 +/* cmd, param, offset, width, type, arg_name */
3068 +#define DPDMAI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \
3069 + MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state)
3070 +
3071 +/* cmd, param, offset, width, type, arg_name */
3072 +#define DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
3073 +do { \
3074 + MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
3075 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
3076 +} while (0)
3077 +
3078 +/* cmd, param, offset, width, type, arg_name */
3079 +#define DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index) \
3080 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
3081 +
3082 +/* cmd, param, offset, width, type, arg_name */
3083 +#define DPDMAI_RSP_GET_IRQ_MASK(cmd, mask) \
3084 + MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
3085 +
3086 +/* cmd, param, offset, width, type, arg_name */
3087 +#define DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
3088 +do { \
3089 + MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
3090 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
3091 +} while (0)
3092 +
3093 +/* cmd, param, offset, width, type, arg_name */
3094 +#define DPDMAI_RSP_GET_IRQ_STATUS(cmd, status) \
3095 + MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
3096 +
3097 +/* cmd, param, offset, width, type, arg_name */
3098 +#define DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
3099 +do { \
3100 + MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
3101 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
3102 +} while (0)
3103 +
3104 +/* cmd, param, offset, width, type, arg_name */
3105 +#define DPDMAI_RSP_GET_ATTR(cmd, attr) \
3106 +do { \
3107 + MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \
3108 + MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->num_of_priorities); \
3109 + MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
3110 + MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
3111 +} while (0)
3112 +
3113 +/* cmd, param, offset, width, type, arg_name */
3114 +#define DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg) \
3115 +do { \
3116 + MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
3117 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \
3118 + MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority); \
3119 + MC_CMD_OP(cmd, 0, 48, 4, enum dpdmai_dest, cfg->dest_cfg.dest_type); \
3120 + MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
3121 + MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
3122 +} while (0)
3123 +
3124 +/* cmd, param, offset, width, type, arg_name */
3125 +#define DPDMAI_CMD_GET_RX_QUEUE(cmd, priority) \
3126 + MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
3127 +
3128 +/* cmd, param, offset, width, type, arg_name */
3129 +#define DPDMAI_RSP_GET_RX_QUEUE(cmd, attr) \
3130 +do { \
3131 + MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\
3132 + MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
3133 + MC_RSP_OP(cmd, 0, 48, 4, enum dpdmai_dest, attr->dest_cfg.dest_type);\
3134 + MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx);\
3135 + MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\
3136 +} while (0)
3137 +
3138 +/* cmd, param, offset, width, type, arg_name */
3139 +#define DPDMAI_CMD_GET_TX_QUEUE(cmd, priority) \
3140 + MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
3141 +
3142 +/* cmd, param, offset, width, type, arg_name */
3143 +#define DPDMAI_RSP_GET_TX_QUEUE(cmd, attr) \
3144 + MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->fqid)
3145 +
3146 +#endif /* _FSL_DPDMAI_CMD_H */
3147 --- /dev/null
3148 +++ b/drivers/dma/fsl-qdma.c
3149 @@ -0,0 +1,1201 @@
3150 +/*
3151 + * drivers/dma/fsl-qdma.c
3152 + *
3153 + * Copyright 2014-2015 Freescale Semiconductor, Inc.
3154 + *
3155 + * Driver for the Freescale qDMA engine with software command queue mode.
3156 + * Channel virtualization is supported through enqueuing of DMA jobs to,
3157 + * or dequeuing DMA jobs from, different work queues.
3158 + * This module can be found on Freescale LS SoCs.
3159 + *
3160 + * This program is free software; you can redistribute it and/or modify it
3161 + * under the terms of the GNU General Public License as published by the
3162 + * Free Software Foundation; either version 2 of the License, or (at your
3163 + * option) any later version.
3164 + */
3165 +
3166 +#include <asm/cacheflush.h>
3167 +#include <linux/clk.h>
3168 +#include <linux/delay.h>
3169 +#include <linux/dma-mapping.h>
3170 +#include <linux/dmapool.h>
3171 +#include <linux/init.h>
3172 +#include <linux/interrupt.h>
3173 +#include <linux/module.h>
3174 +#include <linux/of.h>
3175 +#include <linux/of_address.h>
3176 +#include <linux/of_device.h>
3177 +#include <linux/of_dma.h>
3178 +#include <linux/of_irq.h>
3179 +#include <linux/slab.h>
3180 +#include <linux/spinlock.h>
3181 +
3182 +#include "virt-dma.h"
3183 +
3184 +#define FSL_QDMA_DMR 0x0
3185 +#define FSL_QDMA_DSR 0x4
3186 +#define FSL_QDMA_DEIER 0xe00
3187 +#define FSL_QDMA_DEDR 0xe04
3188 +#define FSL_QDMA_DECFDW0R 0xe10
3189 +#define FSL_QDMA_DECFDW1R 0xe14
3190 +#define FSL_QDMA_DECFDW2R 0xe18
3191 +#define FSL_QDMA_DECFDW3R 0xe1c
3192 +#define FSL_QDMA_DECFQIDR 0xe30
3193 +#define FSL_QDMA_DECBR 0xe34
3194 +
3195 +#define FSL_QDMA_BCQMR(x) (0xc0 + 0x100 * (x))
3196 +#define FSL_QDMA_BCQSR(x) (0xc4 + 0x100 * (x))
3197 +#define FSL_QDMA_BCQEDPA_SADDR(x) (0xc8 + 0x100 * (x))
3198 +#define FSL_QDMA_BCQDPA_SADDR(x) (0xcc + 0x100 * (x))
3199 +#define FSL_QDMA_BCQEEPA_SADDR(x) (0xd0 + 0x100 * (x))
3200 +#define FSL_QDMA_BCQEPA_SADDR(x) (0xd4 + 0x100 * (x))
3201 +#define FSL_QDMA_BCQIER(x) (0xe0 + 0x100 * (x))
3202 +#define FSL_QDMA_BCQIDR(x) (0xe4 + 0x100 * (x))
3203 +
3204 +#define FSL_QDMA_SQDPAR 0x80c
3205 +#define FSL_QDMA_SQEPAR 0x814
3206 +#define FSL_QDMA_BSQMR 0x800
3207 +#define FSL_QDMA_BSQSR 0x804
3208 +#define FSL_QDMA_BSQICR 0x828
3209 +#define FSL_QDMA_CQMR 0xa00
3210 +#define FSL_QDMA_CQDSCR1 0xa08
3211 +#define FSL_QDMA_CQDSCR2 0xa0c
3212 +#define FSL_QDMA_CQIER 0xa10
3213 +#define FSL_QDMA_CQEDR 0xa14
3214 +#define FSL_QDMA_SQCCMR 0xa20
3215 +
3216 +#define FSL_QDMA_SQICR_ICEN
3217 +
3218 +#define FSL_QDMA_CQIDR_CQT 0xff000000
3219 +#define FSL_QDMA_CQIDR_SQPE 0x800000
3220 +#define FSL_QDMA_CQIDR_SQT 0x8000
3221 +
3222 +#define FSL_QDMA_BCQIER_CQTIE 0x8000
3223 +#define FSL_QDMA_BCQIER_CQPEIE 0x800000
3224 +#define FSL_QDMA_BSQICR_ICEN 0x80000000
3225 +#define FSL_QDMA_BSQICR_ICST(x) ((x) << 16)
3226 +#define FSL_QDMA_CQIER_MEIE 0x80000000
3227 +#define FSL_QDMA_CQIER_TEIE 0x1
3228 +#define FSL_QDMA_SQCCMR_ENTER_WM 0x200000
3229 +
3230 +#define FSL_QDMA_QUEUE_MAX 8
3231 +
3232 +#define FSL_QDMA_BCQMR_EN 0x80000000
3233 +#define FSL_QDMA_BCQMR_EI 0x40000000
3234 +#define FSL_QDMA_BCQMR_CD_THLD(x) ((x) << 20)
3235 +#define FSL_QDMA_BCQMR_CQ_SIZE(x) ((x) << 16)
3236 +
3237 +#define FSL_QDMA_BCQSR_QF 0x10000
3238 +#define FSL_QDMA_BCQSR_XOFF 0x1
3239 +
3240 +#define FSL_QDMA_BSQMR_EN 0x80000000
3241 +#define FSL_QDMA_BSQMR_DI 0x40000000
3242 +#define FSL_QDMA_BSQMR_CQ_SIZE(x) ((x) << 16)
3243 +
3244 +#define FSL_QDMA_BSQSR_QE 0x20000
3245 +
3246 +#define FSL_QDMA_DMR_DQD 0x40000000
3247 +#define FSL_QDMA_DSR_DB 0x80000000
3248 +
3249 +#define FSL_QDMA_BASE_BUFFER_SIZE 96
3250 +#define FSL_QDMA_EXPECT_SG_ENTRY_NUM 16
3251 +#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN 64
3252 +#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX 16384
3253 +#define FSL_QDMA_QUEUE_NUM_MAX 8
3254 +
3255 +#define FSL_QDMA_CMD_RWTTYPE 0x4
3256 +#define FSL_QDMA_CMD_LWC 0x2
3257 +
3258 +#define FSL_QDMA_CMD_RWTTYPE_OFFSET 28
3259 +#define FSL_QDMA_CMD_NS_OFFSET 27
3260 +#define FSL_QDMA_CMD_DQOS_OFFSET 24
3261 +#define FSL_QDMA_CMD_WTHROTL_OFFSET 20
3262 +#define FSL_QDMA_CMD_DSEN_OFFSET 19
3263 +#define FSL_QDMA_CMD_LWC_OFFSET 16
3264 +
3265 +#define FSL_QDMA_E_SG_TABLE 1
3266 +#define FSL_QDMA_E_DATA_BUFFER 0
3267 +#define FSL_QDMA_F_LAST_ENTRY 1
3268 +
3269 +static u64 pre_addr, pre_queue;
3270 +
3271 +struct fsl_qdma_ccdf {
3272 + u8 status;
3273 + u32 rev1:22;
3274 + u32 ser:1;
3275 + u32 rev2:1;
3276 + u32 rev3:20;
3277 + u32 offset:9;
3278 + u32 format:3;
3279 + union {
3280 + struct {
3281 + u32 addr_lo; /* low 32-bits of 40-bit address */
3282 + u32 addr_hi:8; /* high 8-bits of 40-bit address */
3283 + u32 rev4:16;
3284 + u32 queue:3;
3285 + u32 rev5:3;
3286 + u32 dd:2; /* dynamic debug */
3287 + };
3288 + struct {
3289 + u64 addr:40;
3290 + /* More efficient address accessor */
3291 + u64 __notaddress:24;
3292 + };
3293 + };
3294 +} __packed;
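+
+/*
+ * Note on the union above (the same pattern is used in fsl_qdma_csgf
+ * below): assuming a little-endian bitfield layout, reading the 40-bit
+ * 'addr' member is equivalent to combining the split fields:
+ *
+ *	ccdf->addr == ((u64)ccdf->addr_hi << 32) | ccdf->addr_lo
+ *
+ * which is why it is described as the more efficient accessor.
+ */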
3295 +
3296 +struct fsl_qdma_csgf {
3297 + u32 offset:13;
3298 + u32 rev1:19;
3299 + u32 length:30;
3300 + u32 f:1;
3301 + u32 e:1;
3302 + union {
3303 + struct {
3304 + u32 addr_lo; /* low 32-bits of 40-bit address */
3305 + u32 addr_hi:8; /* high 8-bits of 40-bit address */
3306 + u32 rev2:24;
3307 + };
3308 + struct {
3309 + u64 addr:40;
3310 + /* More efficient address accessor */
3311 + u64 __notaddress:24;
3312 + };
3313 + };
3314 +} __packed;
3315 +
3316 +struct fsl_qdma_sdf {
3317 + u32 rev3:32;
3318 +	u32 ssd:12;	/* source stride distance */
3319 +	u32 sss:12;	/* source stride size */
3320 + u32 rev4:8;
3321 + u32 rev5:32;
3322 + u32 cmd;
3323 +} __packed;
3324 +
3325 +struct fsl_qdma_ddf {
3326 + u32 rev1:32;
3327 + u32 dsd:12; /* Destination stride distance */
3328 + u32 dss:12; /* Destination stride size */
3329 + u32 rev2:8;
3330 + u32 rev3:32;
3331 + u32 cmd;
3332 +} __packed;
3333 +
3334 +struct fsl_qdma_chan {
3335 + struct virt_dma_chan vchan;
3336 + struct virt_dma_desc vdesc;
3337 + enum dma_status status;
3338 + u32 slave_id;
3339 + struct fsl_qdma_engine *qdma;
3340 + struct fsl_qdma_queue *queue;
3341 + struct list_head qcomp;
3342 +};
3343 +
3344 +struct fsl_qdma_queue {
3345 + struct fsl_qdma_ccdf *virt_head;
3346 + struct fsl_qdma_ccdf *virt_tail;
3347 + struct list_head comp_used;
3348 + struct list_head comp_free;
3349 + struct dma_pool *comp_pool;
3350 + struct dma_pool *sg_pool;
3351 + spinlock_t queue_lock;
3352 + dma_addr_t bus_addr;
3353 + u32 n_cq;
3354 + u32 id;
3355 + struct fsl_qdma_ccdf *cq;
3356 +};
3357 +
3358 +struct fsl_qdma_sg {
3359 + dma_addr_t bus_addr;
3360 + void *virt_addr;
3361 +};
3362 +
3363 +struct fsl_qdma_comp {
3364 + dma_addr_t bus_addr;
3365 + void *virt_addr;
3366 + struct fsl_qdma_chan *qchan;
3367 + struct fsl_qdma_sg *sg_block;
3368 + struct virt_dma_desc vdesc;
3369 + struct list_head list;
3370 + u32 sg_block_src;
3371 + u32 sg_block_dst;
3372 +};
3373 +
3374 +struct fsl_qdma_engine {
3375 + struct dma_device dma_dev;
3376 + void __iomem *ctrl_base;
3377 + void __iomem *status_base;
3378 + void __iomem *block_base;
3379 + u32 n_chans;
3380 + u32 n_queues;
3381 + struct mutex fsl_qdma_mutex;
3382 + int error_irq;
3383 + int queue_irq;
3384 + bool big_endian;
3385 + struct fsl_qdma_queue *queue;
3386 + struct fsl_qdma_queue *status;
3387 + struct fsl_qdma_chan chans[];
3389 +};
3390 +
3391 +static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
3392 +{
3393 + if (qdma->big_endian)
3394 + return ioread32be(addr);
3395 + else
3396 + return ioread32(addr);
3397 +}
3398 +
3399 +static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
3400 + void __iomem *addr)
3401 +{
3402 + if (qdma->big_endian)
3403 + iowrite32be(val, addr);
3404 + else
3405 + iowrite32(val, addr);
3406 +}
3407 +
3408 +static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
3409 +{
3410 + return container_of(chan, struct fsl_qdma_chan, vchan.chan);
3411 +}
3412 +
3413 +static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
3414 +{
3415 + return container_of(vd, struct fsl_qdma_comp, vdesc);
3416 +}
3417 +
3418 +static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
3419 +{
3420 + /*
3421 +	 * In QDMA mode, we don't need to do anything.
3422 + */
3423 + return 0;
3424 +}
3425 +
3426 +static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
3427 +{
3428 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
3429 + unsigned long flags;
3430 + LIST_HEAD(head);
3431 +
3432 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
3433 + vchan_get_all_descriptors(&fsl_chan->vchan, &head);
3434 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
3435 +
3436 + vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
3437 +}
3438 +
3439 +static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
3440 + dma_addr_t dst, dma_addr_t src, u32 len)
3441 +{
3442 + struct fsl_qdma_ccdf *ccdf;
3443 + struct fsl_qdma_csgf *csgf_desc, *csgf_src, *csgf_dest;
3444 + struct fsl_qdma_sdf *sdf;
3445 + struct fsl_qdma_ddf *ddf;
3446 +
3447 + ccdf = (struct fsl_qdma_ccdf *)fsl_comp->virt_addr;
3448 + csgf_desc = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 1;
3449 + csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 2;
3450 + csgf_dest = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 3;
3451 + sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4;
3452 + ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5;
3453 +
3454 + memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
3455 +	/* Head Command Descriptor (Frame Descriptor) */
3456 +	ccdf->addr = fsl_comp->bus_addr + 16;
3457 +	ccdf->format = 1; /* Compound S/G format */
3458 +	/* Status notification is enqueued to status queue. */
3459 +	ccdf->ser = 1;
3460 +	/* Compound Command Descriptor (Frame List Table) */
3461 +	csgf_desc->addr = fsl_comp->bus_addr + 64;
3462 +	/* Must be 32: the frame list holds two 16-byte entries. */
3463 +	csgf_desc->length = 32;
3464 + csgf_src->addr = src;
3465 + csgf_src->length = len;
3466 + csgf_dest->addr = dst;
3467 + csgf_dest->length = len;
3468 + /* This entry is the last entry. */
3469 + csgf_dest->f = FSL_QDMA_F_LAST_ENTRY;
3470 + /* Descriptor Buffer */
3471 + sdf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
3472 + ddf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
3473 + ddf->cmd |= FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET;
3474 +}
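For reference, the six pointer casts at the top of fsl_qdma_comp_fill_memcpy() slice one 96-byte completion buffer (FSL_QDMA_BASE_BUFFER_SIZE) into 16-byte entries. The layout implied by the offsets written above:

	offset  0: ccdf       frame descriptor; addr = bus_addr + 16, ser = 1
	offset 16: csgf_desc  frame list entry; addr = bus_addr + 64, length = 32
	offset 32: csgf_src   source buffer entry
	offset 48: csgf_dest  destination buffer entry, flagged last (f = 1)
	offset 64: sdf        source descriptor carrying the read command
	offset 80: ddf        destination descriptor carrying the write command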
3475 +
3476 +static void fsl_qdma_comp_fill_sg(
3477 + struct fsl_qdma_comp *fsl_comp,
3478 + struct scatterlist *dst_sg, unsigned int dst_nents,
3479 + struct scatterlist *src_sg, unsigned int src_nents)
3480 +{
3481 + struct fsl_qdma_ccdf *ccdf;
3482 + struct fsl_qdma_csgf *csgf_desc, *csgf_src, *csgf_dest, *csgf_sg;
3483 + struct fsl_qdma_sdf *sdf;
3484 + struct fsl_qdma_ddf *ddf;
3485 + struct fsl_qdma_sg *sg_block, *temp;
3486 + struct scatterlist *sg;
3487 + u64 total_src_len = 0;
3488 + u64 total_dst_len = 0;
3489 + u32 i;
3490 +
3491 + ccdf = (struct fsl_qdma_ccdf *)fsl_comp->virt_addr;
3492 + csgf_desc = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 1;
3493 + csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 2;
3494 + csgf_dest = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 3;
3495 + sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4;
3496 + ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5;
3497 +
3498 + memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
3499 +	/* Head Command Descriptor (Frame Descriptor) */
3500 +	ccdf->addr = fsl_comp->bus_addr + 16;
3501 +	ccdf->format = 1; /* Compound S/G format */
3502 +	/* Status notification is enqueued to status queue. */
3503 +	ccdf->ser = 1;
3504 +
3505 +	/* Compound Command Descriptor (Frame List Table) */
3506 +	csgf_desc->addr = fsl_comp->bus_addr + 64;
3507 +	/* Must be 32: the frame list holds two 16-byte entries. */
3508 +	csgf_desc->length = 32;
3509 +
3510 + sg_block = fsl_comp->sg_block;
3511 + csgf_src->addr = sg_block->bus_addr;
3512 +	/* This entry links to an s/g table. */
3513 + csgf_src->e = FSL_QDMA_E_SG_TABLE;
3514 +
3515 + temp = sg_block + fsl_comp->sg_block_src;
3516 + csgf_dest->addr = temp->bus_addr;
3517 + /* This entry is the last entry. */
3518 + csgf_dest->f = FSL_QDMA_F_LAST_ENTRY;
3519 +	/* This entry links to an s/g table. */
3520 + csgf_dest->e = FSL_QDMA_E_SG_TABLE;
3521 +
3522 + for_each_sg(src_sg, sg, src_nents, i) {
3523 + temp = sg_block + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
3524 + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
3525 + i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
3526 + csgf_sg->addr = sg_dma_address(sg);
3527 + csgf_sg->length = sg_dma_len(sg);
3528 + total_src_len += sg_dma_len(sg);
3529 +
3530 + if (i == src_nents - 1)
3531 + csgf_sg->f = FSL_QDMA_F_LAST_ENTRY;
3532 + if (i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) ==
3533 + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 2) {
3534 + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
3535 + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1;
3536 + temp = sg_block +
3537 + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
3538 + csgf_sg->addr = temp->bus_addr;
3539 + csgf_sg->e = FSL_QDMA_E_SG_TABLE;
3540 + }
3541 + }
3542 +
3543 + sg_block += fsl_comp->sg_block_src;
3544 + for_each_sg(dst_sg, sg, dst_nents, i) {
3545 + temp = sg_block + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
3546 + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
3547 + i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
3548 + csgf_sg->addr = sg_dma_address(sg);
3549 + csgf_sg->length = sg_dma_len(sg);
3550 + total_dst_len += sg_dma_len(sg);
3551 +
3552 + if (i == dst_nents - 1)
3553 + csgf_sg->f = FSL_QDMA_F_LAST_ENTRY;
3554 + if (i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) ==
3555 + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 2) {
3556 + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
3557 + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1;
3558 + temp = sg_block +
3559 + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
3560 + csgf_sg->addr = temp->bus_addr;
3561 + csgf_sg->e = FSL_QDMA_E_SG_TABLE;
3562 + }
3563 + }
3564 +
3565 + if (total_src_len != total_dst_len)
3566 + dev_err(&fsl_comp->qchan->vchan.chan.dev->device,
3567 +			"The data lengths for src and dst don't match.\n");
3568 +
3569 + csgf_src->length = total_src_len;
3570 + csgf_dest->length = total_dst_len;
3571 +
3572 + /* Descriptor Buffer */
3573 + sdf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
3574 + ddf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
3575 +}
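A note on the two loops above: each 256-byte s/g table holds FSL_QDMA_EXPECT_SG_ENTRY_NUM (16) entries, of which only the first 15 carry data; once slot 14 of a table is filled, the 16th slot is rewritten as a link entry (e = FSL_QDMA_E_SG_TABLE) pointing at the next table. For example, 40 source entries occupy three tables: entries 0-14 plus a link, 15-29 plus a link, then 30-39.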
3576 +
3577 +/*
3578 + * Pre-request a full set of command descriptors for enqueue.
3579 + */
3580 +static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
3581 +{
3582 + struct fsl_qdma_comp *comp_temp;
3583 + int i;
3584 +
3585 + for (i = 0; i < queue->n_cq; i++) {
3586 + comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
3587 + if (!comp_temp)
3588 +			return -ENOMEM;
3589 + comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
3590 + GFP_NOWAIT,
3591 + &comp_temp->bus_addr);
+		if (!comp_temp->virt_addr) {
+			kfree(comp_temp);
+			return -ENOMEM;
+		}
3594 + list_add_tail(&comp_temp->list, &queue->comp_free);
3595 + }
3596 + return 0;
3597 +}
3598 +
3599 +/*
3600 + * Request a command descriptor for enqueue.
3601 + */
3602 +static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc(
3603 + struct fsl_qdma_chan *fsl_chan,
3604 + unsigned int dst_nents,
3605 + unsigned int src_nents)
3606 +{
3607 + struct fsl_qdma_comp *comp_temp;
3608 + struct fsl_qdma_sg *sg_block;
3609 + struct fsl_qdma_queue *queue = fsl_chan->queue;
3610 + unsigned long flags;
3611 + unsigned int dst_sg_entry_block, src_sg_entry_block, sg_entry_total, i;
3612 +
3613 + spin_lock_irqsave(&queue->queue_lock, flags);
3614 + if (list_empty(&queue->comp_free)) {
3615 + spin_unlock_irqrestore(&queue->queue_lock, flags);
3616 + comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
3617 + if (!comp_temp)
3618 + return NULL;
3619 + comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
3620 + GFP_NOWAIT,
3621 + &comp_temp->bus_addr);
+		if (!comp_temp->virt_addr) {
+			kfree(comp_temp);
+			return NULL;
+		}
3624 + } else {
3625 + comp_temp = list_first_entry(&queue->comp_free,
3626 + struct fsl_qdma_comp,
3627 + list);
3628 + list_del(&comp_temp->list);
3629 + spin_unlock_irqrestore(&queue->queue_lock, flags);
3630 + }
3631 +
3632 + if (dst_nents != 0)
3633 + dst_sg_entry_block = dst_nents /
3634 + (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
3635 + else
3636 + dst_sg_entry_block = 0;
3637 +
3638 + if (src_nents != 0)
3639 + src_sg_entry_block = src_nents /
3640 + (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
3641 + else
3642 + src_sg_entry_block = 0;
3643 +
3644 + sg_entry_total = dst_sg_entry_block + src_sg_entry_block;
3645 + if (sg_entry_total) {
3646 + sg_block = kzalloc(sizeof(*sg_block) *
3647 + sg_entry_total,
3648 + GFP_KERNEL);
3649 + if (!sg_block)
3650 + return NULL;
3651 + comp_temp->sg_block = sg_block;
3652 + for (i = 0; i < sg_entry_total; i++) {
3653 + sg_block->virt_addr = dma_pool_alloc(queue->sg_pool,
3654 + GFP_NOWAIT,
3655 + &sg_block->bus_addr);
+			if (!sg_block->virt_addr)
+				return NULL;
+			memset(sg_block->virt_addr, 0,
+			       FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16);
3658 + sg_block++;
3659 + }
3660 + }
3661 +
3662 + comp_temp->sg_block_src = src_sg_entry_block;
3663 + comp_temp->sg_block_dst = dst_sg_entry_block;
3664 + comp_temp->qchan = fsl_chan;
3665 +
3666 + return comp_temp;
3667 +}
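The block arithmetic above can be read as a small helper; the following is a hypothetical sketch mirroring the driver's math (qdma_sg_blocks_needed() is not part of the patch). Note that nents / 15 + 1 deliberately rounds up, and over-allocates one table when nents is an exact multiple of 15, matching the driver's arithmetic.

	/* Hypothetical helper: number of 16-entry s/g tables one side needs. */
	static unsigned int qdma_sg_blocks_needed(unsigned int nents)
	{
		if (!nents)
			return 0;
		/* 15 payload entries per table; the 16th slot links onward. */
		return nents / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
	}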
3668 +
3669 +static struct fsl_qdma_queue *fsl_qdma_alloc_queue_resources(
3670 + struct platform_device *pdev,
3671 + unsigned int queue_num)
3672 +{
3673 + struct device_node *np = pdev->dev.of_node;
3674 + struct fsl_qdma_queue *queue_head, *queue_temp;
3675 + int ret, len, i;
3676 + unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
3677 +
3678 + if (queue_num > FSL_QDMA_QUEUE_MAX)
3679 + queue_num = FSL_QDMA_QUEUE_MAX;
3680 + len = sizeof(*queue_head) * queue_num;
3681 + queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
3682 + if (!queue_head)
3683 + return NULL;
3684 +
3685 + ret = of_property_read_u32_array(np, "queue-sizes", queue_size,
3686 + queue_num);
3687 + if (ret) {
3688 + dev_err(&pdev->dev, "Can't get queue-sizes.\n");
3689 + return NULL;
3690 + }
3691 +
3692 + for (i = 0; i < queue_num; i++) {
3693 + if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
3694 + || queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
3695 +			dev_err(&pdev->dev, "queue-sizes out of range.\n");
3696 + return NULL;
3697 + }
3698 + queue_temp = queue_head + i;
3699 + queue_temp->cq = dma_alloc_coherent(&pdev->dev,
3700 + sizeof(struct fsl_qdma_ccdf) *
3701 + queue_size[i],
3702 + &queue_temp->bus_addr,
3703 + GFP_KERNEL);
3704 + if (!queue_temp->cq)
3705 + return NULL;
3706 + queue_temp->n_cq = queue_size[i];
3707 + queue_temp->id = i;
3708 + queue_temp->virt_head = queue_temp->cq;
3709 + queue_temp->virt_tail = queue_temp->cq;
3710 + /*
3711 + * The dma pool for queue command buffer
3712 + */
3713 + queue_temp->comp_pool = dma_pool_create("comp_pool",
3714 + &pdev->dev,
3715 + FSL_QDMA_BASE_BUFFER_SIZE,
3716 + 16, 0);
3717 + if (!queue_temp->comp_pool) {
3718 + dma_free_coherent(&pdev->dev,
3719 + sizeof(struct fsl_qdma_ccdf) *
3720 + queue_size[i],
3721 + queue_temp->cq,
3722 + queue_temp->bus_addr);
3723 + return NULL;
3724 + }
3725 + /*
3726 +		 * The dma pool for s/g entry tables
3727 + */
3728 + queue_temp->sg_pool = dma_pool_create("sg_pool",
3729 + &pdev->dev,
3730 + FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16,
3731 + 64, 0);
3732 + if (!queue_temp->sg_pool) {
3733 + dma_free_coherent(&pdev->dev,
3734 + sizeof(struct fsl_qdma_ccdf) *
3735 + queue_size[i],
3736 + queue_temp->cq,
3737 + queue_temp->bus_addr);
3738 + dma_pool_destroy(queue_temp->comp_pool);
3739 + return NULL;
3740 + }
3741 + /*
3742 + * List for queue command buffer
3743 + */
3744 + INIT_LIST_HEAD(&queue_temp->comp_used);
3745 + INIT_LIST_HEAD(&queue_temp->comp_free);
3746 + spin_lock_init(&queue_temp->queue_lock);
3747 + }
3748 +
3749 + return queue_head;
3750 +}
3751 +
3752 +static struct fsl_qdma_queue *fsl_qdma_prep_status_queue(
3753 + struct platform_device *pdev)
3754 +{
3755 + struct device_node *np = pdev->dev.of_node;
3756 + struct fsl_qdma_queue *status_head;
3757 + unsigned int status_size;
3758 + int ret;
3759 +
3760 + ret = of_property_read_u32(np, "status-sizes", &status_size);
3761 + if (ret) {
3762 + dev_err(&pdev->dev, "Can't get status-sizes.\n");
3763 + return NULL;
3764 + }
3765 + if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
3766 + || status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
3767 +		dev_err(&pdev->dev, "status-sizes out of range.\n");
3768 + return NULL;
3769 + }
3770 + status_head = devm_kzalloc(&pdev->dev, sizeof(*status_head),
3771 + GFP_KERNEL);
3772 + if (!status_head)
3773 + return NULL;
3774 +
3775 + /*
3776 +	 * Buffer for the status queue
3777 + */
3778 + status_head->cq = dma_alloc_coherent(&pdev->dev,
3779 + sizeof(struct fsl_qdma_ccdf) *
3780 + status_size,
3781 + &status_head->bus_addr,
3782 + GFP_KERNEL);
3783 + if (!status_head->cq)
3784 + return NULL;
3785 + status_head->n_cq = status_size;
3786 + status_head->virt_head = status_head->cq;
3787 + status_head->virt_tail = status_head->cq;
3788 + status_head->comp_pool = NULL;
3789 +
3790 + return status_head;
3791 +}
3792 +
3793 +static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
3794 +{
3795 + void __iomem *ctrl = fsl_qdma->ctrl_base;
3796 + void __iomem *block = fsl_qdma->block_base;
3797 + int i, count = 5;
3798 + u32 reg;
3799 +
3800 + /* Disable the command queue and wait for idle state. */
3801 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
3802 + reg |= FSL_QDMA_DMR_DQD;
3803 + qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
3804 + for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
3805 + qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
3806 +
3807 + while (1) {
3808 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
3809 + if (!(reg & FSL_QDMA_DSR_DB))
3810 + break;
3811 + if (count-- < 0)
3812 + return -EBUSY;
3813 + udelay(100);
3814 + }
3815 +
3816 + /* Disable status queue. */
3817 + qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);
3818 +
3819 + /*
3820 + * Clear the command queue interrupt detect register for all queues.
3821 + */
3822 + qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
3823 +
3824 + return 0;
3825 +}
3826 +
3827 +static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma)
3828 +{
3829 + struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
3830 + struct fsl_qdma_queue *fsl_status = fsl_qdma->status;
3831 + struct fsl_qdma_queue *temp_queue;
3832 + struct fsl_qdma_comp *fsl_comp;
3833 + struct fsl_qdma_ccdf *status_addr;
3834 + struct fsl_qdma_csgf *csgf_src;
3835 + void __iomem *block = fsl_qdma->block_base;
3836 + u32 reg, i;
3837 + bool duplicate, duplicate_handle;
3838 +
3839 + while (1) {
3840 + duplicate = 0;
3841 + duplicate_handle = 0;
3842 + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
3843 + if (reg & FSL_QDMA_BSQSR_QE)
3844 + return 0;
3845 + status_addr = fsl_status->virt_head;
3846 + if (status_addr->queue == pre_queue &&
3847 + status_addr->addr == pre_addr)
3848 + duplicate = 1;
3849 +
3850 + i = status_addr->queue;
3851 + pre_queue = status_addr->queue;
3852 + pre_addr = status_addr->addr;
3853 + temp_queue = fsl_queue + i;
3854 + spin_lock(&temp_queue->queue_lock);
3855 + if (list_empty(&temp_queue->comp_used)) {
3856 + if (duplicate)
3857 + duplicate_handle = 1;
3858 + else {
3859 + spin_unlock(&temp_queue->queue_lock);
3860 + return -1;
3861 + }
3862 + } else {
3863 + fsl_comp = list_first_entry(&temp_queue->comp_used,
3864 + struct fsl_qdma_comp,
3865 + list);
3866 + csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr
3867 + + 2;
3868 + if (fsl_comp->bus_addr + 16 !=
3869 + (dma_addr_t)status_addr->addr) {
3870 + if (duplicate)
3871 + duplicate_handle = 1;
3872 + else {
3873 + spin_unlock(&temp_queue->queue_lock);
3874 + return -1;
3875 + }
3876 + }
3877 + }
3878 +
3879 + if (duplicate_handle) {
3880 + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
3881 + reg |= FSL_QDMA_BSQMR_DI;
3882 + status_addr->addr = 0x0;
3883 + fsl_status->virt_head++;
3884 + if (fsl_status->virt_head == fsl_status->cq
3885 + + fsl_status->n_cq)
3886 + fsl_status->virt_head = fsl_status->cq;
3887 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
3888 + spin_unlock(&temp_queue->queue_lock);
3889 + continue;
3890 + }
3891 + list_del(&fsl_comp->list);
3892 +
3893 + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
3894 + reg |= FSL_QDMA_BSQMR_DI;
3895 + status_addr->addr = 0x0;
3896 + fsl_status->virt_head++;
3897 + if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
3898 + fsl_status->virt_head = fsl_status->cq;
3899 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
3900 + spin_unlock(&temp_queue->queue_lock);
3901 +
3902 + spin_lock(&fsl_comp->qchan->vchan.lock);
3903 + vchan_cookie_complete(&fsl_comp->vdesc);
3904 + fsl_comp->qchan->status = DMA_COMPLETE;
3905 + spin_unlock(&fsl_comp->qchan->vchan.lock);
3906 + }
3907 + return 0;
3908 +}
3909 +
3910 +static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
3911 +{
3912 + struct fsl_qdma_engine *fsl_qdma = dev_id;
3913 + unsigned int intr;
3914 + void __iomem *status = fsl_qdma->status_base;
3915 +
3916 + intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
3917 +
3918 + if (intr)
3919 + dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");
3920 +
3921 + qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
3922 + return IRQ_HANDLED;
3923 +}
3924 +
3925 +static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
3926 +{
3927 + struct fsl_qdma_engine *fsl_qdma = dev_id;
3928 + unsigned int intr, reg;
3929 + void __iomem *block = fsl_qdma->block_base;
3930 + void __iomem *ctrl = fsl_qdma->ctrl_base;
3931 +
3932 + intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));
3933 +
3934 + if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
3935 + intr = fsl_qdma_queue_transfer_complete(fsl_qdma);
3936 +
3937 + if (intr != 0) {
3938 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
3939 + reg |= FSL_QDMA_DMR_DQD;
3940 + qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
3941 + qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
3942 + dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
3943 + }
3944 +
3945 + qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
3946 +
3947 + return IRQ_HANDLED;
3948 +}
3949 +
3950 +static int
3951 +fsl_qdma_irq_init(struct platform_device *pdev,
3952 + struct fsl_qdma_engine *fsl_qdma)
3953 +{
3954 + int ret;
3955 +
3956 + fsl_qdma->error_irq = platform_get_irq_byname(pdev,
3957 + "qdma-error");
3958 + if (fsl_qdma->error_irq < 0) {
3959 + dev_err(&pdev->dev, "Can't get qdma controller irq.\n");
3960 + return fsl_qdma->error_irq;
3961 + }
3962 +
3963 + fsl_qdma->queue_irq = platform_get_irq_byname(pdev, "qdma-queue");
3964 + if (fsl_qdma->queue_irq < 0) {
3965 + dev_err(&pdev->dev, "Can't get qdma queue irq.\n");
3966 + return fsl_qdma->queue_irq;
3967 + }
3968 +
3969 + ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
3970 + fsl_qdma_error_handler, 0, "qDMA error", fsl_qdma);
3971 + if (ret) {
3972 + dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
3973 + return ret;
3974 + }
3975 + ret = devm_request_irq(&pdev->dev, fsl_qdma->queue_irq,
3976 + fsl_qdma_queue_handler, 0, "qDMA queue", fsl_qdma);
3977 + if (ret) {
3978 + dev_err(&pdev->dev, "Can't register qDMA queue IRQ.\n");
3979 + return ret;
3980 + }
3981 +
3982 + return 0;
3983 +}
3984 +
3985 +static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
3986 +{
3987 + struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
3988 + struct fsl_qdma_queue *temp;
3989 + void __iomem *ctrl = fsl_qdma->ctrl_base;
3990 + void __iomem *status = fsl_qdma->status_base;
3991 + void __iomem *block = fsl_qdma->block_base;
3992 + int i, ret;
3993 + u32 reg;
3994 +
3995 + /* Try to halt the qDMA engine first. */
3996 + ret = fsl_qdma_halt(fsl_qdma);
3997 + if (ret) {
3998 +		dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!\n");
3999 + return ret;
4000 + }
4001 +
4002 + /*
4003 + * Clear the command queue interrupt detect register for all queues.
4004 + */
4005 + qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
4006 +
4007 + for (i = 0; i < fsl_qdma->n_queues; i++) {
4008 + temp = fsl_queue + i;
4009 + /*
4010 + * Initialize Command Queue registers to point to the first
4011 + * command descriptor in memory.
4012 + * Dequeue Pointer Address Registers
4013 + * Enqueue Pointer Address Registers
4014 + */
4015 + qdma_writel(fsl_qdma, temp->bus_addr,
4016 + block + FSL_QDMA_BCQDPA_SADDR(i));
4017 + qdma_writel(fsl_qdma, temp->bus_addr,
4018 + block + FSL_QDMA_BCQEPA_SADDR(i));
4019 +
4020 + /* Initialize the queue mode. */
4021 + reg = FSL_QDMA_BCQMR_EN;
4022 + reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq)-4);
4023 + reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq)-6);
4024 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
4025 + }
4026 +
4027 + /*
4028 + * Workaround for erratum: ERR010812.
4029 +	 * We must enable XOFF to avoid enqueue rejections.
4030 + * Setting SQCCMR ENTER_WM to 0x20.
4031 + */
4032 + qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
4033 + block + FSL_QDMA_SQCCMR);
4034 + /*
4035 + * Initialize status queue registers to point to the first
4036 + * command descriptor in memory.
4037 + * Dequeue Pointer Address Registers
4038 + * Enqueue Pointer Address Registers
4039 + */
4040 + qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
4041 + block + FSL_QDMA_SQEPAR);
4042 + qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
4043 + block + FSL_QDMA_SQDPAR);
4044 + /* Initialize status queue interrupt. */
4045 + qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
4046 + block + FSL_QDMA_BCQIER(0));
4047 + qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN | FSL_QDMA_BSQICR_ICST(5)
4048 + | 0x8000,
4049 + block + FSL_QDMA_BSQICR);
4050 + qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE | FSL_QDMA_CQIER_TEIE,
4051 + block + FSL_QDMA_CQIER);
4052 + /* Initialize controller interrupt register. */
4053 + qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
4054 + qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEIER);
4055 +
4056 + /* Initialize the status queue mode. */
4057 + reg = FSL_QDMA_BSQMR_EN;
4058 + reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2(fsl_qdma->status->n_cq)-6);
4059 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
4060 +
4061 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
4062 + reg &= ~FSL_QDMA_DMR_DQD;
4063 + qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
4064 +
4065 + return 0;
4066 +}
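On the encodings above: with n_cq between FSL_QDMA_CIRCULAR_DESC_SIZE_MIN (64) and FSL_QDMA_CIRCULAR_DESC_SIZE_MAX (16384), ilog2(n_cq) - 6 yields 0 for a 64-entry queue and 8 for a 16384-entry one in the CQ_SIZE field, while ilog2(n_cq) - 4 sets the dequeue threshold. That n_cq is a power of two is an assumption implied by the ilog2() encoding; the allocation path only range-checks the values.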
4067 +
4068 +static struct dma_async_tx_descriptor *fsl_qdma_prep_dma_sg(
4069 + struct dma_chan *chan,
4070 + struct scatterlist *dst_sg, unsigned int dst_nents,
4071 + struct scatterlist *src_sg, unsigned int src_nents,
4072 + unsigned long flags)
4073 +{
4074 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
4075 + struct fsl_qdma_comp *fsl_comp;
4076 +
+	fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan,
+						 dst_nents,
+						 src_nents);
+	if (!fsl_comp)
+		return NULL;
+	fsl_qdma_comp_fill_sg(fsl_comp, dst_sg, dst_nents, src_sg, src_nents);
4081 +
4082 + return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
4083 +}
4084 +
4085 +static struct dma_async_tx_descriptor *
4086 +fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
4087 + dma_addr_t src, size_t len, unsigned long flags)
4088 +{
4089 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
4090 + struct fsl_qdma_comp *fsl_comp;
4091 +
+	fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan, 0, 0);
+	if (!fsl_comp)
+		return NULL;
+	fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
4094 +
4095 + return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
4096 +}
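For context, a client reaches these prep callbacks through the generic dmaengine API. A minimal sketch against the stock 4.9 interfaces follows (illustrative only: dst/src are assumed to be already DMA-mapped, and the busy-wait stands in for real completion handling):

	#include <linux/dmaengine.h>

	static int qdma_memcpy_example(dma_addr_t dst, dma_addr_t src, size_t len)
	{
		dma_cap_mask_t mask;
		struct dma_chan *chan;
		struct dma_async_tx_descriptor *tx;
		dma_cookie_t cookie;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);
		chan = dma_request_channel(mask, NULL, NULL);
		if (!chan)
			return -ENODEV;

		/* Lands in fsl_qdma_prep_memcpy() for a qDMA channel. */
		tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
							  DMA_PREP_INTERRUPT);
		if (!tx) {
			dma_release_channel(chan);
			return -ENOMEM;
		}

		cookie = dmaengine_submit(tx);
		dma_async_issue_pending(chan);	/* fsl_qdma_issue_pending() */

		while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) !=
		       DMA_COMPLETE)
			cpu_relax();

		dma_release_channel(chan);
		return 0;
	}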
4097 +
4098 +static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
4099 +{
4100 + void __iomem *block = fsl_chan->qdma->block_base;
4101 + struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
4102 + struct fsl_qdma_comp *fsl_comp;
4103 + struct virt_dma_desc *vdesc;
4104 + u32 reg;
4105 +
4106 + reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
4107 + if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
4108 + return;
4109 + vdesc = vchan_next_desc(&fsl_chan->vchan);
4110 + if (!vdesc)
4111 + return;
4112 + list_del(&vdesc->node);
4113 + fsl_comp = to_fsl_qdma_comp(vdesc);
4114 +
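+	/*
+	 * Only the 16-byte frame descriptor is copied into the circular
+	 * command queue; it points back at the rest of the compound
+	 * descriptor held in the completion buffer.
+	 */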
4115 + memcpy(fsl_queue->virt_head++, fsl_comp->virt_addr, 16);
4116 + if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
4117 + fsl_queue->virt_head = fsl_queue->cq;
4118 +
4119 + list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
4120 + barrier();
4121 + reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
4122 + reg |= FSL_QDMA_BCQMR_EI;
4123 + qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
4124 + fsl_chan->status = DMA_IN_PROGRESS;
4125 +}
4126 +
4127 +static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan,
4128 + dma_cookie_t cookie, struct dma_tx_state *txstate)
4129 +{
4130 + return dma_cookie_status(chan, cookie, txstate);
4131 +}
4132 +
4133 +static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
4134 +{
4135 + struct fsl_qdma_comp *fsl_comp;
4136 + struct fsl_qdma_queue *fsl_queue;
4137 + struct fsl_qdma_sg *sg_block;
4138 + unsigned long flags;
4139 + unsigned int i;
4140 +
4141 + fsl_comp = to_fsl_qdma_comp(vdesc);
4142 + fsl_queue = fsl_comp->qchan->queue;
4143 +
4144 + if (fsl_comp->sg_block) {
4145 + for (i = 0; i < fsl_comp->sg_block_src +
4146 + fsl_comp->sg_block_dst; i++) {
4147 + sg_block = fsl_comp->sg_block + i;
4148 + dma_pool_free(fsl_queue->sg_pool,
4149 + sg_block->virt_addr,
4150 + sg_block->bus_addr);
4151 + }
4152 + kfree(fsl_comp->sg_block);
4153 + }
4154 +
4155 + spin_lock_irqsave(&fsl_queue->queue_lock, flags);
4156 + list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
4157 + spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
4158 +}
4159 +
4160 +static void fsl_qdma_issue_pending(struct dma_chan *chan)
4161 +{
4162 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
4163 + struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
4164 + unsigned long flags;
4165 +
4166 + spin_lock_irqsave(&fsl_queue->queue_lock, flags);
4167 + spin_lock(&fsl_chan->vchan.lock);
4168 + if (vchan_issue_pending(&fsl_chan->vchan))
4169 + fsl_qdma_enqueue_desc(fsl_chan);
4170 + spin_unlock(&fsl_chan->vchan.lock);
4171 + spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
4172 +}
4173 +
4174 +static int fsl_qdma_probe(struct platform_device *pdev)
4175 +{
4176 + struct device_node *np = pdev->dev.of_node;
4177 + struct fsl_qdma_engine *fsl_qdma;
4178 + struct fsl_qdma_chan *fsl_chan;
4179 + struct resource *res;
4180 + unsigned int len, chans, queues;
4181 + int ret, i;
4182 +
4183 + ret = of_property_read_u32(np, "channels", &chans);
4184 + if (ret) {
4185 + dev_err(&pdev->dev, "Can't get channels.\n");
4186 + return ret;
4187 + }
4188 +
4189 + len = sizeof(*fsl_qdma) + sizeof(*fsl_chan) * chans;
4190 + fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
4191 + if (!fsl_qdma)
4192 + return -ENOMEM;
4193 +
4194 + ret = of_property_read_u32(np, "queues", &queues);
4195 + if (ret) {
4196 + dev_err(&pdev->dev, "Can't get queues.\n");
4197 + return ret;
4198 + }
4199 +
4200 + fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, queues);
4201 + if (!fsl_qdma->queue)
4202 + return -ENOMEM;
4203 +
4204 + fsl_qdma->status = fsl_qdma_prep_status_queue(pdev);
4205 + if (!fsl_qdma->status)
4206 + return -ENOMEM;
4207 +
4208 + fsl_qdma->n_chans = chans;
4209 + fsl_qdma->n_queues = queues;
4210 + mutex_init(&fsl_qdma->fsl_qdma_mutex);
4211 +
4212 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4213 + fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
4214 + if (IS_ERR(fsl_qdma->ctrl_base))
4215 + return PTR_ERR(fsl_qdma->ctrl_base);
4216 +
4217 + res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
4218 + fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
4219 + if (IS_ERR(fsl_qdma->status_base))
4220 + return PTR_ERR(fsl_qdma->status_base);
4221 +
4222 + res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
4223 + fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
4224 + if (IS_ERR(fsl_qdma->block_base))
4225 + return PTR_ERR(fsl_qdma->block_base);
4226 +
4227 + ret = fsl_qdma_irq_init(pdev, fsl_qdma);
4228 + if (ret)
4229 + return ret;
4230 +
4231 + fsl_qdma->big_endian = of_property_read_bool(np, "big-endian");
4232 + INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
4233 + for (i = 0; i < fsl_qdma->n_chans; i++) {
4234 + struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
4235 +
4236 + fsl_chan->qdma = fsl_qdma;
4237 + fsl_chan->queue = fsl_qdma->queue + i % fsl_qdma->n_queues;
4238 + fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
4239 + INIT_LIST_HEAD(&fsl_chan->qcomp);
4240 + vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
4241 + }
+	for (i = 0; i < fsl_qdma->n_queues; i++) {
+		ret = fsl_qdma_pre_request_enqueue_desc(fsl_qdma->queue + i);
+		if (ret)
+			return ret;
+	}
4244 +
4245 + dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);
4246 + dma_cap_set(DMA_SG, fsl_qdma->dma_dev.cap_mask);
4247 +
4248 + fsl_qdma->dma_dev.dev = &pdev->dev;
4249 + fsl_qdma->dma_dev.device_alloc_chan_resources
4250 + = fsl_qdma_alloc_chan_resources;
4251 + fsl_qdma->dma_dev.device_free_chan_resources
4252 + = fsl_qdma_free_chan_resources;
4253 + fsl_qdma->dma_dev.device_tx_status = fsl_qdma_tx_status;
4254 + fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
4255 + fsl_qdma->dma_dev.device_prep_dma_sg = fsl_qdma_prep_dma_sg;
4256 + fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
4257 +
4258 + dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
4259 +
4260 + platform_set_drvdata(pdev, fsl_qdma);
4261 +
4262 + ret = dma_async_device_register(&fsl_qdma->dma_dev);
4263 + if (ret) {
4264 + dev_err(&pdev->dev, "Can't register Freescale qDMA engine.\n");
4265 + return ret;
4266 + }
4267 +
4268 + ret = fsl_qdma_reg_init(fsl_qdma);
4269 + if (ret) {
4270 +		dev_err(&pdev->dev, "Can't initialize the qDMA engine.\n");
4271 + return ret;
4272 + }
4273 +
4275 + return 0;
4276 +}
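Taken together, fsl_qdma_probe() expects a device-tree node shaped roughly as below. Only the compatible string, the property names, and the three reg regions follow from the code; the label and all values are illustrative assumptions:

	qdma: dma-controller {
		compatible = "fsl,ls1021a-qdma";
		reg = <...>, <...>, <...>;	/* ctrl, status, block */
		interrupts = <...>, <...>;
		interrupt-names = "qdma-error", "qdma-queue";
		channels = <8>;
		queues = <2>;
		queue-sizes = <64 64>;
		status-sizes = <64>;
		big-endian;
	};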
4277 +
4278 +static int fsl_qdma_remove(struct platform_device *pdev)
4279 +{
4280 + struct device_node *np = pdev->dev.of_node;
4281 + struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
4282 + struct fsl_qdma_queue *queue_temp;
4283 + struct fsl_qdma_queue *status = fsl_qdma->status;
4284 + struct fsl_qdma_comp *comp_temp, *_comp_temp;
4285 + int i;
4286 +
4287 + of_dma_controller_free(np);
4288 + dma_async_device_unregister(&fsl_qdma->dma_dev);
4289 +
4290 + /* Free descriptor areas */
4291 + for (i = 0; i < fsl_qdma->n_queues; i++) {
4292 + queue_temp = fsl_qdma->queue + i;
4293 + list_for_each_entry_safe(comp_temp, _comp_temp,
4294 + &queue_temp->comp_used, list) {
4295 + dma_pool_free(queue_temp->comp_pool,
4296 + comp_temp->virt_addr,
4297 + comp_temp->bus_addr);
4298 + list_del(&comp_temp->list);
4299 + kfree(comp_temp);
4300 + }
4301 + list_for_each_entry_safe(comp_temp, _comp_temp,
4302 + &queue_temp->comp_free, list) {
4303 + dma_pool_free(queue_temp->comp_pool,
4304 + comp_temp->virt_addr,
4305 + comp_temp->bus_addr);
4306 + list_del(&comp_temp->list);
4307 + kfree(comp_temp);
4308 + }
4309 + dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) *
4310 + queue_temp->n_cq, queue_temp->cq,
4311 + queue_temp->bus_addr);
4312 + dma_pool_destroy(queue_temp->comp_pool);
4313 + }
4314 +
4315 + dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) *
4316 + status->n_cq, status->cq, status->bus_addr);
4317 + return 0;
4318 +}
4319 +
4320 +static const struct of_device_id fsl_qdma_dt_ids[] = {
4321 + { .compatible = "fsl,ls1021a-qdma", },
4322 + { /* sentinel */ }
4323 +};
4324 +MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);
4325 +
4326 +static struct platform_driver fsl_qdma_driver = {
4327 + .driver = {
4328 + .name = "fsl-qdma",
4329 + .owner = THIS_MODULE,
4330 + .of_match_table = fsl_qdma_dt_ids,
4331 + },
4332 + .probe = fsl_qdma_probe,
4333 + .remove = fsl_qdma_remove,
4334 +};
4335 +
4336 +static int __init fsl_qdma_init(void)
4337 +{
4338 + return platform_driver_register(&fsl_qdma_driver);
4339 +}
4340 +subsys_initcall(fsl_qdma_init);
4341 +
4342 +static void __exit fsl_qdma_exit(void)
4343 +{
4344 + platform_driver_unregister(&fsl_qdma_driver);
4345 +}
4346 +module_exit(fsl_qdma_exit);
4347 +
4348 +MODULE_ALIAS("platform:fsl-qdma");
4349 +MODULE_DESCRIPTION("Freescale qDMA engine driver");
4350 +MODULE_LICENSE("GPL v2");