1 From 854c1f0e9574e9b25a55b439608c71e013b34a56 Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Mon, 25 Sep 2017 12:12:20 +0800
4 Subject: [PATCH] dma: support layerscape
5
6 This is an integrated patch for Layerscape DMA support.
7
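A minimal client-side sketch (illustrative only, not part of this patch)
of how these engines are driven through the generic dmaengine API; the
function name is hypothetical, dst_dma/src_dma/len are assumed to be
pre-mapped bus addresses and a length, and <linux/dmaengine.h> is needed:

	static int layerscape_dma_copy_example(dma_addr_t dst_dma,
					       dma_addr_t src_dma, size_t len)
	{
		struct dma_async_tx_descriptor *tx;
		struct dma_chan *chan;
		dma_cap_mask_t mask;
		dma_cookie_t cookie;

		/* grab any channel advertising memcpy capability */
		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);
		chan = dma_request_channel(mask, NULL, NULL);
		if (!chan)
			return -ENODEV;

		tx = chan->device->device_prep_dma_memcpy(chan, dst_dma,
							  src_dma, len,
							  DMA_PREP_INTERRUPT);
		if (!tx) {
			dma_release_channel(chan);
			return -ENOMEM;
		}

		/* submit, kick the engine and busy-wait for completion */
		cookie = dmaengine_submit(tx);
		dma_async_issue_pending(chan);
		dma_sync_wait(chan, cookie);
		dma_release_channel(chan);
		return 0;
	}
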
8 Signed-off-by: jiaheng.fan <jiaheng.fan@nxp.com>
9 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
10 ---
11 drivers/dma/Kconfig | 31 +
12 drivers/dma/Makefile | 3 +
13 drivers/dma/caam_dma.c | 563 +++++++++++++++
14 drivers/dma/dpaa2-qdma/Kconfig | 8 +
15 drivers/dma/dpaa2-qdma/Makefile | 8 +
16 drivers/dma/dpaa2-qdma/dpaa2-qdma.c | 986 +++++++++++++++++++++++++
17 drivers/dma/dpaa2-qdma/dpaa2-qdma.h | 262 +++++++
18 drivers/dma/dpaa2-qdma/dpdmai.c | 454 ++++++++++++
19 drivers/dma/dpaa2-qdma/fsl_dpdmai.h | 521 ++++++++++++++
20 drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h | 222 ++++++
21 drivers/dma/fsl-qdma.c | 1201 +++++++++++++++++++++++++++++++
22 11 files changed, 4259 insertions(+)
23 create mode 100644 drivers/dma/caam_dma.c
24 create mode 100644 drivers/dma/dpaa2-qdma/Kconfig
25 create mode 100644 drivers/dma/dpaa2-qdma/Makefile
26 create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.c
27 create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.h
28 create mode 100644 drivers/dma/dpaa2-qdma/dpdmai.c
29 create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai.h
30 create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
31 create mode 100644 drivers/dma/fsl-qdma.c
32
33 diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
34 index 141aefbe..8caaf091 100644
35 --- a/drivers/dma/Kconfig
36 +++ b/drivers/dma/Kconfig
37 @@ -192,6 +192,20 @@ config FSL_EDMA
38 multiplexing capability for DMA request sources(slot).
39 This module can be found on Freescale Vybrid and LS-1 SoCs.
40
41 +config FSL_QDMA
42 + tristate "Freescale qDMA engine support"
43 + select DMA_ENGINE
44 + select DMA_VIRTUAL_CHANNELS
45 + select DMA_ENGINE_RAID
46 + select ASYNC_TX_ENABLE_CHANNEL_SWITCH
47 + help
48 + Support the Freescale qDMA engine with command queue and legacy mode.
49 + Channel virtualization is supported through enqueuing of DMA jobs to,
50 + or dequeuing DMA jobs from, different work queues.
51 + This module can be found on Freescale LS SoCs.
52 +
53 +source drivers/dma/dpaa2-qdma/Kconfig
54 +
55 config FSL_RAID
56 tristate "Freescale RAID engine Support"
57 depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
58 @@ -564,6 +578,23 @@ config ZX_DMA
59 help
60 Support the DMA engine for ZTE ZX296702 platform devices.
61
62 +config CRYPTO_DEV_FSL_CAAM_DMA
63 + tristate "CAAM DMA engine support"
64 + depends on CRYPTO_DEV_FSL_CAAM_JR
65 + default y
66 + select DMA_ENGINE
67 + select ASYNC_CORE
68 + select ASYNC_TX_ENABLE_CHANNEL_SWITCH
69 + help
70 + Selecting this will offload the DMA operations for users of
 71 +	  the scatter-gather memcpy API to the CAAM via job rings. The
 72 +	  CAAM is a hardware module that provides hardware acceleration for
73 + cryptographic operations. It has a built-in DMA controller that can
74 + be programmed to read/write cryptographic data. This module defines
75 + a DMA driver that uses the DMA capabilities of the CAAM.
76 +
77 + To compile this as a module, choose M here: the module
78 + will be called caam_dma.
79
80 # driver files
81 source "drivers/dma/bestcomm/Kconfig"
82 diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
83 index e4dc9cac..a694da0e 100644
84 --- a/drivers/dma/Makefile
85 +++ b/drivers/dma/Makefile
86 @@ -29,6 +29,8 @@ obj-$(CONFIG_DW_DMAC_CORE) += dw/
87 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
88 obj-$(CONFIG_FSL_DMA) += fsldma.o
89 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
90 +obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
91 +obj-$(CONFIG_FSL_DPAA2_QDMA) += dpaa2-qdma/
92 obj-$(CONFIG_FSL_RAID) += fsl_raid.o
93 obj-$(CONFIG_HSU_DMA) += hsu/
94 obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
95 @@ -67,6 +69,7 @@ obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o
96 obj-$(CONFIG_TI_EDMA) += edma.o
97 obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
98 obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
99 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_DMA) += caam_dma.o
100
101 obj-y += qcom/
102 obj-y += xilinx/
103 diff --git a/drivers/dma/caam_dma.c b/drivers/dma/caam_dma.c
104 new file mode 100644
105 index 00000000..e430b320
106 --- /dev/null
107 +++ b/drivers/dma/caam_dma.c
108 @@ -0,0 +1,563 @@
109 +/*
110 + * caam support for SG DMA
111 + *
112 + * Copyright 2016 Freescale Semiconductor, Inc
113 + * Copyright 2017 NXP
114 + */
115 +
116 +#include <linux/module.h>
117 +#include <linux/platform_device.h>
118 +#include <linux/dma-mapping.h>
119 +#include <linux/interrupt.h>
120 +#include <linux/slab.h>
121 +#include <linux/debugfs.h>
122 +
123 +#include <linux/dmaengine.h>
124 +#include "dmaengine.h"
125 +
126 +#include "../crypto/caam/regs.h"
127 +#include "../crypto/caam/jr.h"
128 +#include "../crypto/caam/error.h"
129 +#include "../crypto/caam/intern.h"
130 +#include "../crypto/caam/desc_constr.h"
131 +#include "../crypto/caam/sg_sw_sec4.h"
132 +
133 +#define DESC_DMA_MEMCPY_LEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / \
134 + CAAM_CMD_SZ)
135 +
 136 +/* This is the maximum chunk size of a DMA transfer. If a buffer is larger
 137 + * than this value, it is internally broken into chunks of at most
 138 + * CAAM_DMA_CHUNK_SIZE bytes, and a DMA transfer request is issued for each
 139 + * chunk. This value is the largest 16-bit number that is a multiple of
 140 + * 256 bytes (the largest configurable CAAM DMA burst size).
 141 + */
142 +#define CAAM_DMA_CHUNK_SIZE 65280
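+
+/* Illustrative arithmetic (not part of the original comment): 65280 is
+ * 0xff00, i.e. 255 * 256; a 200000-byte request is executed by the shared
+ * descriptor below as three 65280-byte chunks followed by one 4160-byte
+ * chunk.
+ */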
143 +
144 +struct caam_dma_sh_desc {
145 + u32 desc[DESC_DMA_MEMCPY_LEN] ____cacheline_aligned;
146 + dma_addr_t desc_dma;
147 +};
148 +
149 +/* caam dma extended descriptor */
150 +struct caam_dma_edesc {
151 + struct dma_async_tx_descriptor async_tx;
152 + struct list_head node;
153 + struct caam_dma_ctx *ctx;
154 + dma_addr_t src_dma;
155 + dma_addr_t dst_dma;
156 + unsigned int src_len;
157 + unsigned int dst_len;
158 + struct sec4_sg_entry *sec4_sg;
159 + u32 jd[] ____cacheline_aligned;
160 +};
161 +
162 +/*
163 + * caam_dma_ctx - per jr/channel context
164 + * @chan: dma channel used by async_tx API
165 + * @node: list_head used to attach to the global dma_ctx_list
166 + * @jrdev: Job Ring device
167 + * @submit_q: queue of pending (submitted, but not enqueued) jobs
 168 + * @done_not_acked: jobs completed by the JR, but possibly not yet acked
169 + * @edesc_lock: protects extended descriptor
170 + */
171 +struct caam_dma_ctx {
172 + struct dma_chan chan;
173 + struct list_head node;
174 + struct device *jrdev;
175 + struct list_head submit_q;
176 + struct list_head done_not_acked;
177 + spinlock_t edesc_lock;
178 +};
179 +
180 +static struct dma_device *dma_dev;
181 +static struct caam_dma_sh_desc *dma_sh_desc;
182 +static LIST_HEAD(dma_ctx_list);
183 +
184 +static dma_cookie_t caam_dma_tx_submit(struct dma_async_tx_descriptor *tx)
185 +{
186 + struct caam_dma_edesc *edesc = NULL;
187 + struct caam_dma_ctx *ctx = NULL;
188 + dma_cookie_t cookie;
189 +
190 + edesc = container_of(tx, struct caam_dma_edesc, async_tx);
191 + ctx = container_of(tx->chan, struct caam_dma_ctx, chan);
192 +
193 + spin_lock_bh(&ctx->edesc_lock);
194 +
195 + cookie = dma_cookie_assign(tx);
196 + list_add_tail(&edesc->node, &ctx->submit_q);
197 +
198 + spin_unlock_bh(&ctx->edesc_lock);
199 +
200 + return cookie;
201 +}
202 +
203 +static unsigned int caam_dma_sg_dma_len(struct scatterlist *sg,
204 + unsigned int nents)
205 +{
206 + unsigned int len;
207 +
208 + for (len = 0; sg && nents; sg = sg_next(sg), nents--)
209 + len += sg_dma_len(sg);
210 +
211 + return len;
212 +}
213 +
214 +static struct caam_dma_edesc *
215 +caam_dma_sg_edesc_alloc(struct dma_chan *chan,
216 + struct scatterlist *dst_sg, unsigned int dst_nents,
217 + struct scatterlist *src_sg, unsigned int src_nents,
218 + unsigned long flags)
219 +{
220 + struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
221 + chan);
222 + struct device *jrdev = ctx->jrdev;
223 + struct caam_dma_edesc *edesc;
224 + struct sec4_sg_entry *sec4_sg;
225 + dma_addr_t sec4_sg_dma_src;
226 + unsigned int sec4_sg_bytes;
227 +
228 + if (!dst_sg || !src_sg || !dst_nents || !src_nents)
229 + return NULL;
230 +
231 + sec4_sg_bytes = (src_nents + dst_nents) * sizeof(*sec4_sg);
232 +
233 + edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
234 + GFP_DMA | GFP_NOWAIT);
235 + if (!edesc)
236 + return ERR_PTR(-ENOMEM);
237 +
238 + edesc->src_len = caam_dma_sg_dma_len(src_sg, src_nents);
239 + edesc->dst_len = caam_dma_sg_dma_len(dst_sg, dst_nents);
240 + if (edesc->src_len != edesc->dst_len) {
241 + dev_err(jrdev, "%s: src(%u) and dst(%u) len mismatch.\n",
242 + __func__, edesc->src_len, edesc->dst_len);
243 + kfree(edesc);
244 + return ERR_PTR(-EINVAL);
245 + }
246 +
247 + dma_async_tx_descriptor_init(&edesc->async_tx, chan);
248 + edesc->async_tx.tx_submit = caam_dma_tx_submit;
249 + edesc->async_tx.flags = flags;
250 + edesc->async_tx.cookie = -EBUSY;
251 +
252 + /* Prepare SEC SGs */
253 + edesc->sec4_sg = (void *)edesc + offsetof(struct caam_dma_edesc, jd) +
254 + DESC_JOB_IO_LEN;
255 +
256 + sec4_sg = edesc->sec4_sg;
257 + sg_to_sec4_sg_last(src_sg, src_nents, sec4_sg, 0);
258 +
259 + sec4_sg += src_nents;
260 + sg_to_sec4_sg_last(dst_sg, dst_nents, sec4_sg, 0);
261 +
262 + sec4_sg_dma_src = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes,
263 + DMA_TO_DEVICE);
264 + if (dma_mapping_error(jrdev, sec4_sg_dma_src)) {
265 + dev_err(jrdev, "error mapping segments to device\n");
266 + kfree(edesc);
267 + return ERR_PTR(-ENOMEM);
268 + }
269 +
270 + edesc->src_dma = sec4_sg_dma_src;
271 + edesc->dst_dma = sec4_sg_dma_src + src_nents * sizeof(*sec4_sg);
272 + edesc->ctx = ctx;
273 +
274 + return edesc;
275 +}
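+
+/* Memory layout of the single allocation made above, a sketch following
+ * the offsetof() arithmetic in caam_dma_sg_edesc_alloc():
+ *
+ *	edesc                            struct caam_dma_edesc header
+ *	edesc->jd                        job descriptor (DESC_JOB_IO_LEN)
+ *	edesc->jd + DESC_JOB_IO_LEN      sec4 S/G table: src_nents source
+ *	                                 entries, then dst_nents destination
+ *	                                 entries (mapped DMA_TO_DEVICE)
+ */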
276 +
277 +static void caam_jr_chan_free_edesc(struct caam_dma_edesc *edesc)
278 +{
279 + struct caam_dma_ctx *ctx = edesc->ctx;
280 + struct caam_dma_edesc *_edesc = NULL;
281 +
282 + spin_lock_bh(&ctx->edesc_lock);
283 +
284 + list_add_tail(&edesc->node, &ctx->done_not_acked);
285 + list_for_each_entry_safe(edesc, _edesc, &ctx->done_not_acked, node) {
286 + if (async_tx_test_ack(&edesc->async_tx)) {
287 + list_del(&edesc->node);
288 + kfree(edesc);
289 + }
290 + }
291 +
292 + spin_unlock_bh(&ctx->edesc_lock);
293 +}
294 +
295 +static void caam_dma_done(struct device *dev, u32 *hwdesc, u32 err,
296 + void *context)
297 +{
298 + struct caam_dma_edesc *edesc = context;
299 + struct caam_dma_ctx *ctx = edesc->ctx;
300 + dma_async_tx_callback callback;
301 + void *callback_param;
302 +
303 + if (err)
304 + caam_jr_strstatus(ctx->jrdev, err);
305 +
306 + dma_run_dependencies(&edesc->async_tx);
307 +
308 + spin_lock_bh(&ctx->edesc_lock);
309 + dma_cookie_complete(&edesc->async_tx);
310 + spin_unlock_bh(&ctx->edesc_lock);
311 +
312 + callback = edesc->async_tx.callback;
313 + callback_param = edesc->async_tx.callback_param;
314 +
315 + dma_descriptor_unmap(&edesc->async_tx);
316 +
317 + caam_jr_chan_free_edesc(edesc);
318 +
319 + if (callback)
320 + callback(callback_param);
321 +}
322 +
323 +static void caam_dma_sg_init_job_desc(struct caam_dma_edesc *edesc)
324 +{
325 + u32 *jd = edesc->jd;
326 + u32 *sh_desc = dma_sh_desc->desc;
327 + dma_addr_t desc_dma = dma_sh_desc->desc_dma;
328 +
329 + /* init the job descriptor */
330 + init_job_desc_shared(jd, desc_dma, desc_len(sh_desc), HDR_REVERSE);
331 +
332 + /* set SEQIN PTR */
333 + append_seq_in_ptr(jd, edesc->src_dma, edesc->src_len, LDST_SGF);
334 +
335 + /* set SEQOUT PTR */
336 + append_seq_out_ptr(jd, edesc->dst_dma, edesc->dst_len, LDST_SGF);
337 +
338 +#ifdef DEBUG
339 + print_hex_dump(KERN_ERR, "caam dma desc@" __stringify(__LINE__) ": ",
340 + DUMP_PREFIX_ADDRESS, 16, 4, jd, desc_bytes(jd), 1);
341 +#endif
342 +}
343 +
344 +/* This function can be called from an interrupt context */
345 +static struct dma_async_tx_descriptor *
346 +caam_dma_prep_sg(struct dma_chan *chan, struct scatterlist *dst_sg,
347 + unsigned int dst_nents, struct scatterlist *src_sg,
348 + unsigned int src_nents, unsigned long flags)
349 +{
350 + struct caam_dma_edesc *edesc;
351 +
352 + /* allocate extended descriptor */
353 + edesc = caam_dma_sg_edesc_alloc(chan, dst_sg, dst_nents, src_sg,
354 + src_nents, flags);
355 + if (IS_ERR_OR_NULL(edesc))
356 + return ERR_CAST(edesc);
357 +
358 + /* Initialize job descriptor */
359 + caam_dma_sg_init_job_desc(edesc);
360 +
361 + return &edesc->async_tx;
362 +}
363 +
364 +static void caam_dma_memcpy_init_job_desc(struct caam_dma_edesc *edesc)
365 +{
366 + u32 *jd = edesc->jd;
367 + u32 *sh_desc = dma_sh_desc->desc;
368 + dma_addr_t desc_dma = dma_sh_desc->desc_dma;
369 +
370 + /* init the job descriptor */
371 + init_job_desc_shared(jd, desc_dma, desc_len(sh_desc), HDR_REVERSE);
372 +
373 + /* set SEQIN PTR */
374 + append_seq_in_ptr(jd, edesc->src_dma, edesc->src_len, 0);
375 +
376 + /* set SEQOUT PTR */
377 + append_seq_out_ptr(jd, edesc->dst_dma, edesc->dst_len, 0);
378 +
379 +#ifdef DEBUG
380 + print_hex_dump(KERN_ERR, "caam dma desc@" __stringify(__LINE__) ": ",
381 + DUMP_PREFIX_ADDRESS, 16, 4, jd, desc_bytes(jd), 1);
382 +#endif
383 +}
384 +
385 +static struct dma_async_tx_descriptor *
386 +caam_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
387 + size_t len, unsigned long flags)
388 +{
389 + struct caam_dma_edesc *edesc;
390 + struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
391 + chan);
392 +
393 + edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | GFP_NOWAIT);
394 + if (!edesc)
395 + return ERR_PTR(-ENOMEM);
396 +
397 + dma_async_tx_descriptor_init(&edesc->async_tx, chan);
398 + edesc->async_tx.tx_submit = caam_dma_tx_submit;
399 + edesc->async_tx.flags = flags;
400 + edesc->async_tx.cookie = -EBUSY;
401 +
402 + edesc->src_dma = src;
403 + edesc->src_len = len;
404 + edesc->dst_dma = dst;
405 + edesc->dst_len = len;
406 + edesc->ctx = ctx;
407 +
408 + caam_dma_memcpy_init_job_desc(edesc);
409 +
410 + return &edesc->async_tx;
411 +}
412 +
413 +/* This function can be called in an interrupt context */
414 +static void caam_dma_issue_pending(struct dma_chan *chan)
415 +{
416 + struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
417 + chan);
418 + struct caam_dma_edesc *edesc, *_edesc;
419 +
420 + spin_lock_bh(&ctx->edesc_lock);
421 + list_for_each_entry_safe(edesc, _edesc, &ctx->submit_q, node) {
422 + if (caam_jr_enqueue(ctx->jrdev, edesc->jd,
423 + caam_dma_done, edesc) < 0)
424 + break;
425 + list_del(&edesc->node);
426 + }
427 + spin_unlock_bh(&ctx->edesc_lock);
428 +}
429 +
430 +static void caam_dma_free_chan_resources(struct dma_chan *chan)
431 +{
432 + struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
433 + chan);
434 + struct caam_dma_edesc *edesc, *_edesc;
435 +
436 + spin_lock_bh(&ctx->edesc_lock);
437 + list_for_each_entry_safe(edesc, _edesc, &ctx->submit_q, node) {
438 + list_del(&edesc->node);
439 + kfree(edesc);
440 + }
441 + list_for_each_entry_safe(edesc, _edesc, &ctx->done_not_acked, node) {
442 + list_del(&edesc->node);
443 + kfree(edesc);
444 + }
445 + spin_unlock_bh(&ctx->edesc_lock);
446 +}
447 +
448 +static int caam_dma_jr_chan_bind(void)
449 +{
450 + struct device *jrdev;
451 + struct caam_dma_ctx *ctx;
452 + int bonds = 0;
453 + int i;
454 +
455 + for (i = 0; i < caam_jr_driver_probed(); i++) {
456 + jrdev = caam_jridx_alloc(i);
457 + if (IS_ERR(jrdev)) {
458 + pr_err("job ring device %d allocation failed\n", i);
459 + continue;
460 + }
461 +
462 + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
463 + if (!ctx) {
464 + caam_jr_free(jrdev);
465 + continue;
466 + }
467 +
468 + ctx->chan.device = dma_dev;
469 + ctx->chan.private = ctx;
470 +
471 + ctx->jrdev = jrdev;
472 +
473 + INIT_LIST_HEAD(&ctx->submit_q);
474 + INIT_LIST_HEAD(&ctx->done_not_acked);
475 + INIT_LIST_HEAD(&ctx->node);
476 + spin_lock_init(&ctx->edesc_lock);
477 +
478 + dma_cookie_init(&ctx->chan);
479 +
480 + /* add the context of this channel to the context list */
481 + list_add_tail(&ctx->node, &dma_ctx_list);
482 +
483 + /* add this channel to the device chan list */
484 + list_add_tail(&ctx->chan.device_node, &dma_dev->channels);
485 +
486 + bonds++;
487 + }
488 +
489 + return bonds;
490 +}
491 +
492 +static inline void caam_jr_dma_free(struct dma_chan *chan)
493 +{
494 + struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
495 + chan);
496 +
497 + list_del(&ctx->node);
498 + list_del(&chan->device_node);
499 + caam_jr_free(ctx->jrdev);
500 + kfree(ctx);
501 +}
502 +
503 +static void set_caam_dma_desc(u32 *desc)
504 +{
505 + u32 *jmp_cmd;
506 +
507 + /* dma shared descriptor */
508 + init_sh_desc(desc, HDR_SHARE_NEVER | (1 << HDR_START_IDX_SHIFT));
509 +
510 + /* REG1 = CAAM_DMA_CHUNK_SIZE */
511 + append_math_add_imm_u32(desc, REG1, ZERO, IMM, CAAM_DMA_CHUNK_SIZE);
512 +
513 + /* REG0 = SEQINLEN - CAAM_DMA_CHUNK_SIZE */
514 + append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, CAAM_DMA_CHUNK_SIZE);
515 +
516 + /* if (REG0 > 0)
517 + * jmp to LABEL1
518 + */
519 + jmp_cmd = append_jump(desc, JUMP_TEST_INVALL | JUMP_COND_MATH_N |
520 + JUMP_COND_MATH_Z);
521 +
522 + /* REG1 = SEQINLEN */
523 + append_math_sub(desc, REG1, SEQINLEN, ZERO, CAAM_CMD_SZ);
524 +
525 + /* LABEL1 */
526 + set_jump_tgt_here(desc, jmp_cmd);
527 +
528 + /* VARSEQINLEN = REG1 */
529 + append_math_add(desc, VARSEQINLEN, REG1, ZERO, CAAM_CMD_SZ);
530 +
531 + /* VARSEQOUTLEN = REG1 */
532 + append_math_add(desc, VARSEQOUTLEN, REG1, ZERO, CAAM_CMD_SZ);
533 +
534 + /* do FIFO STORE */
535 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_METADATA | LDST_VLF);
536 +
537 + /* do FIFO LOAD */
538 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
539 + FIFOLD_TYPE_IFIFO | LDST_VLF);
540 +
541 + /* if (REG0 > 0)
542 + * jmp 0xF8 (after shared desc header)
543 + */
544 + append_jump(desc, JUMP_TEST_INVALL | JUMP_COND_MATH_N |
545 + JUMP_COND_MATH_Z | 0xF8);
546 +
547 +#ifdef DEBUG
548 + print_hex_dump(KERN_ERR, "caam dma shdesc@" __stringify(__LINE__) ": ",
549 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
550 +#endif
551 +}
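+
+/* The shared descriptor built above behaves roughly like the following C
+ * pseudo-loop, executed by the CAAM for each job (illustrative sketch
+ * only; input and output sequence lengths are assumed equal):
+ *
+ *	while (seqinlen) {
+ *		n = min(seqinlen, CAAM_DMA_CHUNK_SIZE);
+ *		fifo_store(output_sequence, n);
+ *		fifo_load(input_sequence, n);
+ *	}
+ */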
552 +
553 +static int __init caam_dma_probe(struct platform_device *pdev)
554 +{
555 + struct device *dev = &pdev->dev;
556 + struct device *ctrldev = dev->parent;
557 + struct dma_chan *chan, *_chan;
558 + u32 *sh_desc;
559 + int err = -ENOMEM;
560 + int bonds;
561 +
562 + if (!caam_jr_driver_probed()) {
 563 +		dev_info(dev, "Deferring probe until the JR driver has probed\n");
564 + return -EPROBE_DEFER;
565 + }
566 +
567 + dma_dev = kzalloc(sizeof(*dma_dev), GFP_KERNEL);
568 + if (!dma_dev)
569 + return -ENOMEM;
570 +
571 + dma_sh_desc = kzalloc(sizeof(*dma_sh_desc), GFP_KERNEL | GFP_DMA);
572 + if (!dma_sh_desc)
573 + goto desc_err;
574 +
575 + sh_desc = dma_sh_desc->desc;
576 + set_caam_dma_desc(sh_desc);
577 + dma_sh_desc->desc_dma = dma_map_single(ctrldev, sh_desc,
578 + desc_bytes(sh_desc),
579 + DMA_TO_DEVICE);
580 + if (dma_mapping_error(ctrldev, dma_sh_desc->desc_dma)) {
581 + dev_err(dev, "unable to map dma descriptor\n");
582 + goto map_err;
583 + }
584 +
585 + INIT_LIST_HEAD(&dma_dev->channels);
586 +
587 + bonds = caam_dma_jr_chan_bind();
588 + if (!bonds) {
589 + err = -ENODEV;
590 + goto jr_bind_err;
591 + }
592 +
593 + dma_dev->dev = dev;
594 + dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
595 + dma_cap_set(DMA_SG, dma_dev->cap_mask);
596 + dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
597 + dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
598 + dma_dev->device_tx_status = dma_cookie_status;
599 + dma_dev->device_issue_pending = caam_dma_issue_pending;
600 + dma_dev->device_prep_dma_sg = caam_dma_prep_sg;
601 + dma_dev->device_prep_dma_memcpy = caam_dma_prep_memcpy;
602 + dma_dev->device_free_chan_resources = caam_dma_free_chan_resources;
603 +
604 + err = dma_async_device_register(dma_dev);
605 + if (err) {
606 + dev_err(dev, "Failed to register CAAM DMA engine\n");
607 + goto jr_bind_err;
608 + }
609 +
610 + dev_info(dev, "caam dma support with %d job rings\n", bonds);
611 +
612 + return err;
613 +
614 +jr_bind_err:
615 + list_for_each_entry_safe(chan, _chan, &dma_dev->channels, device_node)
616 + caam_jr_dma_free(chan);
617 +
618 + dma_unmap_single(ctrldev, dma_sh_desc->desc_dma, desc_bytes(sh_desc),
619 + DMA_TO_DEVICE);
620 +map_err:
621 + kfree(dma_sh_desc);
622 +desc_err:
623 + kfree(dma_dev);
624 + return err;
625 +}
626 +
627 +static int caam_dma_remove(struct platform_device *pdev)
628 +{
629 + struct device *dev = &pdev->dev;
630 + struct device *ctrldev = dev->parent;
631 + struct caam_dma_ctx *ctx, *_ctx;
632 +
633 + dma_async_device_unregister(dma_dev);
634 +
635 + list_for_each_entry_safe(ctx, _ctx, &dma_ctx_list, node) {
636 + list_del(&ctx->node);
637 + caam_jr_free(ctx->jrdev);
638 + kfree(ctx);
639 + }
640 +
641 + dma_unmap_single(ctrldev, dma_sh_desc->desc_dma,
642 + desc_bytes(dma_sh_desc->desc), DMA_TO_DEVICE);
643 +
644 + kfree(dma_sh_desc);
645 + kfree(dma_dev);
646 +
647 + dev_info(dev, "caam dma support disabled\n");
648 + return 0;
649 +}
650 +
651 +static const struct of_device_id caam_dma_match[] = {
652 + { .compatible = "fsl,sec-v5.4-dma", },
653 + { .compatible = "fsl,sec-v5.0-dma", },
654 + { .compatible = "fsl,sec-v4.0-dma", },
655 + {},
656 +};
657 +MODULE_DEVICE_TABLE(of, caam_dma_match);
658 +
659 +static struct platform_driver caam_dma_driver = {
660 + .driver = {
661 + .name = "caam-dma",
662 + .of_match_table = caam_dma_match,
663 + },
664 + .probe = caam_dma_probe,
665 + .remove = caam_dma_remove,
666 +};
667 +module_platform_driver(caam_dma_driver);
668 +
669 +MODULE_LICENSE("Dual BSD/GPL");
670 +MODULE_DESCRIPTION("NXP CAAM support for SG DMA");
671 +MODULE_AUTHOR("NXP Semiconductors");
672 diff --git a/drivers/dma/dpaa2-qdma/Kconfig b/drivers/dma/dpaa2-qdma/Kconfig
673 new file mode 100644
674 index 00000000..084e34bf
675 --- /dev/null
676 +++ b/drivers/dma/dpaa2-qdma/Kconfig
677 @@ -0,0 +1,8 @@
678 +menuconfig FSL_DPAA2_QDMA
679 + tristate "NXP DPAA2 QDMA"
680 + depends on FSL_MC_BUS && FSL_MC_DPIO
681 + select DMA_ENGINE
682 + select DMA_VIRTUAL_CHANNELS
683 + ---help---
684 + NXP Data Path Acceleration Architecture 2 QDMA driver,
685 + using the NXP MC bus driver.
686 diff --git a/drivers/dma/dpaa2-qdma/Makefile b/drivers/dma/dpaa2-qdma/Makefile
687 new file mode 100644
688 index 00000000..ba599ac6
689 --- /dev/null
690 +++ b/drivers/dma/dpaa2-qdma/Makefile
691 @@ -0,0 +1,8 @@
692 +#
 693 +# Makefile for the NXP DPAA2 qDMA driver
694 +#
695 +ccflags-y += -DVERSION=\"\"
696 +
697 +obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma.o
698 +
699 +fsl-dpaa2-qdma-objs := dpaa2-qdma.o dpdmai.o
700 diff --git a/drivers/dma/dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/dpaa2-qdma/dpaa2-qdma.c
701 new file mode 100644
702 index 00000000..ad6b03f7
703 --- /dev/null
704 +++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.c
705 @@ -0,0 +1,986 @@
706 +/*
707 + * drivers/dma/dpaa2-qdma/dpaa2-qdma.c
708 + *
709 + * Copyright 2015-2017 NXP Semiconductor, Inc.
710 + * Author: Changming Huang <jerry.huang@nxp.com>
711 + *
712 + * Driver for the NXP QDMA engine with QMan mode.
713 + * Channel virtualization is supported through enqueuing of DMA jobs to,
 714 + * or dequeuing DMA jobs from, different work queues via the QMan portal.
715 + * This module can be found on NXP LS2 SoCs.
716 + *
717 + * This program is free software; you can redistribute it and/or modify it
718 + * under the terms of the GNU General Public License as published by the
719 + * Free Software Foundation; either version 2 of the License, or (at your
720 + * option) any later version.
721 + */
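+
+/* Datapath overview (summary comment, not part of the original patch):
+ * the prep callbacks below build one DPAA2 frame descriptor per job,
+ * dpaa2_qdma_issue_pending() enqueues it to a DPDMAI tx frame queue
+ * through the QBMan portal, and dpaa2_qdma_fqdan_cb() later dequeues the
+ * completion from the rx frame queue and completes the matching virt-dma
+ * descriptor.
+ */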
722 +
723 +#include <linux/init.h>
724 +#include <linux/module.h>
725 +#include <linux/interrupt.h>
726 +#include <linux/clk.h>
727 +#include <linux/dma-mapping.h>
728 +#include <linux/dmapool.h>
729 +#include <linux/slab.h>
730 +#include <linux/spinlock.h>
731 +#include <linux/of.h>
732 +#include <linux/of_device.h>
733 +#include <linux/of_address.h>
734 +#include <linux/of_irq.h>
735 +#include <linux/of_dma.h>
736 +#include <linux/types.h>
737 +#include <linux/delay.h>
738 +#include <linux/iommu.h>
739 +
740 +#include "../virt-dma.h"
741 +
742 +#include "../../../drivers/staging/fsl-mc/include/mc.h"
743 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
744 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
745 +#include "fsl_dpdmai_cmd.h"
746 +#include "fsl_dpdmai.h"
747 +#include "dpaa2-qdma.h"
748 +
749 +static bool smmu_disable = true;
750 +
751 +static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
752 +{
753 + return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
754 +}
755 +
756 +static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
757 +{
758 + return container_of(vd, struct dpaa2_qdma_comp, vdesc);
759 +}
760 +
761 +static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
762 +{
763 + return 0;
764 +}
765 +
766 +static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
767 +{
768 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
769 + unsigned long flags;
770 + LIST_HEAD(head);
771 +
772 + spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
773 + vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
774 + spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);
775 +
776 + vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);
777 +}
778 +
779 +/*
780 + * Request a command descriptor for enqueue.
781 + */
782 +static struct dpaa2_qdma_comp *
783 +dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
784 +{
785 + struct dpaa2_qdma_comp *comp_temp = NULL;
786 + unsigned long flags;
787 +
788 + spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
789 + if (list_empty(&dpaa2_chan->comp_free)) {
790 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
791 + comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
792 + if (!comp_temp)
793 + goto err;
794 + comp_temp->fd_virt_addr = dma_pool_alloc(dpaa2_chan->fd_pool,
795 + GFP_NOWAIT, &comp_temp->fd_bus_addr);
796 + if (!comp_temp->fd_virt_addr)
797 + goto err;
798 +
799 + comp_temp->fl_virt_addr =
800 + (void *)((struct dpaa2_fd *)
801 + comp_temp->fd_virt_addr + 1);
802 + comp_temp->fl_bus_addr = comp_temp->fd_bus_addr +
803 + sizeof(struct dpaa2_fd);
804 + comp_temp->desc_virt_addr =
805 + (void *)((struct dpaa2_frame_list *)
806 + comp_temp->fl_virt_addr + 3);
807 + comp_temp->desc_bus_addr = comp_temp->fl_bus_addr +
808 + sizeof(struct dpaa2_frame_list) * 3;
809 +
810 + comp_temp->qchan = dpaa2_chan;
811 + comp_temp->sg_blk_num = 0;
812 + INIT_LIST_HEAD(&comp_temp->sg_src_head);
813 + INIT_LIST_HEAD(&comp_temp->sg_dst_head);
814 + return comp_temp;
815 + }
816 + comp_temp = list_first_entry(&dpaa2_chan->comp_free,
817 + struct dpaa2_qdma_comp, list);
818 + list_del(&comp_temp->list);
819 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
820 +
821 + comp_temp->qchan = dpaa2_chan;
822 +err:
823 + return comp_temp;
824 +}
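+
+/* Layout of one completion area carved out of fd_pool, sketching the
+ * pointer arithmetic above:
+ *
+ *	fd_bus_addr    struct dpaa2_fd          (frame descriptor)
+ *	fl_bus_addr    struct dpaa2_frame_list  x3 (descriptor, source,
+ *	                                        destination)
+ *	desc_bus_addr  struct dpaa2_qdma_sd_d   x2 (source and destination
+ *	                                        qDMA descriptors)
+ */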
825 +
826 +static void dpaa2_qdma_populate_fd(uint32_t format,
827 + struct dpaa2_qdma_comp *dpaa2_comp)
828 +{
829 + struct dpaa2_fd *fd;
830 +
831 + fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
832 + memset(fd, 0, sizeof(struct dpaa2_fd));
833 +
 834 +	/* populate the FD */
 835 +	fd->simple.addr = dpaa2_comp->fl_bus_addr;
 836 +	/* Bypass memory translation, frame list format, short length disable */
 837 +	/* BMT must be disabled when fsl-mc uses IOVA addressing */
838 + if (smmu_disable)
839 + fd->simple.bpid = QMAN_FD_BMT_ENABLE;
840 + fd->simple.format_offset = QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE;
841 +
842 + fd->simple.frc = format | QDMA_SER_CTX;
843 +}
844 +
845 +/* first frame list for descriptor buffer */
846 +static void dpaa2_qdma_populate_first_framel(
847 + struct dpaa2_frame_list *f_list,
848 + struct dpaa2_qdma_comp *dpaa2_comp)
849 +{
850 + struct dpaa2_qdma_sd_d *sdd;
851 +
852 + sdd = (struct dpaa2_qdma_sd_d *)dpaa2_comp->desc_virt_addr;
853 + memset(sdd, 0, 2 * (sizeof(*sdd)));
854 + /* source and destination descriptor */
855 + sdd->cmd = QDMA_SD_CMD_RDTTYPE_COHERENT; /* source descriptor CMD */
856 + sdd++;
857 + sdd->cmd = QDMA_DD_CMD_WRTTYPE_COHERENT; /* dest descriptor CMD */
858 +
859 + memset(f_list, 0, sizeof(struct dpaa2_frame_list));
860 + /* first frame list to source descriptor */
861 + f_list->addr_lo = dpaa2_comp->desc_bus_addr;
862 + f_list->addr_hi = (dpaa2_comp->desc_bus_addr >> 32);
863 + f_list->data_len.data_len_sl0 = 0x20; /* source/destination desc len */
864 + f_list->fmt = QDMA_FL_FMT_SBF; /* single buffer frame */
865 + if (smmu_disable)
866 + f_list->bmt = QDMA_FL_BMT_ENABLE; /* bypass memory translation */
867 + f_list->sl = QDMA_FL_SL_LONG; /* long length */
868 + f_list->f = 0; /* not the last frame list */
869 +}
870 +
871 +/* source and destination frame list */
872 +static void dpaa2_qdma_populate_frames(struct dpaa2_frame_list *f_list,
873 + dma_addr_t dst, dma_addr_t src, size_t len, uint8_t fmt)
874 +{
875 + /* source frame list to source buffer */
876 + memset(f_list, 0, sizeof(struct dpaa2_frame_list));
877 + f_list->addr_lo = src;
878 + f_list->addr_hi = (src >> 32);
879 + f_list->data_len.data_len_sl0 = len;
880 + f_list->fmt = fmt; /* single buffer frame or scatter gather frame */
881 + if (smmu_disable)
882 + f_list->bmt = QDMA_FL_BMT_ENABLE; /* bypass memory translation */
883 + f_list->sl = QDMA_FL_SL_LONG; /* long length */
884 + f_list->f = 0; /* not the last frame list */
885 +
886 + f_list++;
887 + /* destination frame list to destination buffer */
888 + memset(f_list, 0, sizeof(struct dpaa2_frame_list));
889 + f_list->addr_lo = dst;
890 + f_list->addr_hi = (dst >> 32);
891 + f_list->data_len.data_len_sl0 = len;
892 + f_list->fmt = fmt; /* single buffer frame or scatter gather frame */
893 + if (smmu_disable)
894 + f_list->bmt = QDMA_FL_BMT_ENABLE; /* bypass memory translation */
895 + f_list->sl = QDMA_FL_SL_LONG; /* long length */
896 + f_list->f = QDMA_FL_F; /* Final bit: 1, for last frame list */
897 +}
898 +
899 +static struct dma_async_tx_descriptor *dpaa2_qdma_prep_memcpy(
900 + struct dma_chan *chan, dma_addr_t dst,
901 + dma_addr_t src, size_t len, unsigned long flags)
902 +{
903 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
904 + struct dpaa2_qdma_comp *dpaa2_comp;
905 + struct dpaa2_frame_list *f_list;
906 + uint32_t format;
907 +
908 + dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
909 +
910 +#ifdef LONG_FORMAT
911 + format = QDMA_FD_LONG_FORMAT;
912 +#else
913 + format = QDMA_FD_SHORT_FORMAT;
914 +#endif
915 + /* populate Frame descriptor */
916 + dpaa2_qdma_populate_fd(format, dpaa2_comp);
917 +
918 + f_list = (struct dpaa2_frame_list *)dpaa2_comp->fl_virt_addr;
919 +
920 +#ifdef LONG_FORMAT
 921 +	/* first frame list for descriptor buffer (long format) */
922 + dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp);
923 +
924 + f_list++;
925 +#endif
926 +
927 + dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);
928 +
929 + return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
930 +}
931 +
932 +static struct qdma_sg_blk *dpaa2_qdma_get_sg_blk(
933 + struct dpaa2_qdma_comp *dpaa2_comp,
934 + struct dpaa2_qdma_chan *dpaa2_chan)
935 +{
936 + struct qdma_sg_blk *sg_blk = NULL;
937 + dma_addr_t phy_sgb;
938 + unsigned long flags;
939 +
940 + spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
941 + if (list_empty(&dpaa2_chan->sgb_free)) {
942 + sg_blk = (struct qdma_sg_blk *)dma_pool_alloc(
943 + dpaa2_chan->sg_blk_pool,
944 + GFP_NOWAIT, &phy_sgb);
945 + if (!sg_blk) {
946 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
947 + return sg_blk;
948 + }
949 + sg_blk->blk_virt_addr = (void *)(sg_blk + 1);
950 + sg_blk->blk_bus_addr = phy_sgb + sizeof(*sg_blk);
951 + } else {
952 + sg_blk = list_first_entry(&dpaa2_chan->sgb_free,
953 + struct qdma_sg_blk, list);
954 + list_del(&sg_blk->list);
955 + }
956 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
957 +
958 + return sg_blk;
959 +}
960 +
961 +static uint32_t dpaa2_qdma_populate_sg(struct device *dev,
962 + struct dpaa2_qdma_chan *dpaa2_chan,
963 + struct dpaa2_qdma_comp *dpaa2_comp,
964 + struct scatterlist *dst_sg, u32 dst_nents,
965 + struct scatterlist *src_sg, u32 src_nents)
966 +{
967 + struct dpaa2_qdma_sg *src_sge;
968 + struct dpaa2_qdma_sg *dst_sge;
969 + struct qdma_sg_blk *sg_blk;
970 + struct qdma_sg_blk *sg_blk_dst;
971 + dma_addr_t src;
972 + dma_addr_t dst;
973 + uint32_t num;
974 + uint32_t blocks;
975 + uint32_t len = 0;
976 + uint32_t total_len = 0;
977 + int i, j = 0;
978 +
979 + num = min(dst_nents, src_nents);
980 + blocks = num / (NUM_SG_PER_BLK - 1);
981 + if (num % (NUM_SG_PER_BLK - 1))
982 + blocks += 1;
983 + if (dpaa2_comp->sg_blk_num < blocks) {
984 + len = blocks - dpaa2_comp->sg_blk_num;
985 + for (i = 0; i < len; i++) {
986 + /* source sg blocks */
987 + sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan);
988 + if (!sg_blk)
989 + return 0;
990 + list_add_tail(&sg_blk->list, &dpaa2_comp->sg_src_head);
991 + /* destination sg blocks */
992 + sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan);
993 + if (!sg_blk)
994 + return 0;
995 + list_add_tail(&sg_blk->list, &dpaa2_comp->sg_dst_head);
996 + }
997 + } else {
998 + len = dpaa2_comp->sg_blk_num - blocks;
999 + for (i = 0; i < len; i++) {
1000 + spin_lock(&dpaa2_chan->queue_lock);
1001 + /* handle source sg blocks */
1002 + sg_blk = list_first_entry(&dpaa2_comp->sg_src_head,
1003 + struct qdma_sg_blk, list);
1004 + list_del(&sg_blk->list);
1005 + list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free);
1006 + /* handle destination sg blocks */
1007 + sg_blk = list_first_entry(&dpaa2_comp->sg_dst_head,
1008 + struct qdma_sg_blk, list);
1009 + list_del(&sg_blk->list);
1010 + list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free);
1011 + spin_unlock(&dpaa2_chan->queue_lock);
1012 + }
1013 + }
1014 + dpaa2_comp->sg_blk_num = blocks;
1015 +
 1016 +	/* get the first source sg physical address */
1017 + sg_blk = list_first_entry(&dpaa2_comp->sg_src_head,
1018 + struct qdma_sg_blk, list);
1019 + dpaa2_comp->sge_src_bus_addr = sg_blk->blk_bus_addr;
 1020 +	/* get the first destination sg physical address */
1021 + sg_blk_dst = list_first_entry(&dpaa2_comp->sg_dst_head,
1022 + struct qdma_sg_blk, list);
1023 + dpaa2_comp->sge_dst_bus_addr = sg_blk_dst->blk_bus_addr;
1024 +
1025 + for (i = 0; i < blocks; i++) {
1026 + src_sge = (struct dpaa2_qdma_sg *)sg_blk->blk_virt_addr;
1027 + dst_sge = (struct dpaa2_qdma_sg *)sg_blk_dst->blk_virt_addr;
1028 +
1029 + for (j = 0; j < (NUM_SG_PER_BLK - 1); j++) {
1030 + len = min(sg_dma_len(dst_sg), sg_dma_len(src_sg));
 1031 +			if (!len)
1032 + goto fetch;
1033 + total_len += len;
1034 + src = sg_dma_address(src_sg);
1035 + dst = sg_dma_address(dst_sg);
1036 +
1037 + /* source SG */
1038 + src_sge->addr_lo = src;
1039 + src_sge->addr_hi = (src >> 32);
1040 + src_sge->data_len.data_len_sl0 = len;
1041 + src_sge->ctrl.sl = QDMA_SG_SL_LONG;
1042 + src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
1043 + /* destination SG */
1044 + dst_sge->addr_lo = dst;
1045 + dst_sge->addr_hi = (dst >> 32);
1046 + dst_sge->data_len.data_len_sl0 = len;
1047 + dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
1048 + dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
1049 +fetch:
1050 + num--;
 1051 +			if (!num) {
1052 + src_sge->ctrl.f = QDMA_SG_F;
1053 + dst_sge->ctrl.f = QDMA_SG_F;
1054 + goto end;
1055 + }
1056 + dst_sg = sg_next(dst_sg);
1057 + src_sg = sg_next(src_sg);
1058 + src_sge++;
1059 + dst_sge++;
1060 + if (j == (NUM_SG_PER_BLK - 2)) {
1061 + /* for next blocks, extension */
1062 + sg_blk = list_next_entry(sg_blk, list);
1063 + sg_blk_dst = list_next_entry(sg_blk_dst, list);
1064 + src_sge->addr_lo = sg_blk->blk_bus_addr;
1065 + src_sge->addr_hi = sg_blk->blk_bus_addr >> 32;
1066 + src_sge->ctrl.sl = QDMA_SG_SL_LONG;
1067 + src_sge->ctrl.fmt = QDMA_SG_FMT_SGTE;
1068 + dst_sge->addr_lo = sg_blk_dst->blk_bus_addr;
1069 + dst_sge->addr_hi =
1070 + sg_blk_dst->blk_bus_addr >> 32;
1071 + dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
1072 + dst_sge->ctrl.fmt = QDMA_SG_FMT_SGTE;
1073 + }
1074 + }
1075 + }
1076 +
1077 +end:
1078 + return total_len;
1079 +}
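+
+/* Note on the S/G table geometry (summary of the loop above): each
+ * qdma_sg_blk holds NUM_SG_PER_BLK entries, the last of which is written
+ * as a QDMA_SG_FMT_SGTE extension entry chaining to the next block, so
+ * every block carries at most NUM_SG_PER_BLK - 1 data entries.
+ */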
1080 +
1081 +static struct dma_async_tx_descriptor *dpaa2_qdma_prep_sg(
1082 + struct dma_chan *chan,
1083 + struct scatterlist *dst_sg, u32 dst_nents,
1084 + struct scatterlist *src_sg, u32 src_nents,
1085 + unsigned long flags)
1086 +{
1087 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
1088 + struct dpaa2_qdma_comp *dpaa2_comp;
1089 + struct dpaa2_frame_list *f_list;
1090 + struct device *dev = dpaa2_chan->qdma->priv->dev;
1091 + uint32_t total_len = 0;
1092 +
1093 + /* basic sanity checks */
1094 + if (dst_nents == 0 || src_nents == 0)
1095 + return NULL;
1096 +
1097 + if (dst_sg == NULL || src_sg == NULL)
1098 + return NULL;
1099 +
1100 + /* get the descriptors required */
1101 + dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
1102 +
1103 + /* populate Frame descriptor */
1104 + dpaa2_qdma_populate_fd(QDMA_FD_LONG_FORMAT, dpaa2_comp);
1105 +
1106 + /* prepare Scatter gather entry for source and destination */
1107 + total_len = dpaa2_qdma_populate_sg(dev, dpaa2_chan,
1108 + dpaa2_comp, dst_sg, dst_nents, src_sg, src_nents);
1109 +
1110 + f_list = (struct dpaa2_frame_list *)dpaa2_comp->fl_virt_addr;
1111 + /* first frame list for descriptor buffer */
1112 + dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp);
1113 + f_list++;
1114 + /* prepare Scatter gather entry for source and destination */
1115 + /* populate source and destination frame list table */
1116 + dpaa2_qdma_populate_frames(f_list, dpaa2_comp->sge_dst_bus_addr,
1117 + dpaa2_comp->sge_src_bus_addr,
1118 + total_len, QDMA_FL_FMT_SGE);
1119 +
1120 + return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
1121 +}
1122 +
1123 +static enum dma_status dpaa2_qdma_tx_status(struct dma_chan *chan,
1124 + dma_cookie_t cookie, struct dma_tx_state *txstate)
1125 +{
1126 + return dma_cookie_status(chan, cookie, txstate);
1127 +}
1128 +
1129 +static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
1130 +{
1131 +}
1132 +
1133 +static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
1134 +{
1135 + struct dpaa2_qdma_comp *dpaa2_comp;
1136 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
1137 + struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
1138 + struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
1139 + struct virt_dma_desc *vdesc;
1140 + struct dpaa2_fd *fd;
1141 + int err;
1142 + unsigned long flags;
1143 +
1144 + spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
1145 + spin_lock(&dpaa2_chan->vchan.lock);
1146 + if (vchan_issue_pending(&dpaa2_chan->vchan)) {
1147 + vdesc = vchan_next_desc(&dpaa2_chan->vchan);
1148 + if (!vdesc)
1149 + goto err_enqueue;
1150 + dpaa2_comp = to_fsl_qdma_comp(vdesc);
1151 +
1152 + fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
1153 +
1154 + list_del(&vdesc->node);
1155 + list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);
1156 +
 1157 +		/* TODO: priority is hard-coded to zero */
1158 + err = dpaa2_io_service_enqueue_fq(NULL,
1159 + priv->tx_queue_attr[0].fqid, fd);
1160 + if (err) {
1161 + list_del(&dpaa2_comp->list);
1162 + list_add_tail(&dpaa2_comp->list,
1163 + &dpaa2_chan->comp_free);
1164 + }
1165 +
1166 + }
1167 +err_enqueue:
1168 + spin_unlock(&dpaa2_chan->vchan.lock);
1169 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
1170 +}
1171 +
1172 +static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
1173 +{
1174 + struct device *dev = &ls_dev->dev;
1175 + struct dpaa2_qdma_priv *priv;
1176 + struct dpaa2_qdma_priv_per_prio *ppriv;
1177 + uint8_t prio_def = DPDMAI_PRIO_NUM;
1178 + int err;
1179 + int i;
1180 +
1181 + priv = dev_get_drvdata(dev);
1182 +
1183 + priv->dev = dev;
1184 + priv->dpqdma_id = ls_dev->obj_desc.id;
1185 +
 1186 +	/* Get a handle for the DPDMAI this interface is associated with */
1187 + err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
1188 + if (err) {
1189 + dev_err(dev, "dpdmai_open() failed\n");
1190 + return err;
1191 + }
1192 + dev_info(dev, "Opened dpdmai object successfully\n");
1193 +
1194 + err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
1195 + &priv->dpdmai_attr);
1196 + if (err) {
1197 + dev_err(dev, "dpdmai_get_attributes() failed\n");
1198 + return err;
1199 + }
1200 +
1201 + if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
1202 + dev_err(dev, "DPDMAI major version mismatch\n"
1203 + "Found %u.%u, supported version is %u.%u\n",
1204 + priv->dpdmai_attr.version.major,
1205 + priv->dpdmai_attr.version.minor,
1206 + DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
1207 + }
1208 +
1209 + if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
1210 + dev_err(dev, "DPDMAI minor version mismatch\n"
1211 + "Found %u.%u, supported version is %u.%u\n",
1212 + priv->dpdmai_attr.version.major,
1213 + priv->dpdmai_attr.version.minor,
1214 + DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
1215 + }
1216 +
1217 + priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
1218 + ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
1219 + if (!ppriv) {
 1220 +		dev_err(dev, "kcalloc for ppriv failed\n");
 1221 +		return -ENOMEM;
1222 + }
1223 + priv->ppriv = ppriv;
1224 +
1225 + for (i = 0; i < priv->num_pairs; i++) {
1226 + err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
1227 + i, &priv->rx_queue_attr[i]);
1228 + if (err) {
1229 + dev_err(dev, "dpdmai_get_rx_queue() failed\n");
1230 + return err;
1231 + }
1232 + ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
1233 +
1234 + err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
1235 + i, &priv->tx_queue_attr[i]);
1236 + if (err) {
1237 + dev_err(dev, "dpdmai_get_tx_queue() failed\n");
1238 + return err;
1239 + }
1240 + ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
1241 + ppriv->prio = i;
1242 + ppriv->priv = priv;
1243 + ppriv++;
1244 + }
1245 +
1246 + return 0;
1247 +}
1248 +
1249 +static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
1250 +{
1251 + struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
1252 + struct dpaa2_qdma_priv_per_prio, nctx);
1253 + struct dpaa2_qdma_priv *priv = ppriv->priv;
1254 + struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
1255 + struct dpaa2_qdma_chan *qchan;
1256 + const struct dpaa2_fd *fd;
1257 + const struct dpaa2_fd *fd_eq;
1258 + struct dpaa2_dq *dq;
1259 + int err;
1260 + int is_last = 0;
1261 + uint8_t status;
1262 + int i;
1263 + int found;
1264 + uint32_t n_chans = priv->dpaa2_qdma->n_chans;
1265 +
1266 + do {
1267 + err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
1268 + ppriv->store);
1269 + } while (err);
1270 +
1271 + while (!is_last) {
1272 + do {
1273 + dq = dpaa2_io_store_next(ppriv->store, &is_last);
1274 + } while (!is_last && !dq);
1275 + if (!dq) {
1276 + dev_err(priv->dev, "FQID returned no valid frames!\n");
1277 + continue;
1278 + }
1279 +
1280 + /* obtain FD and process the error */
1281 + fd = dpaa2_dq_fd(dq);
1282 + status = fd->simple.ctrl & 0xff;
1283 + if (status)
1284 + dev_err(priv->dev, "FD error occurred\n");
1285 + found = 0;
1286 + for (i = 0; i < n_chans; i++) {
1287 + qchan = &priv->dpaa2_qdma->chans[i];
1288 + spin_lock(&qchan->queue_lock);
1289 + if (list_empty(&qchan->comp_used)) {
1290 + spin_unlock(&qchan->queue_lock);
1291 + continue;
1292 + }
1293 + list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
1294 + &qchan->comp_used, list) {
1295 + fd_eq = (struct dpaa2_fd *)
1296 + dpaa2_comp->fd_virt_addr;
1297 +
1298 + if (fd_eq->simple.addr ==
1299 + fd->simple.addr) {
1300 +
1301 + list_del(&dpaa2_comp->list);
1302 + list_add_tail(&dpaa2_comp->list,
1303 + &qchan->comp_free);
1304 +
1305 + spin_lock(&qchan->vchan.lock);
1306 + vchan_cookie_complete(
1307 + &dpaa2_comp->vdesc);
1308 + spin_unlock(&qchan->vchan.lock);
1309 + found = 1;
1310 + break;
1311 + }
1312 + }
1313 + spin_unlock(&qchan->queue_lock);
1314 + if (found)
1315 + break;
1316 + }
1317 + }
1318 +
1319 + dpaa2_io_service_rearm(NULL, ctx);
1320 +}
1321 +
1322 +static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
1323 +{
1324 + int err, i, num;
1325 + struct device *dev = priv->dev;
1326 + struct dpaa2_qdma_priv_per_prio *ppriv;
1327 +
1328 + num = priv->num_pairs;
1329 + ppriv = priv->ppriv;
1330 + for (i = 0; i < num; i++) {
1331 + ppriv->nctx.is_cdan = 0;
1332 + ppriv->nctx.desired_cpu = 1;
1333 + ppriv->nctx.id = ppriv->rsp_fqid;
1334 + ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
1335 + err = dpaa2_io_service_register(NULL, &ppriv->nctx);
1336 + if (err) {
1337 + dev_err(dev, "Notification register failed\n");
1338 + goto err_service;
1339 + }
1340 +
1341 + ppriv->store = dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE,
1342 + dev);
1343 + if (!ppriv->store) {
1344 + dev_err(dev, "dpaa2_io_store_create() failed\n");
 1345 +			err = -ENOMEM;
+			goto err_store;
1346 + }
1347 +
1348 + ppriv++;
1349 + }
1350 + return 0;
1351 +
1352 +err_store:
1353 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
1354 +err_service:
1355 + ppriv--;
1356 + while (ppriv >= priv->ppriv) {
1357 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
1358 + dpaa2_io_store_destroy(ppriv->store);
1359 + ppriv--;
1360 + }
 1361 +	return err;
1362 +}
1363 +
1364 +static void __cold dpaa2_dpmai_store_free(struct dpaa2_qdma_priv *priv)
1365 +{
1366 + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
1367 + int i;
1368 +
1369 + for (i = 0; i < priv->num_pairs; i++) {
1370 + dpaa2_io_store_destroy(ppriv->store);
1371 + ppriv++;
1372 + }
1373 +}
1374 +
1375 +static void __cold dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
1376 +{
1377 + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
1378 + int i;
1379 +
1380 + for (i = 0; i < priv->num_pairs; i++) {
1381 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
1382 + ppriv++;
1383 + }
1384 +}
1385 +
1386 +static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
1387 +{
1388 + int err;
1389 + struct dpdmai_rx_queue_cfg rx_queue_cfg;
1390 + struct device *dev = priv->dev;
1391 + struct dpaa2_qdma_priv_per_prio *ppriv;
1392 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
1393 + int i, num;
1394 +
1395 + num = priv->num_pairs;
1396 + ppriv = priv->ppriv;
1397 + for (i = 0; i < num; i++) {
1398 + rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
1399 + DPDMAI_QUEUE_OPT_DEST;
1400 + rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
1401 + rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
1402 + rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
1403 + rx_queue_cfg.dest_cfg.priority = ppriv->prio;
1404 + err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
1405 + rx_queue_cfg.dest_cfg.priority, &rx_queue_cfg);
1406 + if (err) {
1407 + dev_err(dev, "dpdmai_set_rx_queue() failed\n");
1408 + return err;
1409 + }
1410 +
1411 + ppriv++;
1412 + }
1413 +
1414 + return 0;
1415 +}
1416 +
1417 +static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
1418 +{
1419 + int err = 0;
1420 + struct device *dev = priv->dev;
1421 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
1422 + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
1423 + int i;
1424 +
1425 + for (i = 0; i < priv->num_pairs; i++) {
1426 + ppriv->nctx.qman64 = 0;
1427 + ppriv->nctx.dpio_id = 0;
1428 + ppriv++;
1429 + }
1430 +
1431 + err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
1432 + if (err)
1433 + dev_err(dev, "dpdmai_reset() failed\n");
1434 +
1435 + return err;
1436 +}
1437 +
1438 +static void __cold dpaa2_dpdmai_free_pool(struct dpaa2_qdma_chan *qchan,
1439 + struct list_head *head)
1440 +{
1441 + struct qdma_sg_blk *sgb_tmp, *_sgb_tmp;
1442 + /* free the QDMA SG pool block */
1443 + list_for_each_entry_safe(sgb_tmp, _sgb_tmp, head, list) {
1444 + sgb_tmp->blk_virt_addr = (void *)((struct qdma_sg_blk *)
1445 + sgb_tmp->blk_virt_addr - 1);
1446 + sgb_tmp->blk_bus_addr = sgb_tmp->blk_bus_addr
1447 + - sizeof(*sgb_tmp);
1448 + dma_pool_free(qchan->sg_blk_pool, sgb_tmp->blk_virt_addr,
1449 + sgb_tmp->blk_bus_addr);
1450 + }
1451 +
1452 +}
1453 +
1454 +static void __cold dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
1455 + struct list_head *head)
1456 +{
1457 + struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
1458 + /* free the QDMA comp resource */
1459 + list_for_each_entry_safe(comp_tmp, _comp_tmp,
1460 + head, list) {
1461 + dma_pool_free(qchan->fd_pool,
1462 + comp_tmp->fd_virt_addr,
1463 + comp_tmp->fd_bus_addr);
1464 + /* free the SG source block on comp */
1465 + dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_src_head);
1466 + /* free the SG destination block on comp */
1467 + dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_dst_head);
1468 + list_del(&comp_tmp->list);
1469 + kfree(comp_tmp);
1470 + }
1471 +
1472 +}
1473 +
1474 +static void __cold dpaa2_dpdmai_free_channels(
1475 + struct dpaa2_qdma_engine *dpaa2_qdma)
1476 +{
1477 + struct dpaa2_qdma_chan *qchan;
1478 + int num, i;
1479 +
1480 + num = dpaa2_qdma->n_chans;
1481 + for (i = 0; i < num; i++) {
1482 + qchan = &dpaa2_qdma->chans[i];
1483 + dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
1484 + dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
1485 + dpaa2_dpdmai_free_pool(qchan, &qchan->sgb_free);
1486 + dma_pool_destroy(qchan->fd_pool);
1487 + dma_pool_destroy(qchan->sg_blk_pool);
1488 + }
1489 +}
1490 +
1491 +static int dpaa2_dpdmai_alloc_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
1492 +{
1493 + struct dpaa2_qdma_chan *dpaa2_chan;
1494 + struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;
1495 + int i;
1496 +
1497 + INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
1498 + for (i = 0; i < dpaa2_qdma->n_chans; i++) {
1499 + dpaa2_chan = &dpaa2_qdma->chans[i];
1500 + dpaa2_chan->qdma = dpaa2_qdma;
1501 + dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
1502 + vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
1503 +
1504 + dpaa2_chan->fd_pool = dma_pool_create("fd_pool",
1505 + dev, FD_POOL_SIZE, 32, 0);
1506 + if (!dpaa2_chan->fd_pool)
 1507 +			return -ENOMEM;
1508 + dpaa2_chan->sg_blk_pool = dma_pool_create("sg_blk_pool",
1509 + dev, SG_POOL_SIZE, 32, 0);
1510 + if (!dpaa2_chan->sg_blk_pool)
 1511 +			return -ENOMEM;
1512 +
1513 + spin_lock_init(&dpaa2_chan->queue_lock);
1514 + INIT_LIST_HEAD(&dpaa2_chan->comp_used);
1515 + INIT_LIST_HEAD(&dpaa2_chan->comp_free);
1516 + INIT_LIST_HEAD(&dpaa2_chan->sgb_free);
1517 + }
1518 + return 0;
1519 +}
1520 +
1521 +static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
1522 +{
1523 + struct dpaa2_qdma_priv *priv;
1524 + struct device *dev = &dpdmai_dev->dev;
1525 + struct dpaa2_qdma_engine *dpaa2_qdma;
1526 + int err;
1527 +
1528 + priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1529 + if (!priv)
1530 + return -ENOMEM;
1531 + dev_set_drvdata(dev, priv);
1532 + priv->dpdmai_dev = dpdmai_dev;
1533 +
1534 + priv->iommu_domain = iommu_get_domain_for_dev(dev);
1535 + if (priv->iommu_domain)
1536 + smmu_disable = false;
1537 +
1538 + /* obtain a MC portal */
1539 + err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
1540 + if (err) {
1541 + dev_err(dev, "MC portal allocation failed\n");
1542 + goto err_mcportal;
1543 + }
1544 +
1545 + /* DPDMAI initialization */
1546 + err = dpaa2_qdma_setup(dpdmai_dev);
1547 + if (err) {
 1548 +		dev_err(dev, "dpaa2_qdma_setup() failed\n");
1549 + goto err_dpdmai_setup;
1550 + }
1551 +
1552 + /* DPIO */
1553 + err = dpaa2_qdma_dpio_setup(priv);
1554 + if (err) {
 1555 +		dev_err(dev, "dpaa2_qdma_dpio_setup() failed\n");
1556 + goto err_dpio_setup;
1557 + }
1558 +
1559 + /* DPDMAI binding to DPIO */
1560 + err = dpaa2_dpdmai_bind(priv);
1561 + if (err) {
1562 + dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
1563 + goto err_bind;
1564 + }
1565 +
1566 + /* DPDMAI enable */
1567 + err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
1568 + if (err) {
 1569 +		dev_err(dev, "dpdmai_enable() failed\n");
1570 + goto err_enable;
1571 + }
1572 +
1573 + dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
1574 + if (!dpaa2_qdma) {
1575 + err = -ENOMEM;
1576 + goto err_eng;
1577 + }
1578 +
1579 + priv->dpaa2_qdma = dpaa2_qdma;
1580 + dpaa2_qdma->priv = priv;
1581 +
1582 + dpaa2_qdma->n_chans = NUM_CH;
1583 +
1584 + err = dpaa2_dpdmai_alloc_channels(dpaa2_qdma);
1585 + if (err) {
 1586 +		dev_err(dev, "QDMA channel allocation failed\n");
1587 + goto err_reg;
1588 + }
1589 +
1590 + dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
1591 + dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
1592 + dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);
1593 + dma_cap_set(DMA_SG, dpaa2_qdma->dma_dev.cap_mask);
1594 +
1595 + dpaa2_qdma->dma_dev.dev = dev;
1596 + dpaa2_qdma->dma_dev.device_alloc_chan_resources
1597 + = dpaa2_qdma_alloc_chan_resources;
1598 + dpaa2_qdma->dma_dev.device_free_chan_resources
1599 + = dpaa2_qdma_free_chan_resources;
1600 + dpaa2_qdma->dma_dev.device_tx_status = dpaa2_qdma_tx_status;
1601 + dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
1602 + dpaa2_qdma->dma_dev.device_prep_dma_sg = dpaa2_qdma_prep_sg;
1603 + dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;
1604 +
1605 + err = dma_async_device_register(&dpaa2_qdma->dma_dev);
1606 + if (err) {
1607 + dev_err(dev, "Can't register NXP QDMA engine.\n");
1608 + goto err_reg;
1609 + }
1610 +
1611 + return 0;
1612 +
1613 +err_reg:
1614 + dpaa2_dpdmai_free_channels(dpaa2_qdma);
1615 + kfree(dpaa2_qdma);
1616 +err_eng:
1617 + dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
1618 +err_enable:
1619 + dpaa2_dpdmai_dpio_unbind(priv);
1620 +err_bind:
1621 + dpaa2_dpmai_store_free(priv);
1622 + dpaa2_dpdmai_dpio_free(priv);
1623 +err_dpio_setup:
1624 + dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
1625 +err_dpdmai_setup:
1626 + fsl_mc_portal_free(priv->mc_io);
1627 +err_mcportal:
1628 + kfree(priv->ppriv);
1629 + kfree(priv);
1630 + dev_set_drvdata(dev, NULL);
1631 + return err;
1632 +}
1633 +
1634 +static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
1635 +{
1636 + struct device *dev;
1637 + struct dpaa2_qdma_priv *priv;
1638 + struct dpaa2_qdma_engine *dpaa2_qdma;
1639 +
1640 + dev = &ls_dev->dev;
1641 + priv = dev_get_drvdata(dev);
1642 + dpaa2_qdma = priv->dpaa2_qdma;
1643 +
1644 + dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
1645 + dpaa2_dpdmai_dpio_unbind(priv);
1646 + dpaa2_dpmai_store_free(priv);
1647 + dpaa2_dpdmai_dpio_free(priv);
1648 + dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
1649 + fsl_mc_portal_free(priv->mc_io);
1650 + dev_set_drvdata(dev, NULL);
1651 + dpaa2_dpdmai_free_channels(dpaa2_qdma);
1652 +
1653 + dma_async_device_unregister(&dpaa2_qdma->dma_dev);
1654 + kfree(priv);
1655 + kfree(dpaa2_qdma);
1656 +
1657 + return 0;
1658 +}
1659 +
1660 +static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
1661 + {
1662 + .vendor = FSL_MC_VENDOR_FREESCALE,
1663 + .obj_type = "dpdmai",
1664 + },
1665 + { .vendor = 0x0 }
1666 +};
1667 +
1668 +static struct fsl_mc_driver dpaa2_qdma_driver = {
1669 + .driver = {
1670 + .name = "dpaa2-qdma",
1671 + .owner = THIS_MODULE,
1672 + },
1673 + .probe = dpaa2_qdma_probe,
1674 + .remove = dpaa2_qdma_remove,
1675 + .match_id_table = dpaa2_qdma_id_table
1676 +};
1677 +
1678 +static int __init dpaa2_qdma_driver_init(void)
1679 +{
 1680 +	return fsl_mc_driver_register(&dpaa2_qdma_driver);
1681 +}
1682 +late_initcall(dpaa2_qdma_driver_init);
1683 +
1684 +static void __exit fsl_qdma_exit(void)
1685 +{
 1686 +	fsl_mc_driver_unregister(&dpaa2_qdma_driver);
1687 +}
1688 +module_exit(fsl_qdma_exit);
1689 +
1690 +MODULE_DESCRIPTION("NXP DPAA2 qDMA driver");
1691 +MODULE_LICENSE("Dual BSD/GPL");
1692 diff --git a/drivers/dma/dpaa2-qdma/dpaa2-qdma.h b/drivers/dma/dpaa2-qdma/dpaa2-qdma.h
1693 new file mode 100644
1694 index 00000000..71a00db8
1695 --- /dev/null
1696 +++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.h
1697 @@ -0,0 +1,262 @@
1698 +/* Copyright 2015 NXP Semiconductor Inc.
1699 + *
1700 + * Redistribution and use in source and binary forms, with or without
1701 + * modification, are permitted provided that the following conditions are met:
1702 + * * Redistributions of source code must retain the above copyright
1703 + * notice, this list of conditions and the following disclaimer.
1704 + * * Redistributions in binary form must reproduce the above copyright
1705 + * notice, this list of conditions and the following disclaimer in the
1706 + * documentation and/or other materials provided with the distribution.
1707 + * * Neither the name of NXP Semiconductor nor the
1708 + * names of its contributors may be used to endorse or promote products
1709 + * derived from this software without specific prior written permission.
1710 + *
1711 + *
1712 + * ALTERNATIVELY, this software may be distributed under the terms of the
1713 + * GNU General Public License ("GPL") as published by the Free Software
1714 + * Foundation, either version 2 of that License or (at your option) any
1715 + * later version.
1716 + *
1717 + * THIS SOFTWARE IS PROVIDED BY NXP Semiconductor ``AS IS'' AND ANY
1718 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1719 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1720 + * DISCLAIMED. IN NO EVENT SHALL NXP Semiconductor BE LIABLE FOR ANY
1721 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1722 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1723 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1724 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1725 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1726 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1727 + */
1728 +
1729 +#ifndef __DPAA2_QDMA_H
1730 +#define __DPAA2_QDMA_H
1731 +
1732 +#define LONG_FORMAT 1
1733 +
1734 +#define DPAA2_QDMA_STORE_SIZE 16
1735 +#define NUM_CH 8
1736 +#define NUM_SG_PER_BLK 16
1737 +
1738 +#define QDMA_DMR_OFFSET 0x0
1739 +#define QDMA_DQ_EN (0 << 30)
1740 +#define QDMA_DQ_DIS (1 << 30)
1741 +
1742 +#define QDMA_DSR_M_OFFSET 0x10004
1743 +
1744 +struct dpaa2_qdma_sd_d {
1745 + uint32_t rsv:32;
1746 + union {
1747 + struct {
1748 +			uint32_t ssd:12;	/* source stride distance */
1749 +			uint32_t sss:12;	/* source stride size */
1750 + uint32_t rsv1:8;
1751 + } sdf;
1752 + struct {
1753 + uint32_t dsd:12; /* Destination stride distance */
1754 + uint32_t dss:12; /* Destination stride size */
1755 + uint32_t rsv2:8;
1756 + } ddf;
1757 + } df;
1758 + uint32_t rbpcmd; /* Route-by-port command */
1759 + uint32_t cmd;
1760 +} __attribute__((__packed__));
1761 +/* Source descriptor command read transaction type for RBP=0:
1762 + coherent copy of cacheable memory */
1763 +#define QDMA_SD_CMD_RDTTYPE_COHERENT (0xb << 28)
1764 +/* Destination descriptor command write transaction type for RBP=0:
1765 + coherent copy of cacheable memory */
1766 +#define QDMA_DD_CMD_WRTTYPE_COHERENT (0x6 << 28)
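To make the two command encodings concrete, here is a hedged sketch of initializing a source/destination descriptor pair for a plain coherent copy; the helper name is hypothetical, and whether an explicit endianness conversion is required depends on the platform integration:

/* Hypothetical helper: program the descriptor commands for RBP=0 copies */
static void qdma_populate_sd_dd(struct dpaa2_qdma_sd_d *sd,
				struct dpaa2_qdma_sd_d *dd)
{
	/* read side: coherent copy of cacheable memory */
	sd->cmd = QDMA_SD_CMD_RDTTYPE_COHERENT;
	/* write side: coherent copy of cacheable memory */
	dd->cmd = QDMA_DD_CMD_WRTTYPE_COHERENT;
}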
1767 +
1768 +#define QDMA_SG_FMT_SDB 0x0 /* single data buffer */
1769 +#define QDMA_SG_FMT_FDS 0x1 /* frame data section */
1770 +#define QDMA_SG_FMT_SGTE 0x2 /* SGT extension */
1771 +#define QDMA_SG_SL_SHORT 0x1 /* short length */
1772 +#define QDMA_SG_SL_LONG	0x0 /* long length */
1773 +#define QDMA_SG_F 0x1 /* last sg entry */
1774 +struct dpaa2_qdma_sg {
1775 + uint32_t addr_lo; /* address 0:31 */
1776 + uint32_t addr_hi:17; /* address 32:48 */
1777 + uint32_t rsv:15;
1778 + union {
1779 + uint32_t data_len_sl0; /* SL=0, the long format */
1780 + struct {
1781 + uint32_t len:17; /* SL=1, the short format */
1782 + uint32_t reserve:3;
1783 + uint32_t sf:1;
1784 + uint32_t sr:1;
1785 + uint32_t size:10; /* buff size */
1786 + } data_len_sl1;
1787 + } data_len; /* AVAIL_LENGTH */
1788 + struct {
1789 + uint32_t bpid:14;
1790 + uint32_t ivp:1;
1791 + uint32_t mbt:1;
1792 + uint32_t offset:12;
1793 + uint32_t fmt:2;
1794 + uint32_t sl:1;
1795 + uint32_t f:1;
1796 + } ctrl;
1797 +} __attribute__((__packed__));
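A hedged sketch of filling one of these entries for a single data buffer in the long (SL=0) format; the helper name is hypothetical, and the 17-bit addr_hi bitfield implicitly keeps only address bits 32:48:

static void qdma_sg_fill_one(struct dpaa2_qdma_sg *sg,
			     dma_addr_t addr, uint32_t len)
{
	sg->addr_lo = lower_32_bits(addr);
	sg->addr_hi = upper_32_bits(addr);	/* truncated to bits 32:48 */
	sg->data_len.data_len_sl0 = len;	/* SL=0: full 32-bit length */
	sg->ctrl.fmt = QDMA_SG_FMT_SDB;		/* single data buffer */
	sg->ctrl.sl = QDMA_SG_SL_LONG;
	sg->ctrl.f = QDMA_SG_F;			/* mark last entry */
}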
1798 +
1799 +#define QMAN_FD_FMT_ENABLE (1 << 12) /* frame list table enable */
1800 +#define QMAN_FD_BMT_ENABLE (1 << 15) /* bypass memory translation */
1801 +#define QMAN_FD_BMT_DISABLE (0 << 15) /* bypass memory translation */
1802 +#define QMAN_FD_SL_DISABLE	(0 << 14) /* short length disabled */
1803 +#define QMAN_FD_SL_ENABLE	(1 << 14) /* short length enabled */
1804 +
1805 +#define QDMA_SB_FRAME (0 << 28) /* single frame */
1806 +#define QDMA_SG_FRAME (2 << 28) /* scatter gather frames */
1807 +#define QDMA_FINAL_BIT_DISABLE (0 << 31) /* final bit disable */
1808 +#define QDMA_FINAL_BIT_ENABLE	(1U << 31) /* final bit enable */
1809 +
1810 +#define QDMA_FD_SHORT_FORMAT (1 << 11) /* short format */
1811 +#define QDMA_FD_LONG_FORMAT (0 << 11) /* long format */
1812 +#define QDMA_SER_DISABLE (0 << 8) /* no notification */
1813 +#define QDMA_SER_CTX (1 << 8) /* notification by FQD_CTX[fqid] */
1814 +#define QDMA_SER_DEST (2 << 8) /* notification by destination desc */
1815 +#define QDMA_SER_BOTH		(3 << 8) /* source and destination notification */
1816 +#define QDMA_FD_SPF_ENALBE (1 << 30) /* source prefetch enable */
1817 +
1818 +#define QMAN_FD_VA_ENABLE (1 << 14) /* Address used is virtual address */
1819 +#define QMAN_FD_VA_DISABLE	(0 << 14) /* Address used is a real address */
1820 +#define QMAN_FD_CBMT_ENABLE (1 << 15) /* Flow Context: 49bit physical address */
1821 +#define QMAN_FD_CBMT_DISABLE (0 << 15) /* Flow Context: 64bit virtual address */
1822 +#define QMAN_FD_SC_DISABLE (0 << 27) /* stashing control */
1823 +
1824 +#define QDMA_FL_FMT_SBF 0x0 /* Single buffer frame */
1825 +#define QDMA_FL_FMT_SGE 0x2 /* Scatter gather frame */
1826 +#define QDMA_FL_BMT_ENABLE 0x1 /* enable bypass memory translation */
1827 +#define QDMA_FL_BMT_DISABLE 0x0 /* enable bypass memory translation */
1828 +#define QDMA_FL_SL_LONG 0x0 /* long length */
1829 +#define QDMA_FL_SL_SHORT 0x1 /* short length */
1830 +#define QDMA_FL_F 0x1 /* last frame list bit */
1831 +/* Description of the frame list table structure */
1832 +struct dpaa2_frame_list {
1833 + uint32_t addr_lo; /* lower 32 bits of address */
1834 + uint32_t addr_hi:17; /* upper 17 bits of address */
1835 + uint32_t resrvd:15;
1836 + union {
1837 +		uint32_t data_len_sl0;	/* If SL=0, data length is 32 bits */
1838 +		struct {
1839 +			uint32_t data_len:18;	/* If SL=1, data length is 18 bits */
1840 + uint32_t resrvd:2;
1841 + uint32_t mem:12; /* Valid only when SL=1 */
1842 + } data_len_sl1;
1843 + } data_len;
1844 + /* word 4 */
1845 + uint32_t bpid:14; /* Frame buffer pool ID */
1846 + uint32_t ivp:1; /* Invalid Pool ID. */
1847 + uint32_t bmt:1; /* Bypass Memory Translation */
1848 + uint32_t offset:12; /* Frame offset */
1849 + uint32_t fmt:2; /* Frame Format */
1850 + uint32_t sl:1; /* Short Length */
1851 + uint32_t f:1; /* Final bit */
1852 +
1853 + uint32_t frc; /* Frame Context */
1854 + /* word 6 */
1855 + uint32_t err:8; /* Frame errors */
1856 + uint32_t resrvd0:8;
1857 + uint32_t asal:4; /* accelerator-specific annotation length */
1858 + uint32_t resrvd1:1;
1859 + uint32_t ptv2:1;
1860 + uint32_t ptv1:1;
1861 + uint32_t pta:1; /* pass-through annotation */
1862 + uint32_t resrvd2:8;
1863 +
1864 +	uint32_t flc_lo;	/* lower 32 bits of flow context */
1865 +	uint32_t flc_hi;	/* upper 32 bits of flow context */
1866 +} __attribute__((__packed__));
1867 +
1868 +struct dpaa2_qdma_chan {
1869 + struct virt_dma_chan vchan;
1870 + struct virt_dma_desc vdesc;
1871 + enum dma_status status;
1872 + struct dpaa2_qdma_engine *qdma;
1873 +
1874 + struct mutex dpaa2_queue_mutex;
1875 + spinlock_t queue_lock;
1876 + struct dma_pool *fd_pool;
1877 + struct dma_pool *sg_blk_pool;
1878 +
1879 + struct list_head comp_used;
1880 + struct list_head comp_free;
1881 +
1882 + struct list_head sgb_free;
1883 +};
1884 +
1885 +struct qdma_sg_blk {
1886 + dma_addr_t blk_bus_addr;
1887 + void *blk_virt_addr;
1888 + struct list_head list;
1889 +};
1890 +
1891 +struct dpaa2_qdma_comp {
1892 + dma_addr_t fd_bus_addr;
1893 + dma_addr_t fl_bus_addr;
1894 + dma_addr_t desc_bus_addr;
1895 + dma_addr_t sge_src_bus_addr;
1896 + dma_addr_t sge_dst_bus_addr;
1897 + void *fd_virt_addr;
1898 + void *fl_virt_addr;
1899 + void *desc_virt_addr;
1900 + void *sg_src_virt_addr;
1901 + void *sg_dst_virt_addr;
1902 + struct qdma_sg_blk *sg_blk;
1903 + uint32_t sg_blk_num;
1904 + struct list_head sg_src_head;
1905 + struct list_head sg_dst_head;
1906 + struct dpaa2_qdma_chan *qchan;
1907 + struct virt_dma_desc vdesc;
1908 + struct list_head list;
1909 +};
1910 +
1911 +struct dpaa2_qdma_engine {
1912 + struct dma_device dma_dev;
1913 + u32 n_chans;
1914 + struct dpaa2_qdma_chan chans[NUM_CH];
1915 +
1916 + struct dpaa2_qdma_priv *priv;
1917 +};
1918 +
1919 +/*
1920 + * dpaa2_qdma_priv - driver private data
1921 + */
1922 +struct dpaa2_qdma_priv {
1923 + int dpqdma_id;
1924 +
1925 + struct iommu_domain *iommu_domain;
1926 + struct dpdmai_attr dpdmai_attr;
1927 + struct device *dev;
1928 + struct fsl_mc_io *mc_io;
1929 + struct fsl_mc_device *dpdmai_dev;
1930 +
1931 + struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM];
1932 + struct dpdmai_tx_queue_attr tx_queue_attr[DPDMAI_PRIO_NUM];
1933 +
1934 + uint8_t num_pairs;
1935 +
1936 + struct dpaa2_qdma_engine *dpaa2_qdma;
1937 + struct dpaa2_qdma_priv_per_prio *ppriv;
1938 +};
1939 +
1940 +struct dpaa2_qdma_priv_per_prio {
1941 + int req_fqid;
1942 + int rsp_fqid;
1943 + int prio;
1944 +
1945 + struct dpaa2_io_store *store;
1946 + struct dpaa2_io_notification_ctx nctx;
1947 +
1948 + struct dpaa2_qdma_priv *priv;
1949 +};
1950 +
1951 +/* FD pool size: one FD + 3 frame list entries + 2 source/destination descriptors */
1952 +#define FD_POOL_SIZE (sizeof(struct dpaa2_fd) + \
1953 + sizeof(struct dpaa2_frame_list) * 3 + \
1954 + sizeof(struct dpaa2_qdma_sd_d) * 2)
1955 +
1956 +/* qdma_sg_blk + 16 SGs */
1957 +#define SG_POOL_SIZE (sizeof(struct qdma_sg_blk) +\
1958 + sizeof(struct dpaa2_qdma_sg) * NUM_SG_PER_BLK)
1959 +#endif /* __DPAA2_QDMA_H */
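Working through the arithmetic: with a 32-byte struct dpaa2_fd (its size is taken from the staging fsl-mc headers, an assumption here), 32-byte frame list entries and 16-byte source/destination descriptors, FD_POOL_SIZE comes to 32 + 96 + 32 = 160 bytes. A hedged sketch of the per-channel pool setup these macros imply (pool names and alignment choices are assumptions):

	chan->fd_pool = dma_pool_create("qdma_fd_pool", dev,
					FD_POOL_SIZE, 32, 0);
	chan->sg_blk_pool = dma_pool_create("qdma_sg_blk_pool", dev,
					    SG_POOL_SIZE, 32, 0);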
1960 diff --git a/drivers/dma/dpaa2-qdma/dpdmai.c b/drivers/dma/dpaa2-qdma/dpdmai.c
1961 new file mode 100644
1962 index 00000000..ad13fc1e
1963 --- /dev/null
1964 +++ b/drivers/dma/dpaa2-qdma/dpdmai.c
1965 @@ -0,0 +1,454 @@
1966 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
1967 + *
1968 + * Redistribution and use in source and binary forms, with or without
1969 + * modification, are permitted provided that the following conditions are met:
1970 + * * Redistributions of source code must retain the above copyright
1971 + * notice, this list of conditions and the following disclaimer.
1972 + * * Redistributions in binary form must reproduce the above copyright
1973 + * notice, this list of conditions and the following disclaimer in the
1974 + * documentation and/or other materials provided with the distribution.
1975 + * * Neither the name of the above-listed copyright holders nor the
1976 + * names of any contributors may be used to endorse or promote products
1977 + * derived from this software without specific prior written permission.
1978 + *
1979 + *
1980 + * ALTERNATIVELY, this software may be distributed under the terms of the
1981 + * GNU General Public License ("GPL") as published by the Free Software
1982 + * Foundation, either version 2 of that License or (at your option) any
1983 + * later version.
1984 + *
1985 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
1986 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1987 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1988 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
1989 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1990 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
1991 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
1992 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
1993 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
1994 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1995 + * POSSIBILITY OF SUCH DAMAGE.
1996 + */
1997 +#include <linux/types.h>
1998 +#include <linux/io.h>
1999 +#include "fsl_dpdmai.h"
2000 +#include "fsl_dpdmai_cmd.h"
2001 +#include "../../../drivers/staging/fsl-mc/include/mc-sys.h"
2002 +#include "../../../drivers/staging/fsl-mc/include/mc-cmd.h"
2003 +
2004 +int dpdmai_open(struct fsl_mc_io *mc_io,
2005 + uint32_t cmd_flags,
2006 + int dpdmai_id,
2007 + uint16_t *token)
2008 +{
2009 + struct mc_command cmd = { 0 };
2010 + int err;
2011 +
2012 + /* prepare command */
2013 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
2014 + cmd_flags,
2015 + 0);
2016 + DPDMAI_CMD_OPEN(cmd, dpdmai_id);
2017 +
2018 +	/* send command to MC */
2019 + err = mc_send_command(mc_io, &cmd);
2020 + if (err)
2021 + return err;
2022 +
2023 + /* retrieve response parameters */
2024 + *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
2025 +
2026 + return 0;
2027 +}
2028 +
2029 +int dpdmai_close(struct fsl_mc_io *mc_io,
2030 + uint32_t cmd_flags,
2031 + uint16_t token)
2032 +{
2033 + struct mc_command cmd = { 0 };
2034 +
2035 + /* prepare command */
2036 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE,
2037 + cmd_flags, token);
2038 +
2039 +	/* send command to MC */
2040 + return mc_send_command(mc_io, &cmd);
2041 +}
2042 +
2043 +int dpdmai_create(struct fsl_mc_io *mc_io,
2044 + uint32_t cmd_flags,
2045 + const struct dpdmai_cfg *cfg,
2046 + uint16_t *token)
2047 +{
2048 + struct mc_command cmd = { 0 };
2049 + int err;
2050 +
2051 + /* prepare command */
2052 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
2053 + cmd_flags,
2054 + 0);
2055 + DPDMAI_CMD_CREATE(cmd, cfg);
2056 +
2057 +	/* send command to MC */
2058 + err = mc_send_command(mc_io, &cmd);
2059 + if (err)
2060 + return err;
2061 +
2062 + /* retrieve response parameters */
2063 + *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
2064 +
2065 + return 0;
2066 +}
2067 +
2068 +int dpdmai_destroy(struct fsl_mc_io *mc_io,
2069 + uint32_t cmd_flags,
2070 + uint16_t token)
2071 +{
2072 + struct mc_command cmd = { 0 };
2073 +
2074 + /* prepare command */
2075 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
2076 + cmd_flags,
2077 + token);
2078 +
2079 +	/* send command to MC */
2080 + return mc_send_command(mc_io, &cmd);
2081 +}
2082 +
2083 +int dpdmai_enable(struct fsl_mc_io *mc_io,
2084 + uint32_t cmd_flags,
2085 + uint16_t token)
2086 +{
2087 + struct mc_command cmd = { 0 };
2088 +
2089 + /* prepare command */
2090 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE,
2091 + cmd_flags,
2092 + token);
2093 +
2094 +	/* send command to MC */
2095 + return mc_send_command(mc_io, &cmd);
2096 +}
2097 +
2098 +int dpdmai_disable(struct fsl_mc_io *mc_io,
2099 + uint32_t cmd_flags,
2100 + uint16_t token)
2101 +{
2102 + struct mc_command cmd = { 0 };
2103 +
2104 + /* prepare command */
2105 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE,
2106 + cmd_flags,
2107 + token);
2108 +
2109 +	/* send command to MC */
2110 + return mc_send_command(mc_io, &cmd);
2111 +}
2112 +
2113 +int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
2114 + uint32_t cmd_flags,
2115 + uint16_t token,
2116 + int *en)
2117 +{
2118 + struct mc_command cmd = { 0 };
2119 + int err;
2120 + /* prepare command */
2121 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED,
2122 + cmd_flags,
2123 + token);
2124 +
2125 +	/* send command to MC */
2126 + err = mc_send_command(mc_io, &cmd);
2127 + if (err)
2128 + return err;
2129 +
2130 + /* retrieve response parameters */
2131 + DPDMAI_RSP_IS_ENABLED(cmd, *en);
2132 +
2133 + return 0;
2134 +}
2135 +
2136 +int dpdmai_reset(struct fsl_mc_io *mc_io,
2137 + uint32_t cmd_flags,
2138 + uint16_t token)
2139 +{
2140 + struct mc_command cmd = { 0 };
2141 +
2142 + /* prepare command */
2143 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET,
2144 + cmd_flags,
2145 + token);
2146 +
2147 +	/* send command to MC */
2148 + return mc_send_command(mc_io, &cmd);
2149 +}
2150 +
2151 +int dpdmai_get_irq(struct fsl_mc_io *mc_io,
2152 + uint32_t cmd_flags,
2153 + uint16_t token,
2154 + uint8_t irq_index,
2155 + int *type,
2156 + struct dpdmai_irq_cfg *irq_cfg)
2157 +{
2158 + struct mc_command cmd = { 0 };
2159 + int err;
2160 +
2161 + /* prepare command */
2162 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ,
2163 + cmd_flags,
2164 + token);
2165 + DPDMAI_CMD_GET_IRQ(cmd, irq_index);
2166 +
2167 +	/* send command to MC */
2168 + err = mc_send_command(mc_io, &cmd);
2169 + if (err)
2170 + return err;
2171 +
2172 + /* retrieve response parameters */
2173 + DPDMAI_RSP_GET_IRQ(cmd, *type, irq_cfg);
2174 +
2175 + return 0;
2176 +}
2177 +
2178 +int dpdmai_set_irq(struct fsl_mc_io *mc_io,
2179 + uint32_t cmd_flags,
2180 + uint16_t token,
2181 + uint8_t irq_index,
2182 + struct dpdmai_irq_cfg *irq_cfg)
2183 +{
2184 + struct mc_command cmd = { 0 };
2185 +
2186 + /* prepare command */
2187 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ,
2188 + cmd_flags,
2189 + token);
2190 + DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
2191 +
2192 +	/* send command to MC */
2193 + return mc_send_command(mc_io, &cmd);
2194 +}
2195 +
2196 +int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
2197 + uint32_t cmd_flags,
2198 + uint16_t token,
2199 + uint8_t irq_index,
2200 + uint8_t *en)
2201 +{
2202 + struct mc_command cmd = { 0 };
2203 + int err;
2204 +
2205 + /* prepare command */
2206 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_ENABLE,
2207 + cmd_flags,
2208 + token);
2209 + DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
2210 +
2211 +	/* send command to MC */
2212 + err = mc_send_command(mc_io, &cmd);
2213 + if (err)
2214 + return err;
2215 +
2216 + /* retrieve response parameters */
2217 + DPDMAI_RSP_GET_IRQ_ENABLE(cmd, *en);
2218 +
2219 + return 0;
2220 +}
2221 +
2222 +int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
2223 + uint32_t cmd_flags,
2224 + uint16_t token,
2225 + uint8_t irq_index,
2226 + uint8_t en)
2227 +{
2228 + struct mc_command cmd = { 0 };
2229 +
2230 + /* prepare command */
2231 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_ENABLE,
2232 + cmd_flags,
2233 + token);
2234 + DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
2235 +
2236 +	/* send command to MC */
2237 + return mc_send_command(mc_io, &cmd);
2238 +}
2239 +
2240 +int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
2241 + uint32_t cmd_flags,
2242 + uint16_t token,
2243 + uint8_t irq_index,
2244 + uint32_t *mask)
2245 +{
2246 + struct mc_command cmd = { 0 };
2247 + int err;
2248 +
2249 + /* prepare command */
2250 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_MASK,
2251 + cmd_flags,
2252 + token);
2253 + DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index);
2254 +
2255 +	/* send command to MC */
2256 + err = mc_send_command(mc_io, &cmd);
2257 + if (err)
2258 + return err;
2259 +
2260 + /* retrieve response parameters */
2261 + DPDMAI_RSP_GET_IRQ_MASK(cmd, *mask);
2262 +
2263 + return 0;
2264 +}
2265 +
2266 +int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
2267 + uint32_t cmd_flags,
2268 + uint16_t token,
2269 + uint8_t irq_index,
2270 + uint32_t mask)
2271 +{
2272 + struct mc_command cmd = { 0 };
2273 +
2274 + /* prepare command */
2275 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_MASK,
2276 + cmd_flags,
2277 + token);
2278 + DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
2279 +
2280 +	/* send command to MC */
2281 + return mc_send_command(mc_io, &cmd);
2282 +}
2283 +
2284 +int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
2285 + uint32_t cmd_flags,
2286 + uint16_t token,
2287 + uint8_t irq_index,
2288 + uint32_t *status)
2289 +{
2290 + struct mc_command cmd = { 0 };
2291 + int err;
2292 +
2293 + /* prepare command */
2294 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_STATUS,
2295 + cmd_flags,
2296 + token);
2297 + DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
2298 +
2299 +	/* send command to MC */
2300 + err = mc_send_command(mc_io, &cmd);
2301 + if (err)
2302 + return err;
2303 +
2304 + /* retrieve response parameters */
2305 + DPDMAI_RSP_GET_IRQ_STATUS(cmd, *status);
2306 +
2307 + return 0;
2308 +}
2309 +
2310 +int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
2311 + uint32_t cmd_flags,
2312 + uint16_t token,
2313 + uint8_t irq_index,
2314 + uint32_t status)
2315 +{
2316 + struct mc_command cmd = { 0 };
2317 +
2318 + /* prepare command */
2319 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLEAR_IRQ_STATUS,
2320 + cmd_flags,
2321 + token);
2322 + DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
2323 +
2324 +	/* send command to MC */
2325 + return mc_send_command(mc_io, &cmd);
2326 +}
2327 +
2328 +int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
2329 + uint32_t cmd_flags,
2330 + uint16_t token,
2331 + struct dpdmai_attr *attr)
2332 +{
2333 + struct mc_command cmd = { 0 };
2334 + int err;
2335 +
2336 + /* prepare command */
2337 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR,
2338 + cmd_flags,
2339 + token);
2340 +
2341 +	/* send command to MC */
2342 + err = mc_send_command(mc_io, &cmd);
2343 + if (err)
2344 + return err;
2345 +
2346 + /* retrieve response parameters */
2347 + DPDMAI_RSP_GET_ATTR(cmd, attr);
2348 +
2349 + return 0;
2350 +}
2351 +
2352 +int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
2353 + uint32_t cmd_flags,
2354 + uint16_t token,
2355 + uint8_t priority,
2356 + const struct dpdmai_rx_queue_cfg *cfg)
2357 +{
2358 + struct mc_command cmd = { 0 };
2359 +
2360 + /* prepare command */
2361 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE,
2362 + cmd_flags,
2363 + token);
2364 + DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg);
2365 +
2366 +	/* send command to MC */
2367 + return mc_send_command(mc_io, &cmd);
2368 +}
2369 +
2370 +int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
2371 + uint32_t cmd_flags,
2372 + uint16_t token,
2373 + uint8_t priority, struct dpdmai_rx_queue_attr *attr)
2374 +{
2375 + struct mc_command cmd = { 0 };
2376 + int err;
2377 +
2378 + /* prepare command */
2379 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE,
2380 + cmd_flags,
2381 + token);
2382 + DPDMAI_CMD_GET_RX_QUEUE(cmd, priority);
2383 +
2384 +	/* send command to MC */
2385 + err = mc_send_command(mc_io, &cmd);
2386 + if (err)
2387 + return err;
2388 +
2389 + /* retrieve response parameters */
2390 + DPDMAI_RSP_GET_RX_QUEUE(cmd, attr);
2391 +
2392 + return 0;
2393 +}
2394 +
2395 +int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
2396 + uint32_t cmd_flags,
2397 + uint16_t token,
2398 + uint8_t priority,
2399 + struct dpdmai_tx_queue_attr *attr)
2400 +{
2401 + struct mc_command cmd = { 0 };
2402 + int err;
2403 +
2404 + /* prepare command */
2405 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE,
2406 + cmd_flags,
2407 + token);
2408 + DPDMAI_CMD_GET_TX_QUEUE(cmd, priority);
2409 +
2410 +	/* send command to MC */
2411 + err = mc_send_command(mc_io, &cmd);
2412 + if (err)
2413 + return err;
2414 +
2415 + /* retrieve response parameters */
2416 + DPDMAI_RSP_GET_TX_QUEUE(cmd, attr);
2417 +
2418 + return 0;
2419 +}
2420 diff --git a/drivers/dma/dpaa2-qdma/fsl_dpdmai.h b/drivers/dma/dpaa2-qdma/fsl_dpdmai.h
2421 new file mode 100644
2422 index 00000000..e931ce16
2423 --- /dev/null
2424 +++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai.h
2425 @@ -0,0 +1,521 @@
2426 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
2427 + *
2428 + * Redistribution and use in source and binary forms, with or without
2429 + * modification, are permitted provided that the following conditions are met:
2430 + * * Redistributions of source code must retain the above copyright
2431 + * notice, this list of conditions and the following disclaimer.
2432 + * * Redistributions in binary form must reproduce the above copyright
2433 + * notice, this list of conditions and the following disclaimer in the
2434 + * documentation and/or other materials provided with the distribution.
2435 + * * Neither the name of the above-listed copyright holders nor the
2436 + * names of any contributors may be used to endorse or promote products
2437 + * derived from this software without specific prior written permission.
2438 + *
2439 + *
2440 + * ALTERNATIVELY, this software may be distributed under the terms of the
2441 + * GNU General Public License ("GPL") as published by the Free Software
2442 + * Foundation, either version 2 of that License or (at your option) any
2443 + * later version.
2444 + *
2445 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
2446 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2447 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2448 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
2449 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2450 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2451 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2452 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
2453 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
2454 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
2455 + * POSSIBILITY OF SUCH DAMAGE.
2456 + */
2457 +#ifndef __FSL_DPDMAI_H
2458 +#define __FSL_DPDMAI_H
2459 +
2460 +struct fsl_mc_io;
2461 +
2462 +/* Data Path DMA Interface API
2463 + * Contains initialization APIs and runtime control APIs for DPDMAI
2464 + */
2465 +
2466 +/* General DPDMAI macros */
2467 +
2468 +/**
2469 + * Maximum number of Tx/Rx priorities per DPDMAI object
2470 + */
2471 +#define DPDMAI_PRIO_NUM 2
2472 +
2473 +/**
2474 + * All queues considered; see dpdmai_set_rx_queue()
2475 + */
2476 +#define DPDMAI_ALL_QUEUES	((uint8_t)(-1))
2477 +
2478 +/**
2479 + * dpdmai_open() - Open a control session for the specified object
2480 + * @mc_io: Pointer to MC portal's I/O object
2481 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2482 + * @dpdmai_id: DPDMAI unique ID
2483 + * @token: Returned token; use in subsequent API calls
2484 + *
2485 + * This function can be used to open a control session for an
2486 + * already created object; an object may have been declared in
2487 + * the DPL or by calling the dpdmai_create() function.
2488 + * This function returns a unique authentication token,
2489 + * associated with the specific object ID and the specific MC
2490 + * portal; this token must be used in all subsequent commands for
2491 + * this specific object.
2492 + *
2493 + * Return: '0' on Success; Error code otherwise.
2494 + */
2495 +int dpdmai_open(struct fsl_mc_io *mc_io,
2496 + uint32_t cmd_flags,
2497 + int dpdmai_id,
2498 + uint16_t *token);
2499 +
2500 +/**
2501 + * dpdmai_close() - Close the control session of the object
2502 + * @mc_io: Pointer to MC portal's I/O object
2503 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2504 + * @token: Token of DPDMAI object
2505 + *
2506 + * After this function is called, no further operations are
2507 + * allowed on the object without opening a new control session.
2508 + *
2509 + * Return: '0' on Success; Error code otherwise.
2510 + */
2511 +int dpdmai_close(struct fsl_mc_io *mc_io,
2512 + uint32_t cmd_flags,
2513 + uint16_t token);
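These two calls bracket everything else in this header: open() yields the token that authenticates every subsequent command, and close() invalidates it. A hedged usage sketch (mc_io would come from fsl_mc_portal_allocate() and dpdmai_id from the fsl-mc device; the 0 means no MC_CMD_FLAG_ flags):

	uint16_t token;
	int err;

	err = dpdmai_open(mc_io, 0, dpdmai_id, &token);
	if (err)
		return err;

	/* ... enable, queue setup, etc., all authenticated by 'token' ... */

	err = dpdmai_close(mc_io, 0, token);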
2514 +
2515 +/**
2516 + * struct dpdmai_cfg - Structure representing DPDMAI configuration
2517 + * @priorities: Priorities for the DMA hardware processing; valid priorities are
2518 + * configured with values 1-8; the entry following last valid entry
2519 + * should be configured with 0
2520 + */
2521 +struct dpdmai_cfg {
2522 + uint8_t priorities[DPDMAI_PRIO_NUM];
2523 +};
2524 +
2525 +/**
2526 + * dpdmai_create() - Create the DPDMAI object
2527 + * @mc_io: Pointer to MC portal's I/O object
2528 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2529 + * @cfg: Configuration structure
2530 + * @token: Returned token; use in subsequent API calls
2531 + *
2532 + * Create the DPDMAI object, allocate required resources and
2533 + * perform required initialization.
2534 + *
2535 + * The object can be created either by declaring it in the
2536 + * DPL file, or by calling this function.
2537 + *
2538 + * This function returns a unique authentication token,
2539 + * associated with the specific object ID and the specific MC
2540 + * portal; this token must be used in all subsequent calls to
2541 + * this specific object. For objects that are created using the
2542 + * DPL file, call dpdmai_open() function to get an authentication
2543 + * token first.
2544 + *
2545 + * Return: '0' on Success; Error code otherwise.
2546 + */
2547 +int dpdmai_create(struct fsl_mc_io *mc_io,
2548 + uint32_t cmd_flags,
2549 + const struct dpdmai_cfg *cfg,
2550 + uint16_t *token);
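Following the dpdmai_cfg rules above (valid priorities 1-8, zero-terminating the array when fewer than DPDMAI_PRIO_NUM entries are used), a hedged creation sketch:

	struct dpdmai_cfg cfg = {
		.priorities = { 1, 2 },	/* both DPDMAI_PRIO_NUM slots used */
	};
	uint16_t token;
	int err;

	err = dpdmai_create(mc_io, 0, &cfg, &token);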
2551 +
2552 +/**
2553 + * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
2554 + * @mc_io: Pointer to MC portal's I/O object
2555 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2556 + * @token: Token of DPDMAI object
2557 + *
2558 + * Return: '0' on Success; error code otherwise.
2559 + */
2560 +int dpdmai_destroy(struct fsl_mc_io *mc_io,
2561 + uint32_t cmd_flags,
2562 + uint16_t token);
2563 +
2564 +/**
2565 + * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
2566 + * @mc_io: Pointer to MC portal's I/O object
2567 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2568 + * @token: Token of DPDMAI object
2569 + *
2570 + * Return: '0' on Success; Error code otherwise.
2571 + */
2572 +int dpdmai_enable(struct fsl_mc_io *mc_io,
2573 + uint32_t cmd_flags,
2574 + uint16_t token);
2575 +
2576 +/**
2577 + * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames.
2578 + * @mc_io: Pointer to MC portal's I/O object
2579 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2580 + * @token: Token of DPDMAI object
2581 + *
2582 + * Return: '0' on Success; Error code otherwise.
2583 + */
2584 +int dpdmai_disable(struct fsl_mc_io *mc_io,
2585 + uint32_t cmd_flags,
2586 + uint16_t token);
2587 +
2588 +/**
2589 + * dpdmai_is_enabled() - Check if the DPDMAI is enabled.
2590 + * @mc_io: Pointer to MC portal's I/O object
2591 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2592 + * @token: Token of DPDMAI object
2593 + * @en: Returns '1' if object is enabled; '0' otherwise
2594 + *
2595 + * Return: '0' on Success; Error code otherwise.
2596 + */
2597 +int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
2598 + uint32_t cmd_flags,
2599 + uint16_t token,
2600 + int *en);
2601 +
2602 +/**
2603 + * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state.
2604 + * @mc_io: Pointer to MC portal's I/O object
2605 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2606 + * @token: Token of DPDMAI object
2607 + *
2608 + * Return: '0' on Success; Error code otherwise.
2609 + */
2610 +int dpdmai_reset(struct fsl_mc_io *mc_io,
2611 + uint32_t cmd_flags,
2612 + uint16_t token);
2613 +
2614 +/**
2615 + * struct dpdmai_irq_cfg - IRQ configuration
2616 + * @addr: Address that must be written to signal a message-based interrupt
2617 + * @val:	Value to write into the 'addr' address
2618 + * @irq_num: A user defined number associated with this IRQ
2619 + */
2620 +struct dpdmai_irq_cfg {
2621 + uint64_t addr;
2622 + uint32_t val;
2623 + int irq_num;
2624 +};
2625 +
2626 +/**
2627 + * dpdmai_set_irq() - Set IRQ information for the DPDMAI to trigger an interrupt.
2628 + * @mc_io: Pointer to MC portal's I/O object
2629 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2630 + * @token: Token of DPDMAI object
2631 + * @irq_index: Identifies the interrupt index to configure
2632 + * @irq_cfg: IRQ configuration
2633 + *
2634 + * Return: '0' on Success; Error code otherwise.
2635 + */
2636 +int dpdmai_set_irq(struct fsl_mc_io *mc_io,
2637 + uint32_t cmd_flags,
2638 + uint16_t token,
2639 + uint8_t irq_index,
2640 + struct dpdmai_irq_cfg *irq_cfg);
2641 +
2642 +/**
2643 + * dpdmai_get_irq() - Get IRQ information from the DPDMAI
2644 + *
2645 + * @mc_io: Pointer to MC portal's I/O object
2646 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2647 + * @token: Token of DPDMAI object
2648 + * @irq_index: The interrupt index to configure
2649 + * @type: Interrupt type: 0 represents message interrupt
2650 + * type (both irq_addr and irq_val are valid)
2651 + * @irq_cfg: IRQ attributes
2652 + *
2653 + * Return: '0' on Success; Error code otherwise.
2654 + */
2655 +int dpdmai_get_irq(struct fsl_mc_io *mc_io,
2656 + uint32_t cmd_flags,
2657 + uint16_t token,
2658 + uint8_t irq_index,
2659 + int *type,
2660 + struct dpdmai_irq_cfg *irq_cfg);
2661 +
2662 +/**
2663 + * dpdmai_set_irq_enable() - Set overall interrupt state.
2664 + * @mc_io: Pointer to MC portal's I/O object
2665 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2666 + * @token: Token of DPDMAI object
2667 + * @irq_index: The interrupt index to configure
2668 + * @en: Interrupt state - enable = 1, disable = 0
2669 + *
2670 + * Allows GPP software to control when interrupts are generated.
2671 + * Each interrupt can have up to 32 causes. The enable/disable setting
2672 + * controls the overall interrupt state; if the interrupt is disabled,
2673 + * no cause will trigger an interrupt.
2674 + *
2675 + * Return: '0' on Success; Error code otherwise.
2676 + */
2677 +int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
2678 + uint32_t cmd_flags,
2679 + uint16_t token,
2680 + uint8_t irq_index,
2681 + uint8_t en);
2682 +
2683 +/**
2684 + * dpdmai_get_irq_enable() - Get overall interrupt state
2685 + * @mc_io: Pointer to MC portal's I/O object
2686 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2687 + * @token: Token of DPDMAI object
2688 + * @irq_index: The interrupt index to configure
2689 + * @en: Returned Interrupt state - enable = 1, disable = 0
2690 + *
2691 + * Return: '0' on Success; Error code otherwise.
2692 + */
2693 +int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
2694 + uint32_t cmd_flags,
2695 + uint16_t token,
2696 + uint8_t irq_index,
2697 + uint8_t *en);
2698 +
2699 +/**
2700 + * dpdmai_set_irq_mask() - Set interrupt mask.
2701 + * @mc_io: Pointer to MC portal's I/O object
2702 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2703 + * @token: Token of DPDMAI object
2704 + * @irq_index: The interrupt index to configure
2705 + * @mask: event mask to trigger interrupt;
2706 + * each bit:
2707 + * 0 = ignore event
2708 + * 1 = consider event for asserting IRQ
2709 + *
2710 + * Every interrupt can have up to 32 causes and the interrupt model supports
2711 + * masking/unmasking each cause independently
2712 + *
2713 + * Return: '0' on Success; Error code otherwise.
2714 + */
2715 +int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
2716 + uint32_t cmd_flags,
2717 + uint16_t token,
2718 + uint8_t irq_index,
2719 + uint32_t mask);
2720 +
2721 +/**
2722 + * dpdmai_get_irq_mask() - Get interrupt mask.
2723 + * @mc_io: Pointer to MC portal's I/O object
2724 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2725 + * @token: Token of DPDMAI object
2726 + * @irq_index: The interrupt index to configure
2727 + * @mask: Returned event mask to trigger interrupt
2728 + *
2729 + * Every interrupt can have up to 32 causes and the interrupt model supports
2730 + * masking/unmasking each cause independently
2731 + *
2732 + * Return: '0' on Success; Error code otherwise.
2733 + */
2734 +int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
2735 + uint32_t cmd_flags,
2736 + uint16_t token,
2737 + uint8_t irq_index,
2738 + uint32_t *mask);
2739 +
2740 +/**
2741 + * dpdmai_get_irq_status() - Get the current status of any pending interrupts
2742 + * @mc_io: Pointer to MC portal's I/O object
2743 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2744 + * @token: Token of DPDMAI object
2745 + * @irq_index: The interrupt index to configure
2746 + * @status: Returned interrupts status - one bit per cause:
2747 + * 0 = no interrupt pending
2748 + * 1 = interrupt pending
2749 + *
2750 + * Return: '0' on Success; Error code otherwise.
2751 + */
2752 +int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
2753 + uint32_t cmd_flags,
2754 + uint16_t token,
2755 + uint8_t irq_index,
2756 + uint32_t *status);
2757 +
2758 +/**
2759 + * dpdmai_clear_irq_status() - Clear a pending interrupt's status
2760 + * @mc_io: Pointer to MC portal's I/O object
2761 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2762 + * @token: Token of DPDMAI object
2763 + * @irq_index: The interrupt index to configure
2764 + * @status: bits to clear (W1C) - one bit per cause:
2765 + * 0 = don't change
2766 + * 1 = clear status bit
2767 + *
2768 + * Return: '0' on Success; Error code otherwise.
2769 + */
2770 +int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
2771 + uint32_t cmd_flags,
2772 + uint16_t token,
2773 + uint8_t irq_index,
2774 + uint32_t status);
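The status/clear pair is typically used together from an interrupt handler. A hedged sketch (token, irq_index and the MC portal come from the surrounding driver; 'status' is initialized because the command macro also encodes its incoming value):

	uint32_t status = 0;
	int err;

	err = dpdmai_get_irq_status(mc_io, 0, token, irq_index, &status);
	if (err)
		return err;

	/* ... service whichever cause bits are set in 'status' ... */

	/* W1C semantics: writing the handled bits back clears them */
	err = dpdmai_clear_irq_status(mc_io, 0, token, irq_index, status);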
2775 +
2776 +/**
2777 + * struct dpdmai_attr - Structure representing DPDMAI attributes
2778 + * @id: DPDMAI object ID
2779 + * @version: DPDMAI version
2780 + * @num_of_priorities: number of priorities
2781 + */
2782 +struct dpdmai_attr {
2783 + int id;
2784 + /**
2785 + * struct version - DPDMAI version
2786 + * @major: DPDMAI major version
2787 + * @minor: DPDMAI minor version
2788 + */
2789 + struct {
2790 + uint16_t major;
2791 + uint16_t minor;
2792 + } version;
2793 + uint8_t num_of_priorities;
2794 +};
2795 +
2796 +/**
2797 + * dpdmai_get_attributes() - Retrieve DPDMAI attributes.
2798 + * @mc_io: Pointer to MC portal's I/O object
2799 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2800 + * @token: Token of DPDMAI object
2801 + * @attr: Returned object's attributes
2802 + *
2803 + * Return: '0' on Success; Error code otherwise.
2804 + */
2805 +int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
2806 + uint32_t cmd_flags,
2807 + uint16_t token,
2808 + struct dpdmai_attr *attr);
2809 +
2810 +/**
2811 + * enum dpdmai_dest - DPDMAI destination types
2812 + * @DPDMAI_DEST_NONE: Unassigned destination; the queue is set in parked mode
2813 + * and does not generate FQDAN notifications; user is expected to dequeue
2814 + * from the queue based on polling or other user-defined method
2815 + * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
2816 + * notifications to the specified DPIO; user is expected to dequeue
2817 + * from the queue only after notification is received
2818 + * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate
2819 + * FQDAN notifications, but is connected to the specified DPCON object;
2820 + * user is expected to dequeue from the DPCON channel
2821 + */
2822 +enum dpdmai_dest {
2823 + DPDMAI_DEST_NONE = 0,
2824 + DPDMAI_DEST_DPIO = 1,
2825 + DPDMAI_DEST_DPCON = 2
2826 +};
2827 +
2828 +/**
2829 + * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters
2830 + * @dest_type: Destination type
2831 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
2832 + * @priority: Priority selection within the DPIO or DPCON channel; valid values
2833 + * are 0-1 or 0-7, depending on the number of priorities in that
2834 + * channel; not relevant for 'DPDMAI_DEST_NONE' option
2835 + */
2836 +struct dpdmai_dest_cfg {
2837 + enum dpdmai_dest dest_type;
2838 + int dest_id;
2839 + uint8_t priority;
2840 +};
2841 +
2842 +/* DPDMAI queue modification options */
2843 +
2844 +/**
2845 + * Select to modify the user's context associated with the queue
2846 + */
2847 +#define DPDMAI_QUEUE_OPT_USER_CTX 0x00000001
2848 +
2849 +/**
2850 + * Select to modify the queue's destination
2851 + */
2852 +#define DPDMAI_QUEUE_OPT_DEST 0x00000002
2853 +
2854 +/**
2855 + * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration
2856 + * @options: Flags representing the suggested modifications to the queue;
2857 + * Use any combination of 'DPDMAI_QUEUE_OPT_<X>' flags
2858 + * @user_ctx: User context value provided in the frame descriptor of each
2859 + * dequeued frame;
2860 + * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained in 'options'
2861 + * @dest_cfg: Queue destination parameters;
2862 + * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options'
2863 + */
2864 +struct dpdmai_rx_queue_cfg {
2865 + uint32_t options;
2866 + uint64_t user_ctx;
2867 + struct dpdmai_dest_cfg dest_cfg;
2869 +};
2870 +
2871 +/**
2872 + * dpdmai_set_rx_queue() - Set Rx queue configuration
2873 + * @mc_io: Pointer to MC portal's I/O object
2874 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2875 + * @token: Token of DPDMAI object
2876 + * @priority: Select the queue relative to number of
2877 + * priorities configured at DPDMAI creation; use
2878 + * DPDMAI_ALL_QUEUES to configure all Rx queues
2879 + * identically.
2880 + * @cfg: Rx queue configuration
2881 + *
2882 + * Return: '0' on Success; Error code otherwise.
2883 + */
2884 +int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
2885 + uint32_t cmd_flags,
2886 + uint16_t token,
2887 + uint8_t priority,
2888 + const struct dpdmai_rx_queue_cfg *cfg);
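Putting the options flags, destination and user context together, a hedged sketch that points every Rx queue at one DPIO; my_ctx and dpio_id are placeholders for values the caller already owns:

	struct dpdmai_rx_queue_cfg rx_cfg = {
		.options = DPDMAI_QUEUE_OPT_USER_CTX | DPDMAI_QUEUE_OPT_DEST,
		.user_ctx = (uint64_t)my_ctx,	/* echoed in each dequeued FD */
		.dest_cfg = {
			.dest_type = DPDMAI_DEST_DPIO,	/* FQDAN to this DPIO */
			.dest_id = dpio_id,
			.priority = 0,
		},
	};
	int err;

	err = dpdmai_set_rx_queue(mc_io, 0, token, DPDMAI_ALL_QUEUES, &rx_cfg);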
2889 +
2890 +/**
2891 + * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues
2892 + * @user_ctx: User context value provided in the frame descriptor of each
2893 + * dequeued frame
2894 + * @dest_cfg: Queue destination configuration
2895 + * @fqid: Virtual FQID value to be used for dequeue operations
2896 + */
2897 +struct dpdmai_rx_queue_attr {
2898 + uint64_t user_ctx;
2899 + struct dpdmai_dest_cfg dest_cfg;
2900 + uint32_t fqid;
2901 +};
2902 +
2903 +/**
2904 + * dpdmai_get_rx_queue() - Retrieve Rx queue attributes.
2905 + * @mc_io: Pointer to MC portal's I/O object
2906 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2907 + * @token: Token of DPDMAI object
2908 + * @priority: Select the queue relative to number of
2909 + * priorities configured at DPDMAI creation
2910 + * @attr: Returned Rx queue attributes
2911 + *
2912 + * Return: '0' on Success; Error code otherwise.
2913 + */
2914 +int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
2915 + uint32_t cmd_flags,
2916 + uint16_t token,
2917 + uint8_t priority,
2918 + struct dpdmai_rx_queue_attr *attr);
2919 +
2920 +/**
2921 + * struct dpdmai_tx_queue_attr - Structure representing attributes of Tx queues
2922 + * @fqid: Virtual FQID to be used for sending frames to DMA hardware
2923 + */
2925 +struct dpdmai_tx_queue_attr {
2926 + uint32_t fqid;
2927 +};
2928 +
2929 +/**
2930 + * dpdmai_get_tx_queue() - Retrieve Tx queue attributes.
2931 + * @mc_io: Pointer to MC portal's I/O object
2932 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2933 + * @token: Token of DPDMAI object
2934 + * @priority: Select the queue relative to number of
2935 + * priorities configured at DPDMAI creation
2936 + * @attr: Returned Tx queue attributes
2937 + *
2938 + * Return: '0' on Success; Error code otherwise.
2939 + */
2940 +int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
2941 + uint32_t cmd_flags,
2942 + uint16_t token,
2943 + uint8_t priority,
2944 + struct dpdmai_tx_queue_attr *attr);
2945 +
2946 +#endif /* __FSL_DPDMAI_H */
2947 diff --git a/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h b/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
2948 new file mode 100644
2949 index 00000000..7d403c01
2950 --- /dev/null
2951 +++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
2952 @@ -0,0 +1,222 @@
2953 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
2954 + *
2955 + * Redistribution and use in source and binary forms, with or without
2956 + * modification, are permitted provided that the following conditions are met:
2957 + * * Redistributions of source code must retain the above copyright
2958 + * notice, this list of conditions and the following disclaimer.
2959 + * * Redistributions in binary form must reproduce the above copyright
2960 + * notice, this list of conditions and the following disclaimer in the
2961 + * documentation and/or other materials provided with the distribution.
2962 + * * Neither the name of the above-listed copyright holders nor the
2963 + * names of any contributors may be used to endorse or promote products
2964 + * derived from this software without specific prior written permission.
2965 + *
2966 + *
2967 + * ALTERNATIVELY, this software may be distributed under the terms of the
2968 + * GNU General Public License ("GPL") as published by the Free Software
2969 + * Foundation, either version 2 of that License or (at your option) any
2970 + * later version.
2971 + *
2972 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
2973 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2974 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2975 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
2976 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2977 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2978 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2979 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
2980 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
2981 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
2982 + * POSSIBILITY OF SUCH DAMAGE.
2983 + */
2984 +#ifndef _FSL_DPDMAI_CMD_H
2985 +#define _FSL_DPDMAI_CMD_H
2986 +
2987 +/* DPDMAI Version */
2988 +#define DPDMAI_VER_MAJOR 2
2989 +#define DPDMAI_VER_MINOR 2
2990 +
2991 +#define DPDMAI_CMD_BASE_VERSION 0
2992 +#define DPDMAI_CMD_ID_OFFSET 4
2993 +
2994 +/* Command IDs */
2995 +#define DPDMAI_CMDID_CLOSE ((0x800 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2996 +#define DPDMAI_CMDID_OPEN ((0x80E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2997 +#define DPDMAI_CMDID_CREATE ((0x90E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2998 +#define DPDMAI_CMDID_DESTROY ((0x900 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2999 +
3000 +#define DPDMAI_CMDID_ENABLE ((0x002 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
3001 +#define DPDMAI_CMDID_DISABLE ((0x003 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
3002 +#define DPDMAI_CMDID_GET_ATTR ((0x004 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
3003 +#define DPDMAI_CMDID_RESET ((0x005 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
3004 +#define DPDMAI_CMDID_IS_ENABLED ((0x006 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
3005 +
3006 +#define DPDMAI_CMDID_SET_IRQ ((0x010 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
3007 +#define DPDMAI_CMDID_GET_IRQ ((0x011 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
3008 +#define DPDMAI_CMDID_SET_IRQ_ENABLE ((0x012 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
3009 +#define DPDMAI_CMDID_GET_IRQ_ENABLE ((0x013 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
3010 +#define DPDMAI_CMDID_SET_IRQ_MASK ((0x014 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
3011 +#define DPDMAI_CMDID_GET_IRQ_MASK ((0x015 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
3012 +#define DPDMAI_CMDID_GET_IRQ_STATUS ((0x016 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
3013 +#define DPDMAI_CMDID_CLEAR_IRQ_STATUS ((0x017 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
3014 +
3015 +#define DPDMAI_CMDID_SET_RX_QUEUE ((0x1A0 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
3016 +#define DPDMAI_CMDID_GET_RX_QUEUE ((0x1A1 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
3017 +#define DPDMAI_CMDID_GET_TX_QUEUE ((0x1A2 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
3018 +
3020 +#define MC_CMD_HDR_TOKEN_O 32 /* Token field offset */
3021 +#define MC_CMD_HDR_TOKEN_S 16 /* Token field size */
3022 +
3024 +#define MAKE_UMASK64(_width) \
3025 + ((uint64_t)((_width) < 64 ? ((uint64_t)1 << (_width)) - 1 : \
3026 + (uint64_t)-1))
3027 +
3028 +static inline uint64_t mc_enc(int lsoffset, int width, uint64_t val)
3029 +{
3030 + return (uint64_t)(((uint64_t)val & MAKE_UMASK64(width)) << lsoffset);
3031 +}
3032 +
3033 +static inline uint64_t mc_dec(uint64_t val, int lsoffset, int width)
3034 +{
3035 + return (uint64_t)((val >> lsoffset) & MAKE_UMASK64(width));
3036 +}
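Since every command word below is built from these two helpers, a worked example makes the packing concrete:

/* Worked example (easy to check by hand):
 *   mc_enc(32, 8, 0xAB)                  == 0x000000AB00000000ULL
 *   mc_dec(0x000000AB00000000ULL, 32, 8) == 0xAB
 * MAKE_UMASK64(8) is 0xFF, so a value wider than the field is truncated
 * before being shifted into place at bits 39:32.
 */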
3037 +
3038 +#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
3039 + ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
3040 +
3041 +#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \
3042 +	(_arg = (_type)mc_dec((_cmd).params[_param], (_offset), (_width)))
3043 +
3044 +#define MC_CMD_HDR_READ_TOKEN(_hdr) \
3045 + ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S))
3046 +
3047 +/* cmd, param, offset, width, type, arg_name */
3048 +#define DPDMAI_CMD_OPEN(cmd, dpdmai_id) \
3049 + MC_CMD_OP(cmd, 0, 0, 32, int, dpdmai_id)
3050 +
3051 +/* cmd, param, offset, width, type, arg_name */
3052 +#define DPDMAI_CMD_CREATE(cmd, cfg) \
3053 +do { \
3054 + MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->priorities[0]);\
3055 + MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priorities[1]);\
3056 +} while (0)
3057 +
3058 +/* cmd, param, offset, width, type, arg_name */
3059 +#define DPDMAI_RSP_IS_ENABLED(cmd, en) \
3060 + MC_RSP_OP(cmd, 0, 0, 1, int, en)
3061 +
3062 +/* cmd, param, offset, width, type, arg_name */
3063 +#define DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
3064 +do { \
3065 + MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
3066 + MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
3067 + MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
3068 + MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
3069 +} while (0)
3070 +
3071 +/* cmd, param, offset, width, type, arg_name */
3072 +#define DPDMAI_CMD_GET_IRQ(cmd, irq_index) \
3073 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
3074 +
3075 +/* cmd, param, offset, width, type, arg_name */
3076 +#define DPDMAI_RSP_GET_IRQ(cmd, type, irq_cfg) \
3077 +do { \
3078 + MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
3079 + MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
3080 + MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
3081 + MC_RSP_OP(cmd, 2, 32, 32, int, type); \
3082 +} while (0)
3083 +
3084 +/* cmd, param, offset, width, type, arg_name */
3085 +#define DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \
3086 +do { \
3087 + MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \
3088 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
3089 +} while (0)
3090 +
3091 +/* cmd, param, offset, width, type, arg_name */
3092 +#define DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
3093 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
3094 +
3095 +/* cmd, param, offset, width, type, arg_name */
3096 +#define DPDMAI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \
3097 + MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state)
3098 +
3099 +/* cmd, param, offset, width, type, arg_name */
3100 +#define DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
3101 +do { \
3102 + MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
3103 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
3104 +} while (0)
3105 +
3106 +/* cmd, param, offset, width, type, arg_name */
3107 +#define DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index) \
3108 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
3109 +
3110 +/* cmd, param, offset, width, type, arg_name */
3111 +#define DPDMAI_RSP_GET_IRQ_MASK(cmd, mask) \
3112 + MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
3113 +
3114 +/* cmd, param, offset, width, type, arg_name */
3115 +#define DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
3116 +do { \
3117 + MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
3118 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
3119 +} while (0)
3120 +
3121 +/* cmd, param, offset, width, type, arg_name */
3122 +#define DPDMAI_RSP_GET_IRQ_STATUS(cmd, status) \
3123 + MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
3124 +
3125 +/* cmd, param, offset, width, type, arg_name */
3126 +#define DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
3127 +do { \
3128 + MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
3129 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
3130 +} while (0)
3131 +
3132 +/* cmd, param, offset, width, type, arg_name */
3133 +#define DPDMAI_RSP_GET_ATTR(cmd, attr) \
3134 +do { \
3135 + MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \
3136 + MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->num_of_priorities); \
3137 + MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
3138 + MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
3139 +} while (0)
3140 +
3141 +/* cmd, param, offset, width, type, arg_name */
3142 +#define DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg) \
3143 +do { \
3144 + MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
3145 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \
3146 + MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority); \
3147 + MC_CMD_OP(cmd, 0, 48, 4, enum dpdmai_dest, cfg->dest_cfg.dest_type); \
3148 + MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
3149 + MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
3150 +} while (0)
3151 +
3152 +/* cmd, param, offset, width, type, arg_name */
3153 +#define DPDMAI_CMD_GET_RX_QUEUE(cmd, priority) \
3154 + MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
3155 +
3156 +/* cmd, param, offset, width, type, arg_name */
3157 +#define DPDMAI_RSP_GET_RX_QUEUE(cmd, attr) \
3158 +do { \
3159 + MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\
3160 + MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
3161 + MC_RSP_OP(cmd, 0, 48, 4, enum dpdmai_dest, attr->dest_cfg.dest_type);\
3162 + MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx);\
3163 + MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\
3164 +} while (0)
3165 +
3166 +/* cmd, param, offset, width, type, arg_name */
3167 +#define DPDMAI_CMD_GET_TX_QUEUE(cmd, priority) \
3168 + MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
3169 +
3170 +/* cmd, param, offset, width, type, arg_name */
3171 +#define DPDMAI_RSP_GET_TX_QUEUE(cmd, attr) \
3172 + MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->fqid)
3173 +
3174 +#endif /* _FSL_DPDMAI_CMD_H */
3175 diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
3176 new file mode 100644
3177 index 00000000..6c4c2813
3178 --- /dev/null
3179 +++ b/drivers/dma/fsl-qdma.c
3180 @@ -0,0 +1,1201 @@
3181 +/*
3182 + * drivers/dma/fsl-qdma.c
3183 + *
3184 + * Copyright 2014-2015 Freescale Semiconductor, Inc.
3185 + *
3186 + * Driver for the Freescale qDMA engine with software command queue mode.
3187 + * Channel virtualization is supported through enqueuing of DMA jobs to,
3188 + * or dequeuing DMA jobs from, different work queues.
3189 + * This module can be found on Freescale LS SoCs.
3190 + *
3191 + * This program is free software; you can redistribute it and/or modify it
3192 + * under the terms of the GNU General Public License as published by the
3193 + * Free Software Foundation; either version 2 of the License, or (at your
3194 + * option) any later version.
3195 + */
3196 +
3197 +#include <asm/cacheflush.h>
3198 +#include <linux/clk.h>
3199 +#include <linux/delay.h>
3200 +#include <linux/dma-mapping.h>
3201 +#include <linux/dmapool.h>
3202 +#include <linux/init.h>
3203 +#include <linux/interrupt.h>
3204 +#include <linux/module.h>
3205 +#include <linux/of.h>
3206 +#include <linux/of_address.h>
3207 +#include <linux/of_device.h>
3208 +#include <linux/of_dma.h>
3209 +#include <linux/of_irq.h>
3210 +#include <linux/slab.h>
3211 +#include <linux/spinlock.h>
3212 +
3213 +#include "virt-dma.h"
3214 +
3215 +#define FSL_QDMA_DMR 0x0
3216 +#define FSL_QDMA_DSR 0x4
3217 +#define FSL_QDMA_DEIER 0xe00
3218 +#define FSL_QDMA_DEDR 0xe04
3219 +#define FSL_QDMA_DECFDW0R 0xe10
3220 +#define FSL_QDMA_DECFDW1R 0xe14
3221 +#define FSL_QDMA_DECFDW2R 0xe18
3222 +#define FSL_QDMA_DECFDW3R 0xe1c
3223 +#define FSL_QDMA_DECFQIDR 0xe30
3224 +#define FSL_QDMA_DECBR 0xe34
3225 +
3226 +#define FSL_QDMA_BCQMR(x) (0xc0 + 0x100 * (x))
3227 +#define FSL_QDMA_BCQSR(x) (0xc4 + 0x100 * (x))
3228 +#define FSL_QDMA_BCQEDPA_SADDR(x) (0xc8 + 0x100 * (x))
3229 +#define FSL_QDMA_BCQDPA_SADDR(x) (0xcc + 0x100 * (x))
3230 +#define FSL_QDMA_BCQEEPA_SADDR(x) (0xd0 + 0x100 * (x))
3231 +#define FSL_QDMA_BCQEPA_SADDR(x) (0xd4 + 0x100 * (x))
3232 +#define FSL_QDMA_BCQIER(x) (0xe0 + 0x100 * (x))
3233 +#define FSL_QDMA_BCQIDR(x) (0xe4 + 0x100 * (x))
3234 +
3235 +#define FSL_QDMA_SQDPAR 0x80c
3236 +#define FSL_QDMA_SQEPAR 0x814
3237 +#define FSL_QDMA_BSQMR 0x800
3238 +#define FSL_QDMA_BSQSR 0x804
3239 +#define FSL_QDMA_BSQICR 0x828
3240 +#define FSL_QDMA_CQMR 0xa00
3241 +#define FSL_QDMA_CQDSCR1 0xa08
3242 +#define FSL_QDMA_CQDSCR2 0xa0c
3243 +#define FSL_QDMA_CQIER 0xa10
3244 +#define FSL_QDMA_CQEDR 0xa14
3245 +#define FSL_QDMA_SQCCMR 0xa20
3246 +
3247 +#define FSL_QDMA_SQICR_ICEN
3248 +
3249 +#define FSL_QDMA_CQIDR_CQT 0xff000000
3250 +#define FSL_QDMA_CQIDR_SQPE 0x800000
3251 +#define FSL_QDMA_CQIDR_SQT 0x8000
3252 +
3253 +#define FSL_QDMA_BCQIER_CQTIE 0x8000
3254 +#define FSL_QDMA_BCQIER_CQPEIE 0x800000
3255 +#define FSL_QDMA_BSQICR_ICEN 0x80000000
3256 +#define FSL_QDMA_BSQICR_ICST(x) ((x) << 16)
3257 +#define FSL_QDMA_CQIER_MEIE 0x80000000
3258 +#define FSL_QDMA_CQIER_TEIE 0x1
3259 +#define FSL_QDMA_SQCCMR_ENTER_WM 0x200000
3260 +
3261 +#define FSL_QDMA_QUEUE_MAX 8
3262 +
3263 +#define FSL_QDMA_BCQMR_EN 0x80000000
3264 +#define FSL_QDMA_BCQMR_EI 0x40000000
3265 +#define FSL_QDMA_BCQMR_CD_THLD(x) ((x) << 20)
3266 +#define FSL_QDMA_BCQMR_CQ_SIZE(x) ((x) << 16)
3267 +
3268 +#define FSL_QDMA_BCQSR_QF 0x10000
3269 +#define FSL_QDMA_BCQSR_XOFF 0x1
3270 +
3271 +#define FSL_QDMA_BSQMR_EN 0x80000000
3272 +#define FSL_QDMA_BSQMR_DI 0x40000000
3273 +#define FSL_QDMA_BSQMR_CQ_SIZE(x) ((x) << 16)
3274 +
3275 +#define FSL_QDMA_BSQSR_QE 0x20000
3276 +
3277 +#define FSL_QDMA_DMR_DQD 0x40000000
3278 +#define FSL_QDMA_DSR_DB 0x80000000
3279 +
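For illustration, a hedged sketch of programming one command-queue mode register with the bits above; the log2-based CQ_SIZE coding (relative to the 64-entry FSL_QDMA_CIRCULAR_DESC_SIZE_MIN defined just below) and the big-endian register access are assumptions, not taken from this patch. 'queue' refers to the struct fsl_qdma_queue defined later in this file:

	u32 reg;

	reg = FSL_QDMA_BCQMR_EN | FSL_QDMA_BCQMR_EI;
	reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(queue->n_cq) - 6); /* assumed coding */
	iowrite32be(reg, fsl_qdma->block_base + FSL_QDMA_BCQMR(queue->id));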
3280 +#define FSL_QDMA_BASE_BUFFER_SIZE 96
3281 +#define FSL_QDMA_EXPECT_SG_ENTRY_NUM 16
3282 +#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN 64
3283 +#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX 16384
3284 +#define FSL_QDMA_QUEUE_NUM_MAX 8
3285 +
3286 +#define FSL_QDMA_CMD_RWTTYPE 0x4
3287 +#define FSL_QDMA_CMD_LWC 0x2
3288 +
3289 +#define FSL_QDMA_CMD_RWTTYPE_OFFSET 28
3290 +#define FSL_QDMA_CMD_NS_OFFSET 27
3291 +#define FSL_QDMA_CMD_DQOS_OFFSET 24
3292 +#define FSL_QDMA_CMD_WTHROTL_OFFSET 20
3293 +#define FSL_QDMA_CMD_DSEN_OFFSET 19
3294 +#define FSL_QDMA_CMD_LWC_OFFSET 16
3295 +
3296 +#define FSL_QDMA_E_SG_TABLE 1
3297 +#define FSL_QDMA_E_DATA_BUFFER 0
3298 +#define FSL_QDMA_F_LAST_ENTRY 1
3299 +
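3300 +/*
3301 + * Last status-queue entry processed: the engine can report the same
3302 + * completion more than once, so fsl_qdma_queue_transfer_complete()
3303 + * compares each status entry against (pre_addr, pre_queue) to spot
3304 + * duplicates and retire them without touching the comp_used list.
3305 + */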
3300 +static u64 pre_addr, pre_queue;
3301 +
3302 +struct fsl_qdma_ccdf {
3303 + u8 status;
3304 + u32 rev1:22;
3305 + u32 ser:1;
3306 + u32 rev2:1;
3307 + u32 rev3:20;
3308 + u32 offset:9;
3309 + u32 format:3;
3310 + union {
3311 + struct {
3312 + u32 addr_lo; /* low 32-bits of 40-bit address */
3313 + u32 addr_hi:8; /* high 8-bits of 40-bit address */
3314 + u32 rev4:16;
3315 + u32 queue:3;
3316 + u32 rev5:3;
3317 + u32 dd:2; /* dynamic debug */
3318 + };
3319 + struct {
3320 + u64 addr:40;
3321 + /* More efficient address accessor */
3322 + u64 __notaddress:24;
3323 + };
3324 + };
3325 +} __packed;
3326 +
3327 +struct fsl_qdma_csgf {
3328 + u32 offset:13;
3329 + u32 rev1:19;
3330 + u32 length:30;
3331 + u32 f:1;
3332 + u32 e:1;
3333 + union {
3334 + struct {
3335 + u32 addr_lo; /* low 32-bits of 40-bit address */
3336 + u32 addr_hi:8; /* high 8-bits of 40-bit address */
3337 + u32 rev2:24;
3338 + };
3339 + struct {
3340 + u64 addr:40;
3341 + /* More efficient address accessor */
3342 + u64 __notaddress:24;
3343 + };
3344 + };
3345 +} __packed;
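3346 +
3347 +/*
3348 + * Note: the unions in the two descriptor formats above overlay one
3349 + * 40-bit "addr" bitfield on the addr_lo/addr_hi pair, so a full
3350 + * dma_addr_t can be stored in a single assignment (the "more
3351 + * efficient address accessor" mentioned in the field comments).
3352 + */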
3346 +
3347 +struct fsl_qdma_sdf {
3348 + u32 rev3:32;
3349 +	u32 ssd:12;	/* source stride distance */
3350 +	u32 sss:12;	/* source stride size */
3351 + u32 rev4:8;
3352 + u32 rev5:32;
3353 + u32 cmd;
3354 +} __packed;
3355 +
3356 +struct fsl_qdma_ddf {
3357 + u32 rev1:32;
3358 + u32 dsd:12; /* Destination stride distance */
3359 + u32 dss:12; /* Destination stride size */
3360 + u32 rev2:8;
3361 + u32 rev3:32;
3362 + u32 cmd;
3363 +} __packed;
3364 +
3365 +struct fsl_qdma_chan {
3366 + struct virt_dma_chan vchan;
3367 + struct virt_dma_desc vdesc;
3368 + enum dma_status status;
3369 + u32 slave_id;
3370 + struct fsl_qdma_engine *qdma;
3371 + struct fsl_qdma_queue *queue;
3372 + struct list_head qcomp;
3373 +};
3374 +
3375 +struct fsl_qdma_queue {
3376 + struct fsl_qdma_ccdf *virt_head;
3377 + struct fsl_qdma_ccdf *virt_tail;
3378 + struct list_head comp_used;
3379 + struct list_head comp_free;
3380 + struct dma_pool *comp_pool;
3381 + struct dma_pool *sg_pool;
3382 + spinlock_t queue_lock;
3383 + dma_addr_t bus_addr;
3384 + u32 n_cq;
3385 + u32 id;
3386 + struct fsl_qdma_ccdf *cq;
3387 +};
3388 +
3389 +struct fsl_qdma_sg {
3390 + dma_addr_t bus_addr;
3391 + void *virt_addr;
3392 +};
3393 +
3394 +struct fsl_qdma_comp {
3395 + dma_addr_t bus_addr;
3396 + void *virt_addr;
3397 + struct fsl_qdma_chan *qchan;
3398 + struct fsl_qdma_sg *sg_block;
3399 + struct virt_dma_desc vdesc;
3400 + struct list_head list;
3401 + u32 sg_block_src;
3402 + u32 sg_block_dst;
3403 +};
3404 +
3405 +struct fsl_qdma_engine {
3406 + struct dma_device dma_dev;
3407 + void __iomem *ctrl_base;
3408 + void __iomem *status_base;
3409 + void __iomem *block_base;
3410 + u32 n_chans;
3411 + u32 n_queues;
3412 + struct mutex fsl_qdma_mutex;
3413 + int error_irq;
3414 + int queue_irq;
3415 + bool big_endian;
3416 + struct fsl_qdma_queue *queue;
3417 + struct fsl_qdma_queue *status;
3418 + struct fsl_qdma_chan chans[];
3419 +};
3421 +
3422 +static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
3423 +{
3424 + if (qdma->big_endian)
3425 + return ioread32be(addr);
3426 + else
3427 + return ioread32(addr);
3428 +}
3429 +
3430 +static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
3431 + void __iomem *addr)
3432 +{
3433 + if (qdma->big_endian)
3434 + iowrite32be(val, addr);
3435 + else
3436 + iowrite32(val, addr);
3437 +}
3438 +
3439 +static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
3440 +{
3441 + return container_of(chan, struct fsl_qdma_chan, vchan.chan);
3442 +}
3443 +
3444 +static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
3445 +{
3446 + return container_of(vd, struct fsl_qdma_comp, vdesc);
3447 +}
3448 +
3449 +static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
3450 +{
3451 + /*
3452 +	 * In QDMA mode, we don't need to do anything.
3453 + */
3454 + return 0;
3455 +}
3456 +
3457 +static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
3458 +{
3459 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
3460 + unsigned long flags;
3461 + LIST_HEAD(head);
3462 +
3463 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
3464 + vchan_get_all_descriptors(&fsl_chan->vchan, &head);
3465 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
3466 +
3467 + vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
3468 +}
3469 +
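3470 +/*
3471 + * Layout of the FSL_QDMA_BASE_BUFFER_SIZE (96-byte) comp buffer as
3472 + * implied by the offsets used below (a sketch, not taken from the
3473 + * reference manual):
3474 + *
3475 + *   bytes  0..15: CCDF (frame descriptor), addr -> bus_addr + 16
3476 + *   bytes 16..63: frame list table (csgf_desc, csgf_src, csgf_dest)
3477 + *   bytes 64..95: descriptor buffer (SDF, then DDF), at bus_addr + 64
3478 + */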
3470 +static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
3471 + dma_addr_t dst, dma_addr_t src, u32 len)
3472 +{
3473 + struct fsl_qdma_ccdf *ccdf;
3474 + struct fsl_qdma_csgf *csgf_desc, *csgf_src, *csgf_dest;
3475 + struct fsl_qdma_sdf *sdf;
3476 + struct fsl_qdma_ddf *ddf;
3477 +
3478 + ccdf = (struct fsl_qdma_ccdf *)fsl_comp->virt_addr;
3479 + csgf_desc = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 1;
3480 + csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 2;
3481 + csgf_dest = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 3;
3482 + sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4;
3483 + ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5;
3484 +
3485 + memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
3486 +	/* Head Command Descriptor (Frame Descriptor) */
3487 + ccdf->addr = fsl_comp->bus_addr + 16;
3488 + ccdf->format = 1; /* Compound S/G format */
3489 + /* Status notification is enqueued to status queue. */
3490 + ccdf->ser = 1;
3491 +	/* Compound Command Descriptor (Frame List Table) */
3492 + csgf_desc->addr = fsl_comp->bus_addr + 64;
3493 +	/* Length must be 32 for a compound S/G descriptor. */
3494 + csgf_desc->length = 32;
3495 + csgf_src->addr = src;
3496 + csgf_src->length = len;
3497 + csgf_dest->addr = dst;
3498 + csgf_dest->length = len;
3499 + /* This entry is the last entry. */
3500 + csgf_dest->f = FSL_QDMA_F_LAST_ENTRY;
3501 + /* Descriptor Buffer */
3502 + sdf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
3503 + ddf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
3504 + ddf->cmd |= FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET;
3505 +}
3506 +
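3507 +/*
3508 + * Each S/G table block holds FSL_QDMA_EXPECT_SG_ENTRY_NUM (16) entries.
3509 + * The loops below fill the first 15 entries of a block with data
3510 + * buffers; when a block is about to run out, its 16th entry is turned
3511 + * into a link (e = FSL_QDMA_E_SG_TABLE) to the next block, chaining
3512 + * tables until the whole scatterlist is consumed.
3513 + */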
3507 +static void fsl_qdma_comp_fill_sg(
3508 + struct fsl_qdma_comp *fsl_comp,
3509 + struct scatterlist *dst_sg, unsigned int dst_nents,
3510 + struct scatterlist *src_sg, unsigned int src_nents)
3511 +{
3512 + struct fsl_qdma_ccdf *ccdf;
3513 + struct fsl_qdma_csgf *csgf_desc, *csgf_src, *csgf_dest, *csgf_sg;
3514 + struct fsl_qdma_sdf *sdf;
3515 + struct fsl_qdma_ddf *ddf;
3516 + struct fsl_qdma_sg *sg_block, *temp;
3517 + struct scatterlist *sg;
3518 + u64 total_src_len = 0;
3519 + u64 total_dst_len = 0;
3520 + u32 i;
3521 +
3522 + ccdf = (struct fsl_qdma_ccdf *)fsl_comp->virt_addr;
3523 + csgf_desc = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 1;
3524 + csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 2;
3525 + csgf_dest = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 3;
3526 + sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4;
3527 + ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5;
3528 +
3529 + memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
3530 +	/* Head Command Descriptor (Frame Descriptor) */
3531 + ccdf->addr = fsl_comp->bus_addr + 16;
3532 + ccdf->format = 1; /* Compound S/G format */
3533 + /* Status notification is enqueued to status queue. */
3534 + ccdf->ser = 1;
3535 +
3536 +	/* Compound Command Descriptor (Frame List Table) */
3537 + csgf_desc->addr = fsl_comp->bus_addr + 64;
3538 +	/* Length must be 32 for a compound S/G descriptor. */
3539 + csgf_desc->length = 32;
3540 +
3541 + sg_block = fsl_comp->sg_block;
3542 + csgf_src->addr = sg_block->bus_addr;
3543 +	/* This entry links to the source S/G table. */
3544 + csgf_src->e = FSL_QDMA_E_SG_TABLE;
3545 +
3546 + temp = sg_block + fsl_comp->sg_block_src;
3547 + csgf_dest->addr = temp->bus_addr;
3548 + /* This entry is the last entry. */
3549 + csgf_dest->f = FSL_QDMA_F_LAST_ENTRY;
3550 +	/* This entry links to the destination S/G table. */
3551 + csgf_dest->e = FSL_QDMA_E_SG_TABLE;
3552 +
3553 + for_each_sg(src_sg, sg, src_nents, i) {
3554 + temp = sg_block + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
3555 + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
3556 + i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
3557 + csgf_sg->addr = sg_dma_address(sg);
3558 + csgf_sg->length = sg_dma_len(sg);
3559 + total_src_len += sg_dma_len(sg);
3560 +
3561 + if (i == src_nents - 1)
3562 + csgf_sg->f = FSL_QDMA_F_LAST_ENTRY;
3563 + if (i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) ==
3564 + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 2) {
3565 + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
3566 + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1;
3567 + temp = sg_block +
3568 + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
3569 + csgf_sg->addr = temp->bus_addr;
3570 + csgf_sg->e = FSL_QDMA_E_SG_TABLE;
3571 + }
3572 + }
3573 +
3574 + sg_block += fsl_comp->sg_block_src;
3575 + for_each_sg(dst_sg, sg, dst_nents, i) {
3576 + temp = sg_block + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
3577 + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
3578 + i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
3579 + csgf_sg->addr = sg_dma_address(sg);
3580 + csgf_sg->length = sg_dma_len(sg);
3581 + total_dst_len += sg_dma_len(sg);
3582 +
3583 + if (i == dst_nents - 1)
3584 + csgf_sg->f = FSL_QDMA_F_LAST_ENTRY;
3585 + if (i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) ==
3586 + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 2) {
3587 + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
3588 + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1;
3589 + temp = sg_block +
3590 + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
3591 + csgf_sg->addr = temp->bus_addr;
3592 + csgf_sg->e = FSL_QDMA_E_SG_TABLE;
3593 + }
3594 + }
3595 +
3596 + if (total_src_len != total_dst_len)
3597 + dev_err(&fsl_comp->qchan->vchan.chan.dev->device,
3598 + "The data length for src and dst isn't match.\n");
3599 +
3600 + csgf_src->length = total_src_len;
3601 + csgf_dest->length = total_dst_len;
3602 +
3603 + /* Descriptor Buffer */
3604 + sdf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
3605 + ddf->cmd = FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET;
3606 +}
3607 +
3608 +/*
3609 + * Pre-request a full set of command descriptors for enqueue.
3610 + */
3611 +static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
3612 +{
3613 + struct fsl_qdma_comp *comp_temp;
3614 + int i;
3615 +
3616 + for (i = 0; i < queue->n_cq; i++) {
3617 + comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
3618 + if (!comp_temp)
3619 +			return -ENOMEM;
3620 + comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
3621 + GFP_NOWAIT,
3622 + &comp_temp->bus_addr);
3623 +		if (!comp_temp->virt_addr) {
3624 +			kfree(comp_temp);
3625 +			return -ENOMEM;
3626 +		}
3625 + list_add_tail(&comp_temp->list, &queue->comp_free);
3626 + }
3627 + return 0;
3628 +}
3629 +
3630 +/*
3631 + * Request a command descriptor for enqueue.
3632 + */
3633 +static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc(
3634 + struct fsl_qdma_chan *fsl_chan,
3635 + unsigned int dst_nents,
3636 + unsigned int src_nents)
3637 +{
3638 + struct fsl_qdma_comp *comp_temp;
3639 + struct fsl_qdma_sg *sg_block;
3640 + struct fsl_qdma_queue *queue = fsl_chan->queue;
3641 + unsigned long flags;
3642 + unsigned int dst_sg_entry_block, src_sg_entry_block, sg_entry_total, i;
3643 +
3644 + spin_lock_irqsave(&queue->queue_lock, flags);
3645 + if (list_empty(&queue->comp_free)) {
3646 + spin_unlock_irqrestore(&queue->queue_lock, flags);
3647 + comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
3648 + if (!comp_temp)
3649 + return NULL;
3650 + comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
3651 + GFP_NOWAIT,
3652 + &comp_temp->bus_addr);
3653 +		if (!comp_temp->virt_addr) {
3654 +			kfree(comp_temp);
3655 +			return NULL;
3656 +		}
3655 + } else {
3656 + comp_temp = list_first_entry(&queue->comp_free,
3657 + struct fsl_qdma_comp,
3658 + list);
3659 + list_del(&comp_temp->list);
3660 + spin_unlock_irqrestore(&queue->queue_lock, flags);
3661 + }
3662 +
3663 + if (dst_nents != 0)
3664 + dst_sg_entry_block = dst_nents /
3665 + (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
3666 + else
3667 + dst_sg_entry_block = 0;
3668 +
3669 + if (src_nents != 0)
3670 + src_sg_entry_block = src_nents /
3671 + (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
3672 + else
3673 + src_sg_entry_block = 0;
3674 +
3675 + sg_entry_total = dst_sg_entry_block + src_sg_entry_block;
3676 + if (sg_entry_total) {
3677 +		sg_block = kcalloc(sg_entry_total, sizeof(*sg_block),
3678 +				GFP_KERNEL);
3680 + if (!sg_block)
3681 + return NULL;
3682 + comp_temp->sg_block = sg_block;
3683 + for (i = 0; i < sg_entry_total; i++) {
3684 +			sg_block->virt_addr = dma_pool_alloc(queue->sg_pool,
3685 +						GFP_NOWAIT,
3686 +						&sg_block->bus_addr);
3687 +			if (!sg_block->virt_addr)
3688 +				return NULL;
3689 +			memset(sg_block->virt_addr, 0,
3690 +				FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16);
3689 + sg_block++;
3690 + }
3691 + }
3692 +
3693 + comp_temp->sg_block_src = src_sg_entry_block;
3694 + comp_temp->sg_block_dst = dst_sg_entry_block;
3695 + comp_temp->qchan = fsl_chan;
3696 +
3697 + return comp_temp;
3698 +}
3699 +
3700 +static struct fsl_qdma_queue *fsl_qdma_alloc_queue_resources(
3701 + struct platform_device *pdev,
3702 + unsigned int queue_num)
3703 +{
3704 + struct device_node *np = pdev->dev.of_node;
3705 + struct fsl_qdma_queue *queue_head, *queue_temp;
3706 + int ret, len, i;
3707 + unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
3708 +
3709 + if (queue_num > FSL_QDMA_QUEUE_MAX)
3710 + queue_num = FSL_QDMA_QUEUE_MAX;
3711 + len = sizeof(*queue_head) * queue_num;
3712 + queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
3713 + if (!queue_head)
3714 + return NULL;
3715 +
3716 + ret = of_property_read_u32_array(np, "queue-sizes", queue_size,
3717 + queue_num);
3718 + if (ret) {
3719 + dev_err(&pdev->dev, "Can't get queue-sizes.\n");
3720 + return NULL;
3721 + }
3722 +
3723 + for (i = 0; i < queue_num; i++) {
3724 +		if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
3725 +		    queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
3726 +			dev_err(&pdev->dev, "Got invalid queue-sizes.\n");
3727 + return NULL;
3728 + }
3729 + queue_temp = queue_head + i;
3730 + queue_temp->cq = dma_alloc_coherent(&pdev->dev,
3731 + sizeof(struct fsl_qdma_ccdf) *
3732 + queue_size[i],
3733 + &queue_temp->bus_addr,
3734 + GFP_KERNEL);
3735 + if (!queue_temp->cq)
3736 + return NULL;
3737 + queue_temp->n_cq = queue_size[i];
3738 + queue_temp->id = i;
3739 + queue_temp->virt_head = queue_temp->cq;
3740 + queue_temp->virt_tail = queue_temp->cq;
3741 + /*
3742 + * The dma pool for queue command buffer
3743 + */
3744 + queue_temp->comp_pool = dma_pool_create("comp_pool",
3745 + &pdev->dev,
3746 + FSL_QDMA_BASE_BUFFER_SIZE,
3747 + 16, 0);
3748 + if (!queue_temp->comp_pool) {
3749 + dma_free_coherent(&pdev->dev,
3750 + sizeof(struct fsl_qdma_ccdf) *
3751 + queue_size[i],
3752 + queue_temp->cq,
3753 + queue_temp->bus_addr);
3754 + return NULL;
3755 + }
3756 +		/*
3757 +		 * The dma pool for S/G descriptor tables
3758 +		 */
3759 + queue_temp->sg_pool = dma_pool_create("sg_pool",
3760 + &pdev->dev,
3761 + FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16,
3762 + 64, 0);
3763 + if (!queue_temp->sg_pool) {
3764 + dma_free_coherent(&pdev->dev,
3765 + sizeof(struct fsl_qdma_ccdf) *
3766 + queue_size[i],
3767 + queue_temp->cq,
3768 + queue_temp->bus_addr);
3769 + dma_pool_destroy(queue_temp->comp_pool);
3770 + return NULL;
3771 + }
3772 + /*
3773 + * List for queue command buffer
3774 + */
3775 + INIT_LIST_HEAD(&queue_temp->comp_used);
3776 + INIT_LIST_HEAD(&queue_temp->comp_free);
3777 + spin_lock_init(&queue_temp->queue_lock);
3778 + }
3779 +
3780 + return queue_head;
3781 +}
3782 +
3783 +static struct fsl_qdma_queue *fsl_qdma_prep_status_queue(
3784 + struct platform_device *pdev)
3785 +{
3786 + struct device_node *np = pdev->dev.of_node;
3787 + struct fsl_qdma_queue *status_head;
3788 + unsigned int status_size;
3789 + int ret;
3790 +
3791 + ret = of_property_read_u32(np, "status-sizes", &status_size);
3792 + if (ret) {
3793 + dev_err(&pdev->dev, "Can't get status-sizes.\n");
3794 + return NULL;
3795 + }
3796 +	if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
3797 +	    status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
3798 +		dev_err(&pdev->dev, "Got invalid status-sizes.\n");
3799 + return NULL;
3800 + }
3801 + status_head = devm_kzalloc(&pdev->dev, sizeof(*status_head),
3802 + GFP_KERNEL);
3803 + if (!status_head)
3804 + return NULL;
3805 +
3806 + /*
3807 +	 * Ring buffer for the status queue
3808 + */
3809 + status_head->cq = dma_alloc_coherent(&pdev->dev,
3810 + sizeof(struct fsl_qdma_ccdf) *
3811 + status_size,
3812 + &status_head->bus_addr,
3813 + GFP_KERNEL);
3814 + if (!status_head->cq)
3815 + return NULL;
3816 + status_head->n_cq = status_size;
3817 + status_head->virt_head = status_head->cq;
3818 + status_head->virt_tail = status_head->cq;
3819 + status_head->comp_pool = NULL;
3820 +
3821 + return status_head;
3822 +}
3823 +
3824 +static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
3825 +{
3826 + void __iomem *ctrl = fsl_qdma->ctrl_base;
3827 + void __iomem *block = fsl_qdma->block_base;
3828 + int i, count = 5;
3829 + u32 reg;
3830 +
3831 + /* Disable the command queue and wait for idle state. */
3832 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
3833 + reg |= FSL_QDMA_DMR_DQD;
3834 + qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
3835 + for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
3836 + qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
3837 +
3838 + while (1) {
3839 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
3840 + if (!(reg & FSL_QDMA_DSR_DB))
3841 + break;
3842 + if (count-- < 0)
3843 + return -EBUSY;
3844 + udelay(100);
3845 + }
3846 +
3847 + /* Disable status queue. */
3848 + qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);
3849 +
3850 + /*
3851 + * Clear the command queue interrupt detect register for all queues.
3852 + */
3853 + qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
3854 +
3855 + return 0;
3856 +}
3857 +
3858 +static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma)
3859 +{
3860 + struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
3861 + struct fsl_qdma_queue *fsl_status = fsl_qdma->status;
3862 + struct fsl_qdma_queue *temp_queue;
3863 + struct fsl_qdma_comp *fsl_comp;
3864 + struct fsl_qdma_ccdf *status_addr;
3865 + struct fsl_qdma_csgf *csgf_src;
3866 + void __iomem *block = fsl_qdma->block_base;
3867 + u32 reg, i;
3868 + bool duplicate, duplicate_handle;
3869 +
3870 + while (1) {
3871 + duplicate = 0;
3872 + duplicate_handle = 0;
3873 + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
3874 + if (reg & FSL_QDMA_BSQSR_QE)
3875 + return 0;
3876 + status_addr = fsl_status->virt_head;
3877 + if (status_addr->queue == pre_queue &&
3878 + status_addr->addr == pre_addr)
3879 + duplicate = 1;
3880 +
3881 + i = status_addr->queue;
3882 + pre_queue = status_addr->queue;
3883 + pre_addr = status_addr->addr;
3884 + temp_queue = fsl_queue + i;
3885 + spin_lock(&temp_queue->queue_lock);
3886 + if (list_empty(&temp_queue->comp_used)) {
3887 +			if (duplicate) {
3888 +				duplicate_handle = 1;
3889 +			} else {
3890 +				spin_unlock(&temp_queue->queue_lock);
3891 +				return -1;
3892 +			}
3893 + } else {
3894 + fsl_comp = list_first_entry(&temp_queue->comp_used,
3895 + struct fsl_qdma_comp,
3896 + list);
3897 + csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr
3898 + + 2;
3899 + if (fsl_comp->bus_addr + 16 !=
3900 + (dma_addr_t)status_addr->addr) {
3901 +				if (duplicate) {
3902 +					duplicate_handle = 1;
3903 +				} else {
3904 +					spin_unlock(&temp_queue->queue_lock);
3905 +					return -1;
3906 +				}
3907 + }
3908 + }
3909 +
3910 + if (duplicate_handle) {
3911 + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
3912 + reg |= FSL_QDMA_BSQMR_DI;
3913 + status_addr->addr = 0x0;
3914 + fsl_status->virt_head++;
3915 + if (fsl_status->virt_head == fsl_status->cq
3916 + + fsl_status->n_cq)
3917 + fsl_status->virt_head = fsl_status->cq;
3918 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
3919 + spin_unlock(&temp_queue->queue_lock);
3920 + continue;
3921 + }
3922 + list_del(&fsl_comp->list);
3923 +
3924 + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
3925 + reg |= FSL_QDMA_BSQMR_DI;
3926 + status_addr->addr = 0x0;
3927 + fsl_status->virt_head++;
3928 + if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
3929 + fsl_status->virt_head = fsl_status->cq;
3930 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
3931 + spin_unlock(&temp_queue->queue_lock);
3932 +
3933 + spin_lock(&fsl_comp->qchan->vchan.lock);
3934 + vchan_cookie_complete(&fsl_comp->vdesc);
3935 + fsl_comp->qchan->status = DMA_COMPLETE;
3936 + spin_unlock(&fsl_comp->qchan->vchan.lock);
3937 + }
3938 + return 0;
3939 +}
3940 +
3941 +static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
3942 +{
3943 + struct fsl_qdma_engine *fsl_qdma = dev_id;
3944 + unsigned int intr;
3945 + void __iomem *status = fsl_qdma->status_base;
3946 +
3947 + intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
3948 +
3949 + if (intr)
3950 + dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");
3951 +
3952 + qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
3953 + return IRQ_HANDLED;
3954 +}
3955 +
3956 +static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
3957 +{
3958 + struct fsl_qdma_engine *fsl_qdma = dev_id;
3959 + unsigned int intr, reg;
3960 + void __iomem *block = fsl_qdma->block_base;
3961 + void __iomem *ctrl = fsl_qdma->ctrl_base;
3962 +
3963 + intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));
3964 +
3965 + if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
3966 + intr = fsl_qdma_queue_transfer_complete(fsl_qdma);
3967 +
3968 + if (intr != 0) {
3969 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
3970 + reg |= FSL_QDMA_DMR_DQD;
3971 + qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
3972 + qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
3973 +		dev_err(fsl_qdma->dma_dev.dev, "QDMA: status error!\n");
3974 + }
3975 +
3976 + qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
3977 +
3978 + return IRQ_HANDLED;
3979 +}
3980 +
3981 +static int
3982 +fsl_qdma_irq_init(struct platform_device *pdev,
3983 + struct fsl_qdma_engine *fsl_qdma)
3984 +{
3985 + int ret;
3986 +
3987 + fsl_qdma->error_irq = platform_get_irq_byname(pdev,
3988 + "qdma-error");
3989 + if (fsl_qdma->error_irq < 0) {
3990 +		dev_err(&pdev->dev, "Can't get qdma error irq.\n");
3991 + return fsl_qdma->error_irq;
3992 + }
3993 +
3994 + fsl_qdma->queue_irq = platform_get_irq_byname(pdev, "qdma-queue");
3995 + if (fsl_qdma->queue_irq < 0) {
3996 + dev_err(&pdev->dev, "Can't get qdma queue irq.\n");
3997 + return fsl_qdma->queue_irq;
3998 + }
3999 +
4000 + ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
4001 + fsl_qdma_error_handler, 0, "qDMA error", fsl_qdma);
4002 + if (ret) {
4003 + dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
4004 + return ret;
4005 + }
4006 + ret = devm_request_irq(&pdev->dev, fsl_qdma->queue_irq,
4007 + fsl_qdma_queue_handler, 0, "qDMA queue", fsl_qdma);
4008 + if (ret) {
4009 + dev_err(&pdev->dev, "Can't register qDMA queue IRQ.\n");
4010 + return ret;
4011 + }
4012 +
4013 + return 0;
4014 +}
4015 +
4016 +static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
4017 +{
4018 + struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
4019 + struct fsl_qdma_queue *temp;
4020 + void __iomem *ctrl = fsl_qdma->ctrl_base;
4021 + void __iomem *status = fsl_qdma->status_base;
4022 + void __iomem *block = fsl_qdma->block_base;
4023 + int i, ret;
4024 + u32 reg;
4025 +
4026 + /* Try to halt the qDMA engine first. */
4027 + ret = fsl_qdma_halt(fsl_qdma);
4028 + if (ret) {
4029 + dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");
4030 + return ret;
4031 + }
4032 +
4033 + /*
4034 + * Clear the command queue interrupt detect register for all queues.
4035 + */
4036 + qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
4037 +
4038 + for (i = 0; i < fsl_qdma->n_queues; i++) {
4039 + temp = fsl_queue + i;
4040 + /*
4041 + * Initialize Command Queue registers to point to the first
4042 + * command descriptor in memory.
4043 + * Dequeue Pointer Address Registers
4044 + * Enqueue Pointer Address Registers
4045 + */
4046 + qdma_writel(fsl_qdma, temp->bus_addr,
4047 + block + FSL_QDMA_BCQDPA_SADDR(i));
4048 + qdma_writel(fsl_qdma, temp->bus_addr,
4049 + block + FSL_QDMA_BCQEPA_SADDR(i));
4050 +
4051 + /* Initialize the queue mode. */
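4052 +		/*
4053 +		 * CQ_SIZE is presumably encoded as log2(n_cq) - 6, the
4054 +		 * smallest legal ring (FSL_QDMA_CIRCULAR_DESC_SIZE_MIN)
4055 +		 * being 64 == 2^6 descriptors; CD_THLD scales with the
4056 +		 * ring size in the same way. This is an inference from
4057 +		 * the constants, not from the reference manual.
4058 +		 */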
4052 + reg = FSL_QDMA_BCQMR_EN;
4053 +		reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
4054 +		reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
4055 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
4056 + }
4057 +
4058 + /*
4059 + * Workaround for erratum: ERR010812.
4060 +	 * We must enable XOFF to avoid enqueue rejections.
4061 + * Setting SQCCMR ENTER_WM to 0x20.
4062 + */
4063 + qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
4064 + block + FSL_QDMA_SQCCMR);
4065 + /*
4066 + * Initialize status queue registers to point to the first
4067 + * command descriptor in memory.
4068 + * Dequeue Pointer Address Registers
4069 + * Enqueue Pointer Address Registers
4070 + */
4071 + qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
4072 + block + FSL_QDMA_SQEPAR);
4073 + qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
4074 + block + FSL_QDMA_SQDPAR);
4075 + /* Initialize status queue interrupt. */
4076 + qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
4077 + block + FSL_QDMA_BCQIER(0));
4078 + qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN | FSL_QDMA_BSQICR_ICST(5)
4079 + | 0x8000,
4080 + block + FSL_QDMA_BSQICR);
4081 + qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE | FSL_QDMA_CQIER_TEIE,
4082 + block + FSL_QDMA_CQIER);
4083 + /* Initialize controller interrupt register. */
4084 + qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
4085 + qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEIER);
4086 +
4087 + /* Initialize the status queue mode. */
4088 + reg = FSL_QDMA_BSQMR_EN;
4089 +	reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2(fsl_qdma->status->n_cq) - 6);
4090 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
4091 +
4092 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
4093 + reg &= ~FSL_QDMA_DMR_DQD;
4094 + qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
4095 +
4096 + return 0;
4097 +}
4098 +
4099 +static struct dma_async_tx_descriptor *fsl_qdma_prep_dma_sg(
4100 + struct dma_chan *chan,
4101 + struct scatterlist *dst_sg, unsigned int dst_nents,
4102 + struct scatterlist *src_sg, unsigned int src_nents,
4103 + unsigned long flags)
4104 +{
4105 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
4106 + struct fsl_qdma_comp *fsl_comp;
4107 +
4108 +	fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan,
4109 +						dst_nents,
4110 +						src_nents);
4111 +	if (!fsl_comp)
4112 +		return NULL;
4113 +	fsl_qdma_comp_fill_sg(fsl_comp, dst_sg, dst_nents, src_sg, src_nents);
4112 +
4113 + return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
4114 +}
4115 +
4116 +static struct dma_async_tx_descriptor *
4117 +fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
4118 + dma_addr_t src, size_t len, unsigned long flags)
4119 +{
4120 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
4121 + struct fsl_qdma_comp *fsl_comp;
4122 +
4123 +	fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan, 0, 0);
4124 +	if (!fsl_comp)
4125 +		return NULL;
4126 +	fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
4125 +
4126 + return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
4127 +}
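4128 +
4129 +/*
4130 + * Illustrative dmaengine client usage (a sketch, not part of this
4131 + * driver): a memcpy through this engine uses only generic dmaengine
4132 + * calls, e.g.:
4133 + *
4134 + *	dma_cap_mask_t mask;
4135 + *	struct dma_chan *chan;
4136 + *	struct dma_async_tx_descriptor *tx;
4137 + *	dma_cookie_t cookie;
4138 + *
4139 + *	dma_cap_zero(mask);
4140 + *	dma_cap_set(DMA_MEMCPY, mask);
4141 + *	chan = dma_request_channel(mask, NULL, NULL);
4142 + *	tx = dmaengine_prep_dma_memcpy(chan, dst_phys, src_phys, len, 0);
4143 + *	cookie = dmaengine_submit(tx);
4144 + *	dma_async_issue_pending(chan);
4145 + *	// ...later: dma_async_is_tx_complete(chan, cookie, NULL, NULL);
4146 + *	dma_release_channel(chan);
4147 + */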
4128 +
4129 +static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
4130 +{
4131 + void __iomem *block = fsl_chan->qdma->block_base;
4132 + struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
4133 + struct fsl_qdma_comp *fsl_comp;
4134 + struct virt_dma_desc *vdesc;
4135 + u32 reg;
4136 +
4137 + reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
4138 + if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
4139 + return;
4140 + vdesc = vchan_next_desc(&fsl_chan->vchan);
4141 + if (!vdesc)
4142 + return;
4143 + list_del(&vdesc->node);
4144 + fsl_comp = to_fsl_qdma_comp(vdesc);
4145 +
4146 + memcpy(fsl_queue->virt_head++, fsl_comp->virt_addr, 16);
4147 + if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
4148 + fsl_queue->virt_head = fsl_queue->cq;
4149 +
4150 + list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
4151 + barrier();
4152 + reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
4153 + reg |= FSL_QDMA_BCQMR_EI;
4154 + qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
4155 + fsl_chan->status = DMA_IN_PROGRESS;
4156 +}
4157 +
4158 +static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan,
4159 + dma_cookie_t cookie, struct dma_tx_state *txstate)
4160 +{
4161 + return dma_cookie_status(chan, cookie, txstate);
4162 +}
4163 +
4164 +static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
4165 +{
4166 + struct fsl_qdma_comp *fsl_comp;
4167 + struct fsl_qdma_queue *fsl_queue;
4168 + struct fsl_qdma_sg *sg_block;
4169 + unsigned long flags;
4170 + unsigned int i;
4171 +
4172 + fsl_comp = to_fsl_qdma_comp(vdesc);
4173 + fsl_queue = fsl_comp->qchan->queue;
4174 +
4175 + if (fsl_comp->sg_block) {
4176 + for (i = 0; i < fsl_comp->sg_block_src +
4177 + fsl_comp->sg_block_dst; i++) {
4178 + sg_block = fsl_comp->sg_block + i;
4179 + dma_pool_free(fsl_queue->sg_pool,
4180 + sg_block->virt_addr,
4181 + sg_block->bus_addr);
4182 + }
4183 + kfree(fsl_comp->sg_block);
4184 + }
4185 +
4186 + spin_lock_irqsave(&fsl_queue->queue_lock, flags);
4187 + list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
4188 + spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
4189 +}
4190 +
4191 +static void fsl_qdma_issue_pending(struct dma_chan *chan)
4192 +{
4193 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
4194 + struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
4195 + unsigned long flags;
4196 +
4197 + spin_lock_irqsave(&fsl_queue->queue_lock, flags);
4198 + spin_lock(&fsl_chan->vchan.lock);
4199 + if (vchan_issue_pending(&fsl_chan->vchan))
4200 + fsl_qdma_enqueue_desc(fsl_chan);
4201 + spin_unlock(&fsl_chan->vchan.lock);
4202 + spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
4203 +}
4204 +
4205 +static int fsl_qdma_probe(struct platform_device *pdev)
4206 +{
4207 + struct device_node *np = pdev->dev.of_node;
4208 + struct fsl_qdma_engine *fsl_qdma;
4209 + struct fsl_qdma_chan *fsl_chan;
4210 + struct resource *res;
4211 + unsigned int len, chans, queues;
4212 + int ret, i;
4213 +
4214 + ret = of_property_read_u32(np, "channels", &chans);
4215 + if (ret) {
4216 + dev_err(&pdev->dev, "Can't get channels.\n");
4217 + return ret;
4218 + }
4219 +
4220 + len = sizeof(*fsl_qdma) + sizeof(*fsl_chan) * chans;
4221 + fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
4222 + if (!fsl_qdma)
4223 + return -ENOMEM;
4224 +
4225 + ret = of_property_read_u32(np, "queues", &queues);
4226 + if (ret) {
4227 + dev_err(&pdev->dev, "Can't get queues.\n");
4228 + return ret;
4229 + }
4230 +
4231 + fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, queues);
4232 + if (!fsl_qdma->queue)
4233 + return -ENOMEM;
4234 +
4235 + fsl_qdma->status = fsl_qdma_prep_status_queue(pdev);
4236 + if (!fsl_qdma->status)
4237 + return -ENOMEM;
4238 +
4239 + fsl_qdma->n_chans = chans;
4240 + fsl_qdma->n_queues = queues;
4241 + mutex_init(&fsl_qdma->fsl_qdma_mutex);
4242 +
4243 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4244 + fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
4245 + if (IS_ERR(fsl_qdma->ctrl_base))
4246 + return PTR_ERR(fsl_qdma->ctrl_base);
4247 +
4248 + res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
4249 + fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
4250 + if (IS_ERR(fsl_qdma->status_base))
4251 + return PTR_ERR(fsl_qdma->status_base);
4252 +
4253 + res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
4254 + fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
4255 + if (IS_ERR(fsl_qdma->block_base))
4256 + return PTR_ERR(fsl_qdma->block_base);
4257 +
4258 + ret = fsl_qdma_irq_init(pdev, fsl_qdma);
4259 + if (ret)
4260 + return ret;
4261 +
4262 + fsl_qdma->big_endian = of_property_read_bool(np, "big-endian");
4263 + INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
4264 + for (i = 0; i < fsl_qdma->n_chans; i++) {
4265 + struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
4266 +
4267 + fsl_chan->qdma = fsl_qdma;
4268 + fsl_chan->queue = fsl_qdma->queue + i % fsl_qdma->n_queues;
4269 + fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
4270 + INIT_LIST_HEAD(&fsl_chan->qcomp);
4271 + vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
4272 + }
4273 +	for (i = 0; i < fsl_qdma->n_queues; i++) {
4274 +		ret = fsl_qdma_pre_request_enqueue_desc(fsl_qdma->queue + i);
4275 +		if (ret)
4276 +			return ret;
4277 +	}
4275 +
4276 + dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);
4277 + dma_cap_set(DMA_SG, fsl_qdma->dma_dev.cap_mask);
4278 +
4279 + fsl_qdma->dma_dev.dev = &pdev->dev;
4280 + fsl_qdma->dma_dev.device_alloc_chan_resources
4281 + = fsl_qdma_alloc_chan_resources;
4282 + fsl_qdma->dma_dev.device_free_chan_resources
4283 + = fsl_qdma_free_chan_resources;
4284 + fsl_qdma->dma_dev.device_tx_status = fsl_qdma_tx_status;
4285 + fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
4286 + fsl_qdma->dma_dev.device_prep_dma_sg = fsl_qdma_prep_dma_sg;
4287 + fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
4288 +
4289 +	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
4290 +	if (ret) {
4291 +		dev_err(&pdev->dev, "Can't set dma mask.\n");
4292 +		return ret;
4293 +	}
4290 +
4291 + platform_set_drvdata(pdev, fsl_qdma);
4292 +
4293 + ret = dma_async_device_register(&fsl_qdma->dma_dev);
4294 + if (ret) {
4295 + dev_err(&pdev->dev, "Can't register Freescale qDMA engine.\n");
4296 + return ret;
4297 + }
4298 +
4299 + ret = fsl_qdma_reg_init(fsl_qdma);
4300 + if (ret) {
4301 +		dev_err(&pdev->dev, "Can't initialize the qDMA engine.\n");
4302 + return ret;
4303 + }
4304 +
4306 + return 0;
4307 +}
4308 +
4309 +static int fsl_qdma_remove(struct platform_device *pdev)
4310 +{
4311 + struct device_node *np = pdev->dev.of_node;
4312 + struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
4313 + struct fsl_qdma_queue *queue_temp;
4314 + struct fsl_qdma_queue *status = fsl_qdma->status;
4315 + struct fsl_qdma_comp *comp_temp, *_comp_temp;
4316 + int i;
4317 +
4318 + of_dma_controller_free(np);
4319 + dma_async_device_unregister(&fsl_qdma->dma_dev);
4320 +
4321 + /* Free descriptor areas */
4322 + for (i = 0; i < fsl_qdma->n_queues; i++) {
4323 + queue_temp = fsl_qdma->queue + i;
4324 + list_for_each_entry_safe(comp_temp, _comp_temp,
4325 + &queue_temp->comp_used, list) {
4326 + dma_pool_free(queue_temp->comp_pool,
4327 + comp_temp->virt_addr,
4328 + comp_temp->bus_addr);
4329 + list_del(&comp_temp->list);
4330 + kfree(comp_temp);
4331 + }
4332 + list_for_each_entry_safe(comp_temp, _comp_temp,
4333 + &queue_temp->comp_free, list) {
4334 + dma_pool_free(queue_temp->comp_pool,
4335 + comp_temp->virt_addr,
4336 + comp_temp->bus_addr);
4337 + list_del(&comp_temp->list);
4338 + kfree(comp_temp);
4339 + }
4340 + dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) *
4341 + queue_temp->n_cq, queue_temp->cq,
4342 + queue_temp->bus_addr);
4343 + dma_pool_destroy(queue_temp->comp_pool);
4344 + }
4345 +
4346 + dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) *
4347 + status->n_cq, status->cq, status->bus_addr);
4348 + return 0;
4349 +}
4350 +
4351 +static const struct of_device_id fsl_qdma_dt_ids[] = {
4352 + { .compatible = "fsl,ls1021a-qdma", },
4353 + { /* sentinel */ }
4354 +};
4355 +MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);
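4356 +
4357 +/*
4358 + * Illustrative device-tree node, inferred from the properties this
4359 + * driver reads (addresses and interrupt specifiers omitted; consult
4360 + * the binding document for the authoritative format):
4361 + *
4362 + *	qdma: dma-controller@... {
4363 + *		compatible = "fsl,ls1021a-qdma";
4364 + *		reg = <...>, <...>, <...>;  (three regions: ctrl, status, block)
4365 + *		interrupt-names = "qdma-error", "qdma-queue";
4366 + *		channels = <8>;
4367 + *		queues = <2>;
4368 + *		queue-sizes = <64 64>;
4369 + *		status-sizes = <64>;
4370 + *		big-endian;
4371 + *	};
4372 + */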
4356 +
4357 +static struct platform_driver fsl_qdma_driver = {
4358 + .driver = {
4359 + .name = "fsl-qdma",
4361 + .of_match_table = fsl_qdma_dt_ids,
4362 + },
4363 + .probe = fsl_qdma_probe,
4364 + .remove = fsl_qdma_remove,
4365 +};
4366 +
4367 +static int __init fsl_qdma_init(void)
4368 +{
4369 + return platform_driver_register(&fsl_qdma_driver);
4370 +}
4371 +subsys_initcall(fsl_qdma_init);
4372 +
4373 +static void __exit fsl_qdma_exit(void)
4374 +{
4375 + platform_driver_unregister(&fsl_qdma_driver);
4376 +}
4377 +module_exit(fsl_qdma_exit);
4378 +
4379 +MODULE_ALIAS("platform:fsl-qdma");
4380 +MODULE_DESCRIPTION("Freescale qDMA engine driver");
4381 +MODULE_LICENSE("GPL v2");
4382 --
4383 2.14.1
4384