1 From d3d537ebe9884e7d945ab74bb02312d0c2c9b08d Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Thu, 5 Jul 2018 17:32:53 +0800
4 Subject: [PATCH 17/32] dma: support layerscape
5
6 This is an integrated patch for Layerscape DMA support.
7
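The engines added here register DMA_MEMCPY capability with the dmaengine
core, so ordinary dmaengine clients can drive them. As a rough illustration
only (not part of this patch; chan, dst_dma, src_dma and len are placeholder
variables, and the generic dmaengine_prep_dma_memcpy() client wrapper is
assumed to be available), a single memcpy job would be issued like this:

	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* prepare, submit and kick one memcpy transfer on a DMA_MEMCPY channel */
	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
				       DMA_PREP_INTERRUPT);
	if (tx) {
		cookie = dmaengine_submit(tx);
		dma_async_issue_pending(chan);
	}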
8 Signed-off-by: jiaheng.fan <jiaheng.fan@nxp.com>
9 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
10 ---
11 drivers/dma/Kconfig | 31 +
12 drivers/dma/Makefile | 3 +
13 drivers/dma/caam_dma.c | 563 ++++++++++
14 drivers/dma/dpaa2-qdma/Kconfig | 8 +
15 drivers/dma/dpaa2-qdma/Makefile | 8 +
16 drivers/dma/dpaa2-qdma/dpaa2-qdma.c | 940 +++++++++++++++++
17 drivers/dma/dpaa2-qdma/dpaa2-qdma.h | 227 +++++
18 drivers/dma/dpaa2-qdma/dpdmai.c | 515 ++++++++++
19 drivers/dma/dpaa2-qdma/fsl_dpdmai.h | 521 ++++++++++
20 drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h | 222 ++++
21 drivers/dma/fsl-qdma.c | 1243 +++++++++++++++++++++++
22 11 files changed, 4281 insertions(+)
23 create mode 100644 drivers/dma/caam_dma.c
24 create mode 100644 drivers/dma/dpaa2-qdma/Kconfig
25 create mode 100644 drivers/dma/dpaa2-qdma/Makefile
26 create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.c
27 create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.h
28 create mode 100644 drivers/dma/dpaa2-qdma/dpdmai.c
29 create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai.h
30 create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
31 create mode 100644 drivers/dma/fsl-qdma.c
32
33 --- a/drivers/dma/Kconfig
34 +++ b/drivers/dma/Kconfig
35 @@ -192,6 +192,20 @@ config FSL_EDMA
36 multiplexing capability for DMA request sources(slot).
37 This module can be found on Freescale Vybrid and LS-1 SoCs.
38
39 +config FSL_QDMA
40 + tristate "Freescale qDMA engine support"
41 + select DMA_ENGINE
42 + select DMA_VIRTUAL_CHANNELS
43 + select DMA_ENGINE_RAID
44 + select ASYNC_TX_ENABLE_CHANNEL_SWITCH
45 + help
46 + Support the Freescale qDMA engine with command queue and legacy mode.
47 + Channel virtualization is supported through enqueuing of DMA jobs to,
48 + or dequeuing DMA jobs from, different work queues.
49 + This module can be found on Freescale LS SoCs.
50 +
51 +source drivers/dma/dpaa2-qdma/Kconfig
52 +
53 config FSL_RAID
54 tristate "Freescale RAID engine Support"
55 depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
56 @@ -564,6 +578,23 @@ config ZX_DMA
57 help
58 Support the DMA engine for ZTE ZX296702 platform devices.
59
60 +config CRYPTO_DEV_FSL_CAAM_DMA
61 + tristate "CAAM DMA engine support"
62 + depends on CRYPTO_DEV_FSL_CAAM_JR
63 + default y
64 + select DMA_ENGINE
65 + select ASYNC_CORE
66 + select ASYNC_TX_ENABLE_CHANNEL_SWITCH
67 + help
68 + Selecting this will offload the DMA operations for users of
69 + the scatter gather memcpy API to the CAAM via job rings. The
70 + CAAM is a hardware module that provides hardware acceleration for
71 + cryptographic operations. It has a built-in DMA controller that can
72 + be programmed to read/write cryptographic data. This module defines
73 + a DMA driver that uses the DMA capabilities of the CAAM.
74 +
75 + To compile this as a module, choose M here: the module
76 + will be called caam_dma.
77
78 # driver files
79 source "drivers/dma/bestcomm/Kconfig"
80 --- a/drivers/dma/Makefile
81 +++ b/drivers/dma/Makefile
82 @@ -29,6 +29,8 @@ obj-$(CONFIG_DW_DMAC_CORE) += dw/
83 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
84 obj-$(CONFIG_FSL_DMA) += fsldma.o
85 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
86 +obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
87 +obj-$(CONFIG_FSL_DPAA2_QDMA) += dpaa2-qdma/
88 obj-$(CONFIG_FSL_RAID) += fsl_raid.o
89 obj-$(CONFIG_HSU_DMA) += hsu/
90 obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
91 @@ -67,6 +69,7 @@ obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-
92 obj-$(CONFIG_TI_EDMA) += edma.o
93 obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
94 obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
95 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_DMA) += caam_dma.o
96
97 obj-y += qcom/
98 obj-y += xilinx/
99 --- /dev/null
100 +++ b/drivers/dma/caam_dma.c
101 @@ -0,0 +1,563 @@
102 +/*
103 + * caam support for SG DMA
104 + *
105 + * Copyright 2016 Freescale Semiconductor, Inc
106 + * Copyright 2017 NXP
107 + */
108 +
109 +#include <linux/module.h>
110 +#include <linux/platform_device.h>
111 +#include <linux/dma-mapping.h>
112 +#include <linux/interrupt.h>
113 +#include <linux/slab.h>
114 +#include <linux/debugfs.h>
115 +
116 +#include <linux/dmaengine.h>
117 +#include "dmaengine.h"
118 +
119 +#include "../crypto/caam/regs.h"
120 +#include "../crypto/caam/jr.h"
121 +#include "../crypto/caam/error.h"
122 +#include "../crypto/caam/intern.h"
123 +#include "../crypto/caam/desc_constr.h"
124 +#include "../crypto/caam/sg_sw_sec4.h"
125 +
126 +#define DESC_DMA_MEMCPY_LEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / \
127 + CAAM_CMD_SZ)
128 +
129 +/* This is the maximum chunk size of a DMA transfer. If a buffer is larger
130 + * than this value it is internally broken into chunks of at most
131 + * CAAM_DMA_CHUNK_SIZE bytes and a DMA transfer request is issued per chunk.
132 + * This value (65280 = 0xff00 = 255 * 256) is the largest 16-bit number that
133 + * is a multiple of 256 bytes (the largest configurable CAAM DMA burst size).
134 + */
135 +#define CAAM_DMA_CHUNK_SIZE 65280
136 +
137 +struct caam_dma_sh_desc {
138 + u32 desc[DESC_DMA_MEMCPY_LEN] ____cacheline_aligned;
139 + dma_addr_t desc_dma;
140 +};
141 +
142 +/* caam dma extended descriptor */
143 +struct caam_dma_edesc {
144 + struct dma_async_tx_descriptor async_tx;
145 + struct list_head node;
146 + struct caam_dma_ctx *ctx;
147 + dma_addr_t src_dma;
148 + dma_addr_t dst_dma;
149 + unsigned int src_len;
150 + unsigned int dst_len;
151 + struct sec4_sg_entry *sec4_sg;
152 + u32 jd[] ____cacheline_aligned;
153 +};
154 +
155 +/*
156 + * caam_dma_ctx - per jr/channel context
157 + * @chan: dma channel used by async_tx API
158 + * @node: list_head used to attach to the global dma_ctx_list
159 + * @jrdev: Job Ring device
160 + * @submit_q: queue of pending (submitted, but not enqueued) jobs
161 + * @done_not_acked: jobs that have been completed by the JR but may not be acked yet
162 + * @edesc_lock: protects extended descriptor
163 + */
164 +struct caam_dma_ctx {
165 + struct dma_chan chan;
166 + struct list_head node;
167 + struct device *jrdev;
168 + struct list_head submit_q;
169 + struct list_head done_not_acked;
170 + spinlock_t edesc_lock;
171 +};
172 +
173 +static struct dma_device *dma_dev;
174 +static struct caam_dma_sh_desc *dma_sh_desc;
175 +static LIST_HEAD(dma_ctx_list);
176 +
177 +static dma_cookie_t caam_dma_tx_submit(struct dma_async_tx_descriptor *tx)
178 +{
179 + struct caam_dma_edesc *edesc = NULL;
180 + struct caam_dma_ctx *ctx = NULL;
181 + dma_cookie_t cookie;
182 +
183 + edesc = container_of(tx, struct caam_dma_edesc, async_tx);
184 + ctx = container_of(tx->chan, struct caam_dma_ctx, chan);
185 +
186 + spin_lock_bh(&ctx->edesc_lock);
187 +
188 + cookie = dma_cookie_assign(tx);
189 + list_add_tail(&edesc->node, &ctx->submit_q);
190 +
191 + spin_unlock_bh(&ctx->edesc_lock);
192 +
193 + return cookie;
194 +}
195 +
196 +static unsigned int caam_dma_sg_dma_len(struct scatterlist *sg,
197 + unsigned int nents)
198 +{
199 + unsigned int len;
200 +
201 + for (len = 0; sg && nents; sg = sg_next(sg), nents--)
202 + len += sg_dma_len(sg);
203 +
204 + return len;
205 +}
206 +
207 +static struct caam_dma_edesc *
208 +caam_dma_sg_edesc_alloc(struct dma_chan *chan,
209 + struct scatterlist *dst_sg, unsigned int dst_nents,
210 + struct scatterlist *src_sg, unsigned int src_nents,
211 + unsigned long flags)
212 +{
213 + struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
214 + chan);
215 + struct device *jrdev = ctx->jrdev;
216 + struct caam_dma_edesc *edesc;
217 + struct sec4_sg_entry *sec4_sg;
218 + dma_addr_t sec4_sg_dma_src;
219 + unsigned int sec4_sg_bytes;
220 +
221 + if (!dst_sg || !src_sg || !dst_nents || !src_nents)
222 + return NULL;
223 +
224 + sec4_sg_bytes = (src_nents + dst_nents) * sizeof(*sec4_sg);
225 +
226 + edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
227 + GFP_DMA | GFP_NOWAIT);
228 + if (!edesc)
229 + return ERR_PTR(-ENOMEM);
230 +
231 + edesc->src_len = caam_dma_sg_dma_len(src_sg, src_nents);
232 + edesc->dst_len = caam_dma_sg_dma_len(dst_sg, dst_nents);
233 + if (edesc->src_len != edesc->dst_len) {
234 + dev_err(jrdev, "%s: src(%u) and dst(%u) len mismatch.\n",
235 + __func__, edesc->src_len, edesc->dst_len);
236 + kfree(edesc);
237 + return ERR_PTR(-EINVAL);
238 + }
239 +
240 + dma_async_tx_descriptor_init(&edesc->async_tx, chan);
241 + edesc->async_tx.tx_submit = caam_dma_tx_submit;
242 + edesc->async_tx.flags = flags;
243 + edesc->async_tx.cookie = -EBUSY;
244 +
245 + /* Prepare SEC SGs */
246 + edesc->sec4_sg = (void *)edesc + offsetof(struct caam_dma_edesc, jd) +
247 + DESC_JOB_IO_LEN;
248 +
249 + sec4_sg = edesc->sec4_sg;
250 + sg_to_sec4_sg_last(src_sg, src_nents, sec4_sg, 0);
251 +
252 + sec4_sg += src_nents;
253 + sg_to_sec4_sg_last(dst_sg, dst_nents, sec4_sg, 0);
254 +
255 + sec4_sg_dma_src = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes,
256 + DMA_TO_DEVICE);
257 + if (dma_mapping_error(jrdev, sec4_sg_dma_src)) {
258 + dev_err(jrdev, "error mapping segments to device\n");
259 + kfree(edesc);
260 + return ERR_PTR(-ENOMEM);
261 + }
262 +
263 + edesc->src_dma = sec4_sg_dma_src;
264 + edesc->dst_dma = sec4_sg_dma_src + src_nents * sizeof(*sec4_sg);
265 + edesc->ctx = ctx;
266 +
267 + return edesc;
268 +}
269 +
270 +static void caam_jr_chan_free_edesc(struct caam_dma_edesc *edesc)
271 +{
272 + struct caam_dma_ctx *ctx = edesc->ctx;
273 + struct caam_dma_edesc *_edesc = NULL;
274 +
275 + spin_lock_bh(&ctx->edesc_lock);
276 +
277 + list_add_tail(&edesc->node, &ctx->done_not_acked);
278 + list_for_each_entry_safe(edesc, _edesc, &ctx->done_not_acked, node) {
279 + if (async_tx_test_ack(&edesc->async_tx)) {
280 + list_del(&edesc->node);
281 + kfree(edesc);
282 + }
283 + }
284 +
285 + spin_unlock_bh(&ctx->edesc_lock);
286 +}
287 +
288 +static void caam_dma_done(struct device *dev, u32 *hwdesc, u32 err,
289 + void *context)
290 +{
291 + struct caam_dma_edesc *edesc = context;
292 + struct caam_dma_ctx *ctx = edesc->ctx;
293 + dma_async_tx_callback callback;
294 + void *callback_param;
295 +
296 + if (err)
297 + caam_jr_strstatus(ctx->jrdev, err);
298 +
299 + dma_run_dependencies(&edesc->async_tx);
300 +
301 + spin_lock_bh(&ctx->edesc_lock);
302 + dma_cookie_complete(&edesc->async_tx);
303 + spin_unlock_bh(&ctx->edesc_lock);
304 +
305 + callback = edesc->async_tx.callback;
306 + callback_param = edesc->async_tx.callback_param;
307 +
308 + dma_descriptor_unmap(&edesc->async_tx);
309 +
310 + caam_jr_chan_free_edesc(edesc);
311 +
312 + if (callback)
313 + callback(callback_param);
314 +}
315 +
316 +static void caam_dma_sg_init_job_desc(struct caam_dma_edesc *edesc)
317 +{
318 + u32 *jd = edesc->jd;
319 + u32 *sh_desc = dma_sh_desc->desc;
320 + dma_addr_t desc_dma = dma_sh_desc->desc_dma;
321 +
322 + /* init the job descriptor */
323 + init_job_desc_shared(jd, desc_dma, desc_len(sh_desc), HDR_REVERSE);
324 +
325 + /* set SEQIN PTR */
326 + append_seq_in_ptr(jd, edesc->src_dma, edesc->src_len, LDST_SGF);
327 +
328 + /* set SEQOUT PTR */
329 + append_seq_out_ptr(jd, edesc->dst_dma, edesc->dst_len, LDST_SGF);
330 +
331 +#ifdef DEBUG
332 + print_hex_dump(KERN_ERR, "caam dma desc@" __stringify(__LINE__) ": ",
333 + DUMP_PREFIX_ADDRESS, 16, 4, jd, desc_bytes(jd), 1);
334 +#endif
335 +}
336 +
337 +/* This function can be called from an interrupt context */
338 +static struct dma_async_tx_descriptor *
339 +caam_dma_prep_sg(struct dma_chan *chan, struct scatterlist *dst_sg,
340 + unsigned int dst_nents, struct scatterlist *src_sg,
341 + unsigned int src_nents, unsigned long flags)
342 +{
343 + struct caam_dma_edesc *edesc;
344 +
345 + /* allocate extended descriptor */
346 + edesc = caam_dma_sg_edesc_alloc(chan, dst_sg, dst_nents, src_sg,
347 + src_nents, flags);
348 + if (IS_ERR_OR_NULL(edesc))
349 + return ERR_CAST(edesc);
350 +
351 + /* Initialize job descriptor */
352 + caam_dma_sg_init_job_desc(edesc);
353 +
354 + return &edesc->async_tx;
355 +}
356 +
357 +static void caam_dma_memcpy_init_job_desc(struct caam_dma_edesc *edesc)
358 +{
359 + u32 *jd = edesc->jd;
360 + u32 *sh_desc = dma_sh_desc->desc;
361 + dma_addr_t desc_dma = dma_sh_desc->desc_dma;
362 +
363 + /* init the job descriptor */
364 + init_job_desc_shared(jd, desc_dma, desc_len(sh_desc), HDR_REVERSE);
365 +
366 + /* set SEQIN PTR */
367 + append_seq_in_ptr(jd, edesc->src_dma, edesc->src_len, 0);
368 +
369 + /* set SEQOUT PTR */
370 + append_seq_out_ptr(jd, edesc->dst_dma, edesc->dst_len, 0);
371 +
372 +#ifdef DEBUG
373 + print_hex_dump(KERN_ERR, "caam dma desc@" __stringify(__LINE__) ": ",
374 + DUMP_PREFIX_ADDRESS, 16, 4, jd, desc_bytes(jd), 1);
375 +#endif
376 +}
377 +
378 +static struct dma_async_tx_descriptor *
379 +caam_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
380 + size_t len, unsigned long flags)
381 +{
382 + struct caam_dma_edesc *edesc;
383 + struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
384 + chan);
385 +
386 + edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | GFP_NOWAIT);
387 + if (!edesc)
388 + return ERR_PTR(-ENOMEM);
389 +
390 + dma_async_tx_descriptor_init(&edesc->async_tx, chan);
391 + edesc->async_tx.tx_submit = caam_dma_tx_submit;
392 + edesc->async_tx.flags = flags;
393 + edesc->async_tx.cookie = -EBUSY;
394 +
395 + edesc->src_dma = src;
396 + edesc->src_len = len;
397 + edesc->dst_dma = dst;
398 + edesc->dst_len = len;
399 + edesc->ctx = ctx;
400 +
401 + caam_dma_memcpy_init_job_desc(edesc);
402 +
403 + return &edesc->async_tx;
404 +}
405 +
406 +/* This function can be called in an interrupt context */
407 +static void caam_dma_issue_pending(struct dma_chan *chan)
408 +{
409 + struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
410 + chan);
411 + struct caam_dma_edesc *edesc, *_edesc;
412 +
413 + spin_lock_bh(&ctx->edesc_lock);
414 + list_for_each_entry_safe(edesc, _edesc, &ctx->submit_q, node) {
415 + if (caam_jr_enqueue(ctx->jrdev, edesc->jd,
416 + caam_dma_done, edesc) < 0)
417 + break;
418 + list_del(&edesc->node);
419 + }
420 + spin_unlock_bh(&ctx->edesc_lock);
421 +}
422 +
423 +static void caam_dma_free_chan_resources(struct dma_chan *chan)
424 +{
425 + struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
426 + chan);
427 + struct caam_dma_edesc *edesc, *_edesc;
428 +
429 + spin_lock_bh(&ctx->edesc_lock);
430 + list_for_each_entry_safe(edesc, _edesc, &ctx->submit_q, node) {
431 + list_del(&edesc->node);
432 + kfree(edesc);
433 + }
434 + list_for_each_entry_safe(edesc, _edesc, &ctx->done_not_acked, node) {
435 + list_del(&edesc->node);
436 + kfree(edesc);
437 + }
438 + spin_unlock_bh(&ctx->edesc_lock);
439 +}
440 +
441 +static int caam_dma_jr_chan_bind(void)
442 +{
443 + struct device *jrdev;
444 + struct caam_dma_ctx *ctx;
445 + int bonds = 0;
446 + int i;
447 +
448 + for (i = 0; i < caam_jr_driver_probed(); i++) {
449 + jrdev = caam_jridx_alloc(i);
450 + if (IS_ERR(jrdev)) {
451 + pr_err("job ring device %d allocation failed\n", i);
452 + continue;
453 + }
454 +
455 + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
456 + if (!ctx) {
457 + caam_jr_free(jrdev);
458 + continue;
459 + }
460 +
461 + ctx->chan.device = dma_dev;
462 + ctx->chan.private = ctx;
463 +
464 + ctx->jrdev = jrdev;
465 +
466 + INIT_LIST_HEAD(&ctx->submit_q);
467 + INIT_LIST_HEAD(&ctx->done_not_acked);
468 + INIT_LIST_HEAD(&ctx->node);
469 + spin_lock_init(&ctx->edesc_lock);
470 +
471 + dma_cookie_init(&ctx->chan);
472 +
473 + /* add the context of this channel to the context list */
474 + list_add_tail(&ctx->node, &dma_ctx_list);
475 +
476 + /* add this channel to the device chan list */
477 + list_add_tail(&ctx->chan.device_node, &dma_dev->channels);
478 +
479 + bonds++;
480 + }
481 +
482 + return bonds;
483 +}
484 +
485 +static inline void caam_jr_dma_free(struct dma_chan *chan)
486 +{
487 + struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
488 + chan);
489 +
490 + list_del(&ctx->node);
491 + list_del(&chan->device_node);
492 + caam_jr_free(ctx->jrdev);
493 + kfree(ctx);
494 +}
495 +
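+/*
+ * Note: the shared descriptor built below copies data from the SEQ IN
+ * pointer to the SEQ OUT pointer in chunks of at most CAAM_DMA_CHUNK_SIZE
+ * bytes, jumping back (the final conditional jump) until the whole input
+ * length has been consumed.
+ */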
496 +static void set_caam_dma_desc(u32 *desc)
497 +{
498 + u32 *jmp_cmd;
499 +
500 + /* dma shared descriptor */
501 + init_sh_desc(desc, HDR_SHARE_NEVER | (1 << HDR_START_IDX_SHIFT));
502 +
503 + /* REG1 = CAAM_DMA_CHUNK_SIZE */
504 + append_math_add_imm_u32(desc, REG1, ZERO, IMM, CAAM_DMA_CHUNK_SIZE);
505 +
506 + /* REG0 = SEQINLEN - CAAM_DMA_CHUNK_SIZE */
507 + append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, CAAM_DMA_CHUNK_SIZE);
508 +
509 + /* if (REG0 > 0)
510 + * jmp to LABEL1
511 + */
512 + jmp_cmd = append_jump(desc, JUMP_TEST_INVALL | JUMP_COND_MATH_N |
513 + JUMP_COND_MATH_Z);
514 +
515 + /* REG1 = SEQINLEN */
516 + append_math_sub(desc, REG1, SEQINLEN, ZERO, CAAM_CMD_SZ);
517 +
518 + /* LABEL1 */
519 + set_jump_tgt_here(desc, jmp_cmd);
520 +
521 + /* VARSEQINLEN = REG1 */
522 + append_math_add(desc, VARSEQINLEN, REG1, ZERO, CAAM_CMD_SZ);
523 +
524 + /* VARSEQOUTLEN = REG1 */
525 + append_math_add(desc, VARSEQOUTLEN, REG1, ZERO, CAAM_CMD_SZ);
526 +
527 + /* do FIFO STORE */
528 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_METADATA | LDST_VLF);
529 +
530 + /* do FIFO LOAD */
531 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
532 + FIFOLD_TYPE_IFIFO | LDST_VLF);
533 +
534 + /* if (REG0 > 0)
535 + * jmp 0xF8 (after shared desc header)
536 + */
537 + append_jump(desc, JUMP_TEST_INVALL | JUMP_COND_MATH_N |
538 + JUMP_COND_MATH_Z | 0xF8);
539 +
540 +#ifdef DEBUG
541 + print_hex_dump(KERN_ERR, "caam dma shdesc@" __stringify(__LINE__) ": ",
542 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
543 +#endif
544 +}
545 +
546 +static int __init caam_dma_probe(struct platform_device *pdev)
547 +{
548 + struct device *dev = &pdev->dev;
549 + struct device *ctrldev = dev->parent;
550 + struct dma_chan *chan, *_chan;
551 + u32 *sh_desc;
552 + int err = -ENOMEM;
553 + int bonds;
554 +
555 + if (!caam_jr_driver_probed()) {
556 + dev_info(dev, "Defer probing after JR driver probing\n");
557 + return -EPROBE_DEFER;
558 + }
559 +
560 + dma_dev = kzalloc(sizeof(*dma_dev), GFP_KERNEL);
561 + if (!dma_dev)
562 + return -ENOMEM;
563 +
564 + dma_sh_desc = kzalloc(sizeof(*dma_sh_desc), GFP_KERNEL | GFP_DMA);
565 + if (!dma_sh_desc)
566 + goto desc_err;
567 +
568 + sh_desc = dma_sh_desc->desc;
569 + set_caam_dma_desc(sh_desc);
570 + dma_sh_desc->desc_dma = dma_map_single(ctrldev, sh_desc,
571 + desc_bytes(sh_desc),
572 + DMA_TO_DEVICE);
573 + if (dma_mapping_error(ctrldev, dma_sh_desc->desc_dma)) {
574 + dev_err(dev, "unable to map dma descriptor\n");
575 + goto map_err;
576 + }
577 +
578 + INIT_LIST_HEAD(&dma_dev->channels);
579 +
580 + bonds = caam_dma_jr_chan_bind();
581 + if (!bonds) {
582 + err = -ENODEV;
583 + goto jr_bind_err;
584 + }
585 +
586 + dma_dev->dev = dev;
587 + dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
588 + dma_cap_set(DMA_SG, dma_dev->cap_mask);
589 + dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
590 + dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
591 + dma_dev->device_tx_status = dma_cookie_status;
592 + dma_dev->device_issue_pending = caam_dma_issue_pending;
593 + dma_dev->device_prep_dma_sg = caam_dma_prep_sg;
594 + dma_dev->device_prep_dma_memcpy = caam_dma_prep_memcpy;
595 + dma_dev->device_free_chan_resources = caam_dma_free_chan_resources;
596 +
597 + err = dma_async_device_register(dma_dev);
598 + if (err) {
599 + dev_err(dev, "Failed to register CAAM DMA engine\n");
600 + goto jr_bind_err;
601 + }
602 +
603 + dev_info(dev, "caam dma support with %d job rings\n", bonds);
604 +
605 + return err;
606 +
607 +jr_bind_err:
608 + list_for_each_entry_safe(chan, _chan, &dma_dev->channels, device_node)
609 + caam_jr_dma_free(chan);
610 +
611 + dma_unmap_single(ctrldev, dma_sh_desc->desc_dma, desc_bytes(sh_desc),
612 + DMA_TO_DEVICE);
613 +map_err:
614 + kfree(dma_sh_desc);
615 +desc_err:
616 + kfree(dma_dev);
617 + return err;
618 +}
619 +
620 +static int caam_dma_remove(struct platform_device *pdev)
621 +{
622 + struct device *dev = &pdev->dev;
623 + struct device *ctrldev = dev->parent;
624 + struct caam_dma_ctx *ctx, *_ctx;
625 +
626 + dma_async_device_unregister(dma_dev);
627 +
628 + list_for_each_entry_safe(ctx, _ctx, &dma_ctx_list, node) {
629 + list_del(&ctx->node);
630 + caam_jr_free(ctx->jrdev);
631 + kfree(ctx);
632 + }
633 +
634 + dma_unmap_single(ctrldev, dma_sh_desc->desc_dma,
635 + desc_bytes(dma_sh_desc->desc), DMA_TO_DEVICE);
636 +
637 + kfree(dma_sh_desc);
638 + kfree(dma_dev);
639 +
640 + dev_info(dev, "caam dma support disabled\n");
641 + return 0;
642 +}
643 +
644 +static const struct of_device_id caam_dma_match[] = {
645 + { .compatible = "fsl,sec-v5.4-dma", },
646 + { .compatible = "fsl,sec-v5.0-dma", },
647 + { .compatible = "fsl,sec-v4.0-dma", },
648 + {},
649 +};
650 +MODULE_DEVICE_TABLE(of, caam_dma_match);
651 +
652 +static struct platform_driver caam_dma_driver = {
653 + .driver = {
654 + .name = "caam-dma",
655 + .of_match_table = caam_dma_match,
656 + },
657 + .probe = caam_dma_probe,
658 + .remove = caam_dma_remove,
659 +};
660 +module_platform_driver(caam_dma_driver);
661 +
662 +MODULE_LICENSE("Dual BSD/GPL");
663 +MODULE_DESCRIPTION("NXP CAAM support for SG DMA");
664 +MODULE_AUTHOR("NXP Semiconductors");
665 --- /dev/null
666 +++ b/drivers/dma/dpaa2-qdma/Kconfig
667 @@ -0,0 +1,8 @@
668 +menuconfig FSL_DPAA2_QDMA
669 + tristate "NXP DPAA2 QDMA"
670 + depends on FSL_MC_BUS && FSL_MC_DPIO
671 + select DMA_ENGINE
672 + select DMA_VIRTUAL_CHANNELS
673 + ---help---
674 + NXP Data Path Acceleration Architecture 2 QDMA driver,
675 + using the NXP MC bus driver.
676 --- /dev/null
677 +++ b/drivers/dma/dpaa2-qdma/Makefile
678 @@ -0,0 +1,8 @@
679 +#
680 +# Makefile for the NXP DPAA2 qDMA controller
681 +#
682 +ccflags-y += -DVERSION=\"\"
683 +
684 +obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma.o
685 +
686 +fsl-dpaa2-qdma-objs := dpaa2-qdma.o dpdmai.o
687 --- /dev/null
688 +++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.c
689 @@ -0,0 +1,940 @@
690 +/*
691 + * drivers/dma/dpaa2-qdma/dpaa2-qdma.c
692 + *
693 + * Copyright 2015-2017 NXP Semiconductor, Inc.
694 + * Author: Changming Huang <jerry.huang@nxp.com>
695 + *
696 + * Driver for the NXP QDMA engine with QMan mode.
697 + * Channel virtualization is supported through enqueuing of DMA jobs to,
698 + * or dequeuing DMA jobs from different work queues with QMan portal.
699 + * This module can be found on NXP LS2 SoCs.
700 + *
701 + * This program is free software; you can redistribute it and/or modify it
702 + * under the terms of the GNU General Public License as published by the
703 + * Free Software Foundation; either version 2 of the License, or (at your
704 + * option) any later version.
705 + */
706 +
707 +#include <linux/init.h>
708 +#include <linux/module.h>
709 +#include <linux/interrupt.h>
710 +#include <linux/clk.h>
711 +#include <linux/dma-mapping.h>
712 +#include <linux/dmapool.h>
713 +#include <linux/slab.h>
714 +#include <linux/spinlock.h>
715 +#include <linux/of.h>
716 +#include <linux/of_device.h>
717 +#include <linux/of_address.h>
718 +#include <linux/of_irq.h>
719 +#include <linux/of_dma.h>
720 +#include <linux/types.h>
721 +#include <linux/delay.h>
722 +#include <linux/iommu.h>
723 +
724 +#include "../virt-dma.h"
725 +
726 +#include <linux/fsl/mc.h>
727 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
728 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
729 +#include "fsl_dpdmai_cmd.h"
730 +#include "fsl_dpdmai.h"
731 +#include "dpaa2-qdma.h"
732 +
733 +static bool smmu_disable = true;
734 +
735 +static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
736 +{
737 + return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
738 +}
739 +
740 +static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
741 +{
742 + return container_of(vd, struct dpaa2_qdma_comp, vdesc);
743 +}
744 +
745 +static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
746 +{
747 + return 0;
748 +}
749 +
750 +static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
751 +{
752 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
753 + unsigned long flags;
754 + LIST_HEAD(head);
755 +
756 + spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
757 + vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
758 + spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);
759 +
760 + vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);
761 +}
762 +
763 +/*
764 + * Request a command descriptor for enqueue.
765 + */
766 +static struct dpaa2_qdma_comp *
767 +dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
768 +{
769 + struct dpaa2_qdma_comp *comp_temp = NULL;
770 + unsigned long flags;
771 +
772 + spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
773 + if (list_empty(&dpaa2_chan->comp_free)) {
774 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
775 + comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
776 + if (!comp_temp)
777 + goto err;
778 + comp_temp->fd_virt_addr = dma_pool_alloc(dpaa2_chan->fd_pool,
779 + GFP_NOWAIT, &comp_temp->fd_bus_addr);
780 + if (!comp_temp->fd_virt_addr)
781 + goto err;
782 +
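+		/*
+		 * The single fd_pool allocation holds the frame descriptor,
+		 * the three frame list entries and the source/destination
+		 * descriptors back to back; the pointers below carve it up
+		 * accordingly.
+		 */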
783 + comp_temp->fl_virt_addr =
784 + (void *)((struct dpaa2_fd *)
785 + comp_temp->fd_virt_addr + 1);
786 + comp_temp->fl_bus_addr = comp_temp->fd_bus_addr +
787 + sizeof(struct dpaa2_fd);
788 + comp_temp->desc_virt_addr =
789 + (void *)((struct dpaa2_fl_entry *)
790 + comp_temp->fl_virt_addr + 3);
791 + comp_temp->desc_bus_addr = comp_temp->fl_bus_addr +
792 + sizeof(struct dpaa2_fl_entry) * 3;
793 +
794 + comp_temp->qchan = dpaa2_chan;
795 + comp_temp->sg_blk_num = 0;
796 + INIT_LIST_HEAD(&comp_temp->sg_src_head);
797 + INIT_LIST_HEAD(&comp_temp->sg_dst_head);
798 + return comp_temp;
799 + }
800 + comp_temp = list_first_entry(&dpaa2_chan->comp_free,
801 + struct dpaa2_qdma_comp, list);
802 + list_del(&comp_temp->list);
803 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
804 +
805 + comp_temp->qchan = dpaa2_chan;
806 +err:
807 + return comp_temp;
808 +}
809 +
810 +static void dpaa2_qdma_populate_fd(uint32_t format,
811 + struct dpaa2_qdma_comp *dpaa2_comp)
812 +{
813 + struct dpaa2_fd *fd;
814 +
815 + fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
816 + memset(fd, 0, sizeof(struct dpaa2_fd));
817 +
818 + /* fd populated */
819 + dpaa2_fd_set_addr(fd, dpaa2_comp->fl_bus_addr);
820 + /* Bypass memory translation, Frame list format, short length disable */
821 + /* we need to disable BMT if fsl-mc uses an IOVA address */
822 + if (smmu_disable)
823 + dpaa2_fd_set_bpid(fd, QMAN_FD_BMT_ENABLE);
824 + dpaa2_fd_set_format(fd, QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE);
825 +
826 + dpaa2_fd_set_frc(fd, format | QDMA_SER_CTX);
827 +}
828 +
829 +/* first frame list for descriptor buffer */
830 +static void dpaa2_qdma_populate_first_framel(
831 + struct dpaa2_fl_entry *f_list,
832 + struct dpaa2_qdma_comp *dpaa2_comp)
833 +{
834 + struct dpaa2_qdma_sd_d *sdd;
835 +
836 + sdd = (struct dpaa2_qdma_sd_d *)dpaa2_comp->desc_virt_addr;
837 + memset(sdd, 0, 2 * (sizeof(*sdd)));
838 + /* source and destination descriptor */
839 + sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT); /* source descriptor CMD */
840 + sdd++;
841 + sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT); /* dest descriptor CMD */
842 +
843 + memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
844 + /* first frame list to source descriptor */
845 +
846 + dpaa2_fl_set_addr(f_list, dpaa2_comp->desc_bus_addr);
847 + dpaa2_fl_set_len(f_list, 0x20);
848 + dpaa2_fl_set_format(f_list, QDMA_FL_FMT_SBF | QDMA_FL_SL_LONG);
849 +
850 + if (smmu_disable)
851 + f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE); /* bypass memory translation */
852 +}
853 +
854 +/* source and destination frame list */
855 +static void dpaa2_qdma_populate_frames(struct dpaa2_fl_entry *f_list,
856 + dma_addr_t dst, dma_addr_t src, size_t len, uint8_t fmt)
857 +{
858 + /* source frame list to source buffer */
859 + memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
860 +
861 +
862 + dpaa2_fl_set_addr(f_list, src);
863 + dpaa2_fl_set_len(f_list, len);
864 + dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG)); /* single buffer frame or scatter gather frame */
865 + if (smmu_disable)
866 + f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE); /* bypass memory translation */
867 +
868 + f_list++;
869 + /* destination frame list to destination buffer */
870 + memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
871 +
872 + dpaa2_fl_set_addr(f_list, dst);
873 + dpaa2_fl_set_len(f_list, len);
874 + dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
875 + dpaa2_fl_set_final(f_list, QDMA_FL_F); /* single buffer frame or scatter gather frame */
876 + if (smmu_disable)
877 + f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE); /* bypass memory translation */
878 +}
879 +
880 +static struct dma_async_tx_descriptor *dpaa2_qdma_prep_memcpy(
881 + struct dma_chan *chan, dma_addr_t dst,
882 + dma_addr_t src, size_t len, unsigned long flags)
883 +{
884 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
885 + struct dpaa2_qdma_comp *dpaa2_comp;
886 + struct dpaa2_fl_entry *f_list;
887 + uint32_t format;
888 +
889 + dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
890 +
891 +#ifdef LONG_FORMAT
892 + format = QDMA_FD_LONG_FORMAT;
893 +#else
894 + format = QDMA_FD_SHORT_FORMAT;
895 +#endif
896 + /* populate Frame descriptor */
897 + dpaa2_qdma_populate_fd(format, dpaa2_comp);
898 +
899 + f_list = (struct dpaa2_fl_entry *)dpaa2_comp->fl_virt_addr;
900 +
901 +#ifdef LONG_FORMAT
902 + /* first frame list for descriptor buffer (long format) */
903 + dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp);
904 +
905 + f_list++;
906 +#endif
907 +
908 + dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);
909 +
910 + return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
911 +}
912 +
913 +static struct qdma_sg_blk *dpaa2_qdma_get_sg_blk(
914 + struct dpaa2_qdma_comp *dpaa2_comp,
915 + struct dpaa2_qdma_chan *dpaa2_chan)
916 +{
917 + struct qdma_sg_blk *sg_blk = NULL;
918 + dma_addr_t phy_sgb;
919 + unsigned long flags;
920 +
921 + spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
922 + if (list_empty(&dpaa2_chan->sgb_free)) {
923 + sg_blk = (struct qdma_sg_blk *)dma_pool_alloc(
924 + dpaa2_chan->sg_blk_pool,
925 + GFP_NOWAIT, &phy_sgb);
926 + if (!sg_blk) {
927 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
928 + return sg_blk;
929 + }
930 + sg_blk->blk_virt_addr = (void *)(sg_blk + 1);
931 + sg_blk->blk_bus_addr = phy_sgb + sizeof(*sg_blk);
932 + } else {
933 + sg_blk = list_first_entry(&dpaa2_chan->sgb_free,
934 + struct qdma_sg_blk, list);
935 + list_del(&sg_blk->list);
936 + }
937 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
938 +
939 + return sg_blk;
940 +}
941 +
942 +static uint32_t dpaa2_qdma_populate_sg(struct device *dev,
943 + struct dpaa2_qdma_chan *dpaa2_chan,
944 + struct dpaa2_qdma_comp *dpaa2_comp,
945 + struct scatterlist *dst_sg, u32 dst_nents,
946 + struct scatterlist *src_sg, u32 src_nents)
947 +{
948 + struct dpaa2_qdma_sg *src_sge;
949 + struct dpaa2_qdma_sg *dst_sge;
950 + struct qdma_sg_blk *sg_blk;
951 + struct qdma_sg_blk *sg_blk_dst;
952 + dma_addr_t src;
953 + dma_addr_t dst;
954 + uint32_t num;
955 + uint32_t blocks;
956 + uint32_t len = 0;
957 + uint32_t total_len = 0;
958 + int i, j = 0;
959 +
960 + num = min(dst_nents, src_nents);
961 + blocks = num / (NUM_SG_PER_BLK - 1);
962 + if (num % (NUM_SG_PER_BLK - 1))
963 + blocks += 1;
964 + if (dpaa2_comp->sg_blk_num < blocks) {
965 + len = blocks - dpaa2_comp->sg_blk_num;
966 + for (i = 0; i < len; i++) {
967 + /* source sg blocks */
968 + sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan);
969 + if (!sg_blk)
970 + return 0;
971 + list_add_tail(&sg_blk->list, &dpaa2_comp->sg_src_head);
972 + /* destination sg blocks */
973 + sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan);
974 + if (!sg_blk)
975 + return 0;
976 + list_add_tail(&sg_blk->list, &dpaa2_comp->sg_dst_head);
977 + }
978 + } else {
979 + len = dpaa2_comp->sg_blk_num - blocks;
980 + for (i = 0; i < len; i++) {
981 + spin_lock(&dpaa2_chan->queue_lock);
982 + /* handle source sg blocks */
983 + sg_blk = list_first_entry(&dpaa2_comp->sg_src_head,
984 + struct qdma_sg_blk, list);
985 + list_del(&sg_blk->list);
986 + list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free);
987 + /* handle destination sg blocks */
988 + sg_blk = list_first_entry(&dpaa2_comp->sg_dst_head,
989 + struct qdma_sg_blk, list);
990 + list_del(&sg_blk->list);
991 + list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free);
992 + spin_unlock(&dpaa2_chan->queue_lock);
993 + }
994 + }
995 + dpaa2_comp->sg_blk_num = blocks;
996 +
997 + /* get the first source sg phy address */
998 + sg_blk = list_first_entry(&dpaa2_comp->sg_src_head,
999 + struct qdma_sg_blk, list);
1000 + dpaa2_comp->sge_src_bus_addr = sg_blk->blk_bus_addr;
1001 + /* get the first destination sg phy address */
1002 + sg_blk_dst = list_first_entry(&dpaa2_comp->sg_dst_head,
1003 + struct qdma_sg_blk, list);
1004 + dpaa2_comp->sge_dst_bus_addr = sg_blk_dst->blk_bus_addr;
1005 +
1006 + for (i = 0; i < blocks; i++) {
1007 + src_sge = (struct dpaa2_qdma_sg *)sg_blk->blk_virt_addr;
1008 + dst_sge = (struct dpaa2_qdma_sg *)sg_blk_dst->blk_virt_addr;
1009 +
1010 + for (j = 0; j < (NUM_SG_PER_BLK - 1); j++) {
1011 + len = min(sg_dma_len(dst_sg), sg_dma_len(src_sg));
1012 + if (0 == len)
1013 + goto fetch;
1014 + total_len += len;
1015 + src = sg_dma_address(src_sg);
1016 + dst = sg_dma_address(dst_sg);
1017 +
1018 + /* source SG */
1019 + src_sge->addr_lo = src;
1020 + src_sge->addr_hi = (src >> 32);
1021 + src_sge->data_len.data_len_sl0 = len;
1022 + src_sge->ctrl.sl = QDMA_SG_SL_LONG;
1023 + src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
1024 + /* destination SG */
1025 + dst_sge->addr_lo = dst;
1026 + dst_sge->addr_hi = (dst >> 32);
1027 + dst_sge->data_len.data_len_sl0 = len;
1028 + dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
1029 + dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
1030 +fetch:
1031 + num--;
1032 + if (0 == num) {
1033 + src_sge->ctrl.f = QDMA_SG_F;
1034 + dst_sge->ctrl.f = QDMA_SG_F;
1035 + goto end;
1036 + }
1037 + dst_sg = sg_next(dst_sg);
1038 + src_sg = sg_next(src_sg);
1039 + src_sge++;
1040 + dst_sge++;
1041 + if (j == (NUM_SG_PER_BLK - 2)) {
1042 + /* for next blocks, extension */
1043 + sg_blk = list_next_entry(sg_blk, list);
1044 + sg_blk_dst = list_next_entry(sg_blk_dst, list);
1045 + src_sge->addr_lo = sg_blk->blk_bus_addr;
1046 + src_sge->addr_hi = sg_blk->blk_bus_addr >> 32;
1047 + src_sge->ctrl.sl = QDMA_SG_SL_LONG;
1048 + src_sge->ctrl.fmt = QDMA_SG_FMT_SGTE;
1049 + dst_sge->addr_lo = sg_blk_dst->blk_bus_addr;
1050 + dst_sge->addr_hi =
1051 + sg_blk_dst->blk_bus_addr >> 32;
1052 + dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
1053 + dst_sge->ctrl.fmt = QDMA_SG_FMT_SGTE;
1054 + }
1055 + }
1056 + }
1057 +
1058 +end:
1059 + return total_len;
1060 +}
1061 +
1062 +static enum dma_status dpaa2_qdma_tx_status(struct dma_chan *chan,
1063 + dma_cookie_t cookie, struct dma_tx_state *txstate)
1064 +{
1065 + return dma_cookie_status(chan, cookie, txstate);
1066 +}
1067 +
1068 +static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
1069 +{
1070 +}
1071 +
1072 +static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
1073 +{
1074 + struct dpaa2_qdma_comp *dpaa2_comp;
1075 + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
1076 + struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
1077 + struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
1078 + struct virt_dma_desc *vdesc;
1079 + struct dpaa2_fd *fd;
1080 + int err;
1081 + unsigned long flags;
1082 +
1083 + spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
1084 + spin_lock(&dpaa2_chan->vchan.lock);
1085 + if (vchan_issue_pending(&dpaa2_chan->vchan)) {
1086 + vdesc = vchan_next_desc(&dpaa2_chan->vchan);
1087 + if (!vdesc)
1088 + goto err_enqueue;
1089 + dpaa2_comp = to_fsl_qdma_comp(vdesc);
1090 +
1091 + fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr;
1092 +
1093 + list_del(&vdesc->node);
1094 + list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);
1095 +
1096 + /* TODO: priority hard-coded to zero */
1097 + err = dpaa2_io_service_enqueue_fq(NULL,
1098 + priv->tx_queue_attr[0].fqid, fd);
1099 + if (err) {
1100 + list_del(&dpaa2_comp->list);
1101 + list_add_tail(&dpaa2_comp->list,
1102 + &dpaa2_chan->comp_free);
1103 + }
1104 +
1105 + }
1106 +err_enqueue:
1107 + spin_unlock(&dpaa2_chan->vchan.lock);
1108 + spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
1109 +}
1110 +
1111 +static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
1112 +{
1113 + struct device *dev = &ls_dev->dev;
1114 + struct dpaa2_qdma_priv *priv;
1115 + struct dpaa2_qdma_priv_per_prio *ppriv;
1116 + uint8_t prio_def = DPDMAI_PRIO_NUM;
1117 + int err;
1118 + int i;
1119 +
1120 + priv = dev_get_drvdata(dev);
1121 +
1122 + priv->dev = dev;
1123 + priv->dpqdma_id = ls_dev->obj_desc.id;
1124 +
1125 + /* Get the handle for the DPDMAI this interface is associated with */
1126 + err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
1127 + if (err) {
1128 + dev_err(dev, "dpdmai_open() failed\n");
1129 + return err;
1130 + }
1131 + dev_info(dev, "Opened dpdmai object successfully\n");
1132 +
1133 + err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
1134 + &priv->dpdmai_attr);
1135 + if (err) {
1136 + dev_err(dev, "dpdmai_get_attributes() failed\n");
1137 + return err;
1138 + }
1139 +
1140 + if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
1141 + dev_err(dev, "DPDMAI major version mismatch\n"
1142 + "Found %u.%u, supported version is %u.%u\n",
1143 + priv->dpdmai_attr.version.major,
1144 + priv->dpdmai_attr.version.minor,
1145 + DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
1146 + }
1147 +
1148 + if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
1149 + dev_err(dev, "DPDMAI minor version mismatch\n"
1150 + "Found %u.%u, supported version is %u.%u\n",
1151 + priv->dpdmai_attr.version.major,
1152 + priv->dpdmai_attr.version.minor,
1153 + DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
1154 + }
1155 +
1156 + priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
1157 + ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
1158 + if (!ppriv) {
1159 + dev_err(dev, "kzalloc for ppriv failed\n");
1160 + return -1;
1161 + }
1162 + priv->ppriv = ppriv;
1163 +
1164 + for (i = 0; i < priv->num_pairs; i++) {
1165 + err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
1166 + i, &priv->rx_queue_attr[i]);
1167 + if (err) {
1168 + dev_err(dev, "dpdmai_get_rx_queue() failed\n");
1169 + return err;
1170 + }
1171 + ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
1172 +
1173 + err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
1174 + i, &priv->tx_queue_attr[i]);
1175 + if (err) {
1176 + dev_err(dev, "dpdmai_get_tx_queue() failed\n");
1177 + return err;
1178 + }
1179 + ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
1180 + ppriv->prio = i;
1181 + ppriv->priv = priv;
1182 + ppriv++;
1183 + }
1184 +
1185 + return 0;
1186 +}
1187 +
1188 +static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
1189 +{
1190 + struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
1191 + struct dpaa2_qdma_priv_per_prio, nctx);
1192 + struct dpaa2_qdma_priv *priv = ppriv->priv;
1193 + struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
1194 + struct dpaa2_qdma_chan *qchan;
1195 + const struct dpaa2_fd *fd;
1196 + const struct dpaa2_fd *fd_eq;
1197 + struct dpaa2_dq *dq;
1198 + int err;
1199 + int is_last = 0;
1200 + uint8_t status;
1201 + int i;
1202 + int found;
1203 + uint32_t n_chans = priv->dpaa2_qdma->n_chans;
1204 +
1205 + do {
1206 + err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
1207 + ppriv->store);
1208 + } while (err);
1209 +
1210 + while (!is_last) {
1211 + do {
1212 + dq = dpaa2_io_store_next(ppriv->store, &is_last);
1213 + } while (!is_last && !dq);
1214 + if (!dq) {
1215 + dev_err(priv->dev, "FQID returned no valid frames!\n");
1216 + continue;
1217 + }
1218 +
1219 + /* obtain FD and process the error */
1220 + fd = dpaa2_dq_fd(dq);
1221 +
1222 + status = dpaa2_fd_get_ctrl(fd) & 0xff;
1223 + if (status)
1224 + dev_err(priv->dev, "FD error occurred\n");
1225 + found = 0;
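+		/*
+		 * Match the dequeued FD back to the completion that enqueued
+		 * it by comparing frame addresses against every channel's
+		 * in-flight (comp_used) list.
+		 */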
1226 + for (i = 0; i < n_chans; i++) {
1227 + qchan = &priv->dpaa2_qdma->chans[i];
1228 + spin_lock(&qchan->queue_lock);
1229 + if (list_empty(&qchan->comp_used)) {
1230 + spin_unlock(&qchan->queue_lock);
1231 + continue;
1232 + }
1233 + list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
1234 + &qchan->comp_used, list) {
1235 + fd_eq = (struct dpaa2_fd *)
1236 + dpaa2_comp->fd_virt_addr;
1237 +
1238 + if (le64_to_cpu(fd_eq->simple.addr) ==
1239 + le64_to_cpu(fd->simple.addr)) {
1240 +
1241 + list_del(&dpaa2_comp->list);
1242 + list_add_tail(&dpaa2_comp->list,
1243 + &qchan->comp_free);
1244 +
1245 + spin_lock(&qchan->vchan.lock);
1246 + vchan_cookie_complete(
1247 + &dpaa2_comp->vdesc);
1248 + spin_unlock(&qchan->vchan.lock);
1249 + found = 1;
1250 + break;
1251 + }
1252 + }
1253 + spin_unlock(&qchan->queue_lock);
1254 + if (found)
1255 + break;
1256 + }
1257 + }
1258 +
1259 + dpaa2_io_service_rearm(NULL, ctx);
1260 +}
1261 +
1262 +static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
1263 +{
1264 + int err, i, num;
1265 + struct device *dev = priv->dev;
1266 + struct dpaa2_qdma_priv_per_prio *ppriv;
1267 +
1268 + num = priv->num_pairs;
1269 + ppriv = priv->ppriv;
1270 + for (i = 0; i < num; i++) {
1271 + ppriv->nctx.is_cdan = 0;
1272 + ppriv->nctx.desired_cpu = 1;
1273 + ppriv->nctx.id = ppriv->rsp_fqid;
1274 + ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
1275 + err = dpaa2_io_service_register(NULL, &ppriv->nctx);
1276 + if (err) {
1277 + dev_err(dev, "Notification register failed\n");
1278 + goto err_service;
1279 + }
1280 +
1281 + ppriv->store = dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE,
1282 + dev);
1283 + if (!ppriv->store) {
1284 + dev_err(dev, "dpaa2_io_store_create() failed\n");
1285 + goto err_store;
1286 + }
1287 +
1288 + ppriv++;
1289 + }
1290 + return 0;
1291 +
1292 +err_store:
1293 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
1294 +err_service:
1295 + ppriv--;
1296 + while (ppriv >= priv->ppriv) {
1297 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
1298 + dpaa2_io_store_destroy(ppriv->store);
1299 + ppriv--;
1300 + }
1301 + return -1;
1302 +}
1303 +
1304 +static void __cold dpaa2_dpmai_store_free(struct dpaa2_qdma_priv *priv)
1305 +{
1306 + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
1307 + int i;
1308 +
1309 + for (i = 0; i < priv->num_pairs; i++) {
1310 + dpaa2_io_store_destroy(ppriv->store);
1311 + ppriv++;
1312 + }
1313 +}
1314 +
1315 +static void __cold dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
1316 +{
1317 + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
1318 + int i;
1319 +
1320 + for (i = 0; i < priv->num_pairs; i++) {
1321 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
1322 + ppriv++;
1323 + }
1324 +}
1325 +
1326 +static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
1327 +{
1328 + int err;
1329 + struct dpdmai_rx_queue_cfg rx_queue_cfg;
1330 + struct device *dev = priv->dev;
1331 + struct dpaa2_qdma_priv_per_prio *ppriv;
1332 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
1333 + int i, num;
1334 +
1335 + num = priv->num_pairs;
1336 + ppriv = priv->ppriv;
1337 + for (i = 0; i < num; i++) {
1338 + rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
1339 + DPDMAI_QUEUE_OPT_DEST;
1340 + rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
1341 + rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
1342 + rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
1343 + rx_queue_cfg.dest_cfg.priority = ppriv->prio;
1344 + err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
1345 + rx_queue_cfg.dest_cfg.priority, &rx_queue_cfg);
1346 + if (err) {
1347 + dev_err(dev, "dpdmai_set_rx_queue() failed\n");
1348 + return err;
1349 + }
1350 +
1351 + ppriv++;
1352 + }
1353 +
1354 + return 0;
1355 +}
1356 +
1357 +static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
1358 +{
1359 + int err = 0;
1360 + struct device *dev = priv->dev;
1361 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
1362 + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
1363 + int i;
1364 +
1365 + for (i = 0; i < priv->num_pairs; i++) {
1366 + ppriv->nctx.qman64 = 0;
1367 + ppriv->nctx.dpio_id = 0;
1368 + ppriv++;
1369 + }
1370 +
1371 + err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
1372 + if (err)
1373 + dev_err(dev, "dpdmai_reset() failed\n");
1374 +
1375 + return err;
1376 +}
1377 +
1378 +static void __cold dpaa2_dpdmai_free_pool(struct dpaa2_qdma_chan *qchan,
1379 + struct list_head *head)
1380 +{
1381 + struct qdma_sg_blk *sgb_tmp, *_sgb_tmp;
1382 + /* free the QDMA SG pool block */
1383 + list_for_each_entry_safe(sgb_tmp, _sgb_tmp, head, list) {
1384 + sgb_tmp->blk_virt_addr = (void *)((struct qdma_sg_blk *)
1385 + sgb_tmp->blk_virt_addr - 1);
1386 + sgb_tmp->blk_bus_addr = sgb_tmp->blk_bus_addr
1387 + - sizeof(*sgb_tmp);
1388 + dma_pool_free(qchan->sg_blk_pool, sgb_tmp->blk_virt_addr,
1389 + sgb_tmp->blk_bus_addr);
1390 + }
1391 +
1392 +}
1393 +
1394 +static void __cold dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
1395 + struct list_head *head)
1396 +{
1397 + struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
1398 + /* free the QDMA comp resource */
1399 + list_for_each_entry_safe(comp_tmp, _comp_tmp,
1400 + head, list) {
1401 + dma_pool_free(qchan->fd_pool,
1402 + comp_tmp->fd_virt_addr,
1403 + comp_tmp->fd_bus_addr);
1404 + /* free the SG source block on comp */
1405 + dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_src_head);
1406 + /* free the SG destination block on comp */
1407 + dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_dst_head);
1408 + list_del(&comp_tmp->list);
1409 + kfree(comp_tmp);
1410 + }
1411 +
1412 +}
1413 +
1414 +static void __cold dpaa2_dpdmai_free_channels(
1415 + struct dpaa2_qdma_engine *dpaa2_qdma)
1416 +{
1417 + struct dpaa2_qdma_chan *qchan;
1418 + int num, i;
1419 +
1420 + num = dpaa2_qdma->n_chans;
1421 + for (i = 0; i < num; i++) {
1422 + qchan = &dpaa2_qdma->chans[i];
1423 + dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
1424 + dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
1425 + dpaa2_dpdmai_free_pool(qchan, &qchan->sgb_free);
1426 + dma_pool_destroy(qchan->fd_pool);
1427 + dma_pool_destroy(qchan->sg_blk_pool);
1428 + }
1429 +}
1430 +
1431 +static int dpaa2_dpdmai_alloc_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
1432 +{
1433 + struct dpaa2_qdma_chan *dpaa2_chan;
1434 + struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;
1435 + int i;
1436 +
1437 + INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
1438 + for (i = 0; i < dpaa2_qdma->n_chans; i++) {
1439 + dpaa2_chan = &dpaa2_qdma->chans[i];
1440 + dpaa2_chan->qdma = dpaa2_qdma;
1441 + dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
1442 + vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
1443 +
1444 + dpaa2_chan->fd_pool = dma_pool_create("fd_pool",
1445 + dev, FD_POOL_SIZE, 32, 0);
1446 + if (!dpaa2_chan->fd_pool)
1447 + return -1;
1448 + dpaa2_chan->sg_blk_pool = dma_pool_create("sg_blk_pool",
1449 + dev, SG_POOL_SIZE, 32, 0);
1450 + if (!dpaa2_chan->sg_blk_pool)
1451 + return -1;
1452 +
1453 + spin_lock_init(&dpaa2_chan->queue_lock);
1454 + INIT_LIST_HEAD(&dpaa2_chan->comp_used);
1455 + INIT_LIST_HEAD(&dpaa2_chan->comp_free);
1456 + INIT_LIST_HEAD(&dpaa2_chan->sgb_free);
1457 + }
1458 + return 0;
1459 +}
1460 +
1461 +static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
1462 +{
1463 + struct dpaa2_qdma_priv *priv;
1464 + struct device *dev = &dpdmai_dev->dev;
1465 + struct dpaa2_qdma_engine *dpaa2_qdma;
1466 + int err;
1467 +
1468 + priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1469 + if (!priv)
1470 + return -ENOMEM;
1471 + dev_set_drvdata(dev, priv);
1472 + priv->dpdmai_dev = dpdmai_dev;
1473 +
1474 + priv->iommu_domain = iommu_get_domain_for_dev(dev);
1475 + if (priv->iommu_domain)
1476 + smmu_disable = false;
1477 +
1478 + /* obtain a MC portal */
1479 + err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
1480 + if (err) {
1481 + dev_err(dev, "MC portal allocation failed\n");
1482 + goto err_mcportal;
1483 + }
1484 +
1485 + /* DPDMAI initialization */
1486 + err = dpaa2_qdma_setup(dpdmai_dev);
1487 + if (err) {
1488 + dev_err(dev, "dpaa2_dpdmai_setup() failed\n");
1489 + goto err_dpdmai_setup;
1490 + }
1491 +
1492 + /* DPIO */
1493 + err = dpaa2_qdma_dpio_setup(priv);
1494 + if (err) {
1495 + dev_err(dev, "dpaa2_dpdmai_dpio_setup() failed\n");
1496 + goto err_dpio_setup;
1497 + }
1498 +
1499 + /* DPDMAI binding to DPIO */
1500 + err = dpaa2_dpdmai_bind(priv);
1501 + if (err) {
1502 + dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
1503 + goto err_bind;
1504 + }
1505 +
1506 + /* DPDMAI enable */
1507 + err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
1508 + if (err) {
1509 + dev_err(dev, "dpdmai_enable() faile\n");
1510 + goto err_enable;
1511 + }
1512 +
1513 + dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
1514 + if (!dpaa2_qdma) {
1515 + err = -ENOMEM;
1516 + goto err_eng;
1517 + }
1518 +
1519 + priv->dpaa2_qdma = dpaa2_qdma;
1520 + dpaa2_qdma->priv = priv;
1521 +
1522 + dpaa2_qdma->n_chans = NUM_CH;
1523 +
1524 + err = dpaa2_dpdmai_alloc_channels(dpaa2_qdma);
1525 + if (err) {
1526 + dev_err(dev, "QDMA alloc channels faile\n");
1527 + goto err_reg;
1528 + }
1529 +
1530 + dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
1531 + dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
1532 + dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);
1533 +
1534 + dpaa2_qdma->dma_dev.dev = dev;
1535 + dpaa2_qdma->dma_dev.device_alloc_chan_resources
1536 + = dpaa2_qdma_alloc_chan_resources;
1537 + dpaa2_qdma->dma_dev.device_free_chan_resources
1538 + = dpaa2_qdma_free_chan_resources;
1539 + dpaa2_qdma->dma_dev.device_tx_status = dpaa2_qdma_tx_status;
1540 + dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
1541 + dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;
1542 +
1543 + err = dma_async_device_register(&dpaa2_qdma->dma_dev);
1544 + if (err) {
1545 + dev_err(dev, "Can't register NXP QDMA engine.\n");
1546 + goto err_reg;
1547 + }
1548 +
1549 + return 0;
1550 +
1551 +err_reg:
1552 + dpaa2_dpdmai_free_channels(dpaa2_qdma);
1553 + kfree(dpaa2_qdma);
1554 +err_eng:
1555 + dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
1556 +err_enable:
1557 + dpaa2_dpdmai_dpio_unbind(priv);
1558 +err_bind:
1559 + dpaa2_dpmai_store_free(priv);
1560 + dpaa2_dpdmai_dpio_free(priv);
1561 +err_dpio_setup:
1562 + dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
1563 +err_dpdmai_setup:
1564 + fsl_mc_portal_free(priv->mc_io);
1565 +err_mcportal:
1566 + kfree(priv->ppriv);
1567 + kfree(priv);
1568 + dev_set_drvdata(dev, NULL);
1569 + return err;
1570 +}
1571 +
1572 +static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
1573 +{
1574 + struct device *dev;
1575 + struct dpaa2_qdma_priv *priv;
1576 + struct dpaa2_qdma_engine *dpaa2_qdma;
1577 +
1578 + dev = &ls_dev->dev;
1579 + priv = dev_get_drvdata(dev);
1580 + dpaa2_qdma = priv->dpaa2_qdma;
1581 +
1582 + dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
1583 + dpaa2_dpdmai_dpio_unbind(priv);
1584 + dpaa2_dpmai_store_free(priv);
1585 + dpaa2_dpdmai_dpio_free(priv);
1586 + dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
1587 + fsl_mc_portal_free(priv->mc_io);
1588 + dev_set_drvdata(dev, NULL);
1589 + dpaa2_dpdmai_free_channels(dpaa2_qdma);
1590 +
1591 + dma_async_device_unregister(&dpaa2_qdma->dma_dev);
1592 + kfree(priv);
1593 + kfree(dpaa2_qdma);
1594 +
1595 + return 0;
1596 +}
1597 +
1598 +static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
1599 + {
1600 + .vendor = FSL_MC_VENDOR_FREESCALE,
1601 + .obj_type = "dpdmai",
1602 + },
1603 + { .vendor = 0x0 }
1604 +};
1605 +
1606 +static struct fsl_mc_driver dpaa2_qdma_driver = {
1607 + .driver = {
1608 + .name = "dpaa2-qdma",
1609 + .owner = THIS_MODULE,
1610 + },
1611 + .probe = dpaa2_qdma_probe,
1612 + .remove = dpaa2_qdma_remove,
1613 + .match_id_table = dpaa2_qdma_id_table
1614 +};
1615 +
1616 +static int __init dpaa2_qdma_driver_init(void)
1617 +{
1618 + return fsl_mc_driver_register(&(dpaa2_qdma_driver));
1619 +}
1620 +late_initcall(dpaa2_qdma_driver_init);
1621 +
1622 +static void __exit fsl_qdma_exit(void)
1623 +{
1624 + fsl_mc_driver_unregister(&(dpaa2_qdma_driver));
1625 +}
1626 +module_exit(fsl_qdma_exit);
1627 +
1628 +MODULE_DESCRIPTION("NXP DPAA2 qDMA driver");
1629 +MODULE_LICENSE("Dual BSD/GPL");
1630 --- /dev/null
1631 +++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.h
1632 @@ -0,0 +1,227 @@
1633 +/* Copyright 2015 NXP Semiconductor Inc.
1634 + *
1635 + * Redistribution and use in source and binary forms, with or without
1636 + * modification, are permitted provided that the following conditions are met:
1637 + * * Redistributions of source code must retain the above copyright
1638 + * notice, this list of conditions and the following disclaimer.
1639 + * * Redistributions in binary form must reproduce the above copyright
1640 + * notice, this list of conditions and the following disclaimer in the
1641 + * documentation and/or other materials provided with the distribution.
1642 + * * Neither the name of NXP Semiconductor nor the
1643 + * names of its contributors may be used to endorse or promote products
1644 + * derived from this software without specific prior written permission.
1645 + *
1646 + *
1647 + * ALTERNATIVELY, this software may be distributed under the terms of the
1648 + * GNU General Public License ("GPL") as published by the Free Software
1649 + * Foundation, either version 2 of that License or (at your option) any
1650 + * later version.
1651 + *
1652 + * THIS SOFTWARE IS PROVIDED BY NXP Semiconductor ``AS IS'' AND ANY
1653 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1654 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1655 + * DISCLAIMED. IN NO EVENT SHALL NXP Semiconductor BE LIABLE FOR ANY
1656 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1657 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1658 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1659 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1660 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1661 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1662 + */
1663 +
1664 +#ifndef __DPAA2_QDMA_H
1665 +#define __DPAA2_QDMA_H
1666 +
1667 +#define LONG_FORMAT 1
1668 +
1669 +#define DPAA2_QDMA_STORE_SIZE 16
1670 +#define NUM_CH 8
1671 +#define NUM_SG_PER_BLK 16
1672 +
1673 +#define QDMA_DMR_OFFSET 0x0
1674 +#define QDMA_DQ_EN (0 << 30)
1675 +#define QDMA_DQ_DIS (1 << 30)
1676 +
1677 +#define QDMA_DSR_M_OFFSET 0x10004
1678 +
1679 +struct dpaa2_qdma_sd_d {
1680 + uint32_t rsv:32;
1681 + union {
1682 + struct {
1683 + uint32_t ssd:12; /* source stride distance */
1684 + uint32_t sss:12; /* source stride size */
1685 + uint32_t rsv1:8;
1686 + } sdf;
1687 + struct {
1688 + uint32_t dsd:12; /* Destination stride distance */
1689 + uint32_t dss:12; /* Destination stride size */
1690 + uint32_t rsv2:8;
1691 + } ddf;
1692 + } df;
1693 + uint32_t rbpcmd; /* Route-by-port command */
1694 + uint32_t cmd;
1695 +} __attribute__((__packed__));
1696 +/* Source descriptor command read transaction type for RBP=0:
1697 + coherent copy of cacheable memory */
1698 +#define QDMA_SD_CMD_RDTTYPE_COHERENT (0xb << 28)
1699 +/* Destination descriptor command write transaction type for RBP=0:
1700 + coherent copy of cacheable memory */
1701 +#define QDMA_DD_CMD_WRTTYPE_COHERENT (0x6 << 28)
1702 +
1703 +#define QDMA_SG_FMT_SDB 0x0 /* single data buffer */
1704 +#define QDMA_SG_FMT_FDS 0x1 /* frame data section */
1705 +#define QDMA_SG_FMT_SGTE 0x2 /* SGT extension */
1706 +#define QDMA_SG_SL_SHORT 0x1 /* short length */
1707 +#define QDMA_SG_SL_LONG 0x0 /* long length */
1708 +#define QDMA_SG_F 0x1 /* last sg entry */
1709 +struct dpaa2_qdma_sg {
1710 + uint32_t addr_lo; /* address 0:31 */
1711 + uint32_t addr_hi:17; /* address 32:48 */
1712 + uint32_t rsv:15;
1713 + union {
1714 + uint32_t data_len_sl0; /* SL=0, the long format */
1715 + struct {
1716 + uint32_t len:17; /* SL=1, the short format */
1717 + uint32_t reserve:3;
1718 + uint32_t sf:1;
1719 + uint32_t sr:1;
1720 + uint32_t size:10; /* buff size */
1721 + } data_len_sl1;
1722 + } data_len; /* AVAIL_LENGTH */
1723 + struct {
1724 + uint32_t bpid:14;
1725 + uint32_t ivp:1;
1726 + uint32_t mbt:1;
1727 + uint32_t offset:12;
1728 + uint32_t fmt:2;
1729 + uint32_t sl:1;
1730 + uint32_t f:1;
1731 + } ctrl;
1732 +} __attribute__((__packed__));
1733 +
1734 +#define QMAN_FD_FMT_ENABLE (1) /* frame list table enable */
1735 +#define QMAN_FD_BMT_ENABLE (1 << 15) /* bypass memory translation */
1736 +#define QMAN_FD_BMT_DISABLE (0 << 15) /* bypass memory translation */
1737 +#define QMAN_FD_SL_DISABLE (0 << 14) /* short length disabled */
1738 +#define QMAN_FD_SL_ENABLE (1 << 14) /* short length enabled */
1739 +
1740 +#define QDMA_SB_FRAME (0 << 28) /* single frame */
1741 +#define QDMA_SG_FRAME (2 << 28) /* scatter gather frames */
1742 +#define QDMA_FINAL_BIT_DISABLE (0 << 31) /* final bit disable */
1743 +#define QDMA_FINAL_BIT_ENABLE (1 << 31) /* final bit enable */
1744 +
1745 +#define QDMA_FD_SHORT_FORMAT (1 << 11) /* short format */
1746 +#define QDMA_FD_LONG_FORMAT (0 << 11) /* long format */
1747 +#define QDMA_SER_DISABLE (0 << 8) /* no notification */
1748 +#define QDMA_SER_CTX (1 << 8) /* notification by FQD_CTX[fqid] */
1749 +#define QDMA_SER_DEST (2 << 8) /* notification by destination desc */
1750 +#define QDMA_SER_BOTH (3 << 8) /* source and destination notification */
1751 +#define QDMA_FD_SPF_ENALBE (1 << 30) /* source prefetch enable */
1752 +
1753 +#define QMAN_FD_VA_ENABLE (1 << 14) /* Address used is virtual address */
1754 +#define QMAN_FD_VA_DISABLE (0 << 14)/* Address used is a real address */
1755 +#define QMAN_FD_CBMT_ENABLE (1 << 15) /* Flow Context: 49bit physical address */
1756 +#define QMAN_FD_CBMT_DISABLE (0 << 15) /* Flow Context: 64bit virtual address */
1757 +#define QMAN_FD_SC_DISABLE (0 << 27) /* stashing control */
1758 +
1759 +#define QDMA_FL_FMT_SBF (0x0) /* Single buffer frame */
1760 +#define QDMA_FL_FMT_SGE 0x2 /* Scatter gather frame */
1761 +#define QDMA_FL_BMT_ENABLE (0x1 << 15)/* enable bypass memory translation */
1762 +#define QDMA_FL_BMT_DISABLE 0x0 /* disable bypass memory translation */
1763 +#define QDMA_FL_SL_LONG (0x0 << 2)/* long length */
1764 +#define QDMA_FL_SL_SHORT 0x1 /* short length */
1765 +#define QDMA_FL_F (0x1)/* last frame list bit */
1766 +/* Description of the frame list table structure */
1767 +
1768 +struct dpaa2_qdma_chan {
1769 + struct virt_dma_chan vchan;
1770 + struct virt_dma_desc vdesc;
1771 + enum dma_status status;
1772 + struct dpaa2_qdma_engine *qdma;
1773 +
1774 + struct mutex dpaa2_queue_mutex;
1775 + spinlock_t queue_lock;
1776 + struct dma_pool *fd_pool;
1777 + struct dma_pool *sg_blk_pool;
1778 +
1779 + struct list_head comp_used;
1780 + struct list_head comp_free;
1781 +
1782 + struct list_head sgb_free;
1783 +};
1784 +
1785 +struct qdma_sg_blk {
1786 + dma_addr_t blk_bus_addr;
1787 + void *blk_virt_addr;
1788 + struct list_head list;
1789 +};
1790 +
1791 +struct dpaa2_qdma_comp {
1792 + dma_addr_t fd_bus_addr;
1793 + dma_addr_t fl_bus_addr;
1794 + dma_addr_t desc_bus_addr;
1795 + dma_addr_t sge_src_bus_addr;
1796 + dma_addr_t sge_dst_bus_addr;
1797 + void *fd_virt_addr;
1798 + void *fl_virt_addr;
1799 + void *desc_virt_addr;
1800 + void *sg_src_virt_addr;
1801 + void *sg_dst_virt_addr;
1802 + struct qdma_sg_blk *sg_blk;
1803 + uint32_t sg_blk_num;
1804 + struct list_head sg_src_head;
1805 + struct list_head sg_dst_head;
1806 + struct dpaa2_qdma_chan *qchan;
1807 + struct virt_dma_desc vdesc;
1808 + struct list_head list;
1809 +};
1810 +
1811 +struct dpaa2_qdma_engine {
1812 + struct dma_device dma_dev;
1813 + u32 n_chans;
1814 + struct dpaa2_qdma_chan chans[NUM_CH];
1815 +
1816 + struct dpaa2_qdma_priv *priv;
1817 +};
1818 +
1819 +/*
1820 + * dpaa2_qdma_priv - driver private data
1821 + */
1822 +struct dpaa2_qdma_priv {
1823 + int dpqdma_id;
1824 +
1825 + struct iommu_domain *iommu_domain;
1826 + struct dpdmai_attr dpdmai_attr;
1827 + struct device *dev;
1828 + struct fsl_mc_io *mc_io;
1829 + struct fsl_mc_device *dpdmai_dev;
1830 +
1831 + struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM];
1832 + struct dpdmai_tx_queue_attr tx_queue_attr[DPDMAI_PRIO_NUM];
1833 +
1834 + uint8_t num_pairs;
1835 +
1836 + struct dpaa2_qdma_engine *dpaa2_qdma;
1837 + struct dpaa2_qdma_priv_per_prio *ppriv;
1838 +};
1839 +
1840 +struct dpaa2_qdma_priv_per_prio {
1841 + int req_fqid;
1842 + int rsp_fqid;
1843 + int prio;
1844 +
1845 + struct dpaa2_io_store *store;
1846 + struct dpaa2_io_notification_ctx nctx;
1847 +
1848 + struct dpaa2_qdma_priv *priv;
1849 +};
1850 +
1851 +/* FD pool size: one FD + 3 frame list entries + 2 source/destination descriptors */
1852 +#define FD_POOL_SIZE (sizeof(struct dpaa2_fd) + \
1853 + sizeof(struct dpaa2_fl_entry) * 3 + \
1854 + sizeof(struct dpaa2_qdma_sd_d) * 2)
1855 +
1856 +/* qdma_sg_blk + 16 SGs */
1857 +#define SG_POOL_SIZE (sizeof(struct qdma_sg_blk) +\
1858 + sizeof(struct dpaa2_qdma_sg) * NUM_SG_PER_BLK)
1859 +#endif /* __DPAA2_QDMA_H */
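The FD_POOL_SIZE macro above sizes each dma_pool element to hold one frame descriptor, three frame-list entries and two source/destination descriptors back to back. A minimal sketch of how such an element could be carved up, assuming a hypothetical fd_block_view/carve_fd_block pair and the layout implied by the macro (the driver itself may order the buffers differently):

    /* Hypothetical view of one FD_POOL_SIZE dma_pool element. */
    struct fd_block_view {
    	struct dpaa2_fd *fd;           /* one frame descriptor */
    	struct dpaa2_fl_entry *fl;     /* three frame-list entries */
    	struct dpaa2_qdma_sd_d *sd_d;  /* source + destination descriptor */
    };

    static void carve_fd_block(void *virt, struct fd_block_view *v)
    {
    	v->fd   = virt;
    	v->fl   = virt + sizeof(struct dpaa2_fd);
    	v->sd_d = virt + sizeof(struct dpaa2_fd) +
    		  sizeof(struct dpaa2_fl_entry) * 3;
    }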
1860 --- /dev/null
1861 +++ b/drivers/dma/dpaa2-qdma/dpdmai.c
1862 @@ -0,0 +1,515 @@
1863 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
1864 + *
1865 + * Redistribution and use in source and binary forms, with or without
1866 + * modification, are permitted provided that the following conditions are met:
1867 + * * Redistributions of source code must retain the above copyright
1868 + * notice, this list of conditions and the following disclaimer.
1869 + * * Redistributions in binary form must reproduce the above copyright
1870 + * notice, this list of conditions and the following disclaimer in the
1871 + * documentation and/or other materials provided with the distribution.
1872 + * * Neither the name of the above-listed copyright holders nor the
1873 + * names of any contributors may be used to endorse or promote products
1874 + * derived from this software without specific prior written permission.
1875 + *
1876 + *
1877 + * ALTERNATIVELY, this software may be distributed under the terms of the
1878 + * GNU General Public License ("GPL") as published by the Free Software
1879 + * Foundation, either version 2 of that License or (at your option) any
1880 + * later version.
1881 + *
1882 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
1883 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1884 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1885 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
1886 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1887 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
1888 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
1889 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
1890 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
1891 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1892 + * POSSIBILITY OF SUCH DAMAGE.
1893 + */
1894 +#include <linux/types.h>
1895 +#include <linux/io.h>
1896 +#include "fsl_dpdmai.h"
1897 +#include "fsl_dpdmai_cmd.h"
1898 +#include <linux/fsl/mc.h>
1899 +
1900 +struct dpdmai_cmd_open {
1901 + __le32 dpdmai_id;
1902 +};
1903 +
1904 +struct dpdmai_rsp_get_attributes {
1905 + __le32 id;
1906 + u8 num_of_priorities;
1907 + u8 pad0[3];
1908 + __le16 major;
1909 + __le16 minor;
1910 +};
1911 +
1912 +
1913 +struct dpdmai_cmd_queue {
1914 + __le32 dest_id;
1915 + u8 priority;
1916 + u8 queue;
1917 + u8 dest_type;
1918 + u8 pad;
1919 + __le64 user_ctx;
1920 + union {
1921 + __le32 options;
1922 + __le32 fqid;
1923 + };
1924 +};
1925 +
1926 +struct dpdmai_rsp_get_tx_queue {
1927 + __le64 pad;
1928 + __le32 fqid;
1929 +};
1930 +
1931 +
1932 +int dpdmai_open(struct fsl_mc_io *mc_io,
1933 + uint32_t cmd_flags,
1934 + int dpdmai_id,
1935 + uint16_t *token)
1936 +{
1937 + struct fsl_mc_command cmd = { 0 };
1938 + struct dpdmai_cmd_open *cmd_params;
1939 + int err;
1940 +
1941 + /* prepare command */
1942 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
1943 + cmd_flags,
1944 + 0);
1945 +
1946 + cmd_params = (struct dpdmai_cmd_open *)cmd.params;
1947 + cmd_params->dpdmai_id = cpu_to_le32(dpdmai_id);
1948 +
1949 + /* send command to mc*/
1950 + err = mc_send_command(mc_io, &cmd);
1951 + if (err)
1952 + return err;
1953 +
1954 + /* retrieve response parameters */
1955 + *token = mc_cmd_hdr_read_token(&cmd);
1956 + return 0;
1957 +}
1958 +
1959 +int dpdmai_close(struct fsl_mc_io *mc_io,
1960 + uint32_t cmd_flags,
1961 + uint16_t token)
1962 +{
1963 + struct fsl_mc_command cmd = { 0 };
1964 +
1965 + /* prepare command */
1966 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE,
1967 + cmd_flags, token);
1968 +
1969 + /* send command to mc*/
1970 + return mc_send_command(mc_io, &cmd);
1971 +}
1972 +
1973 +int dpdmai_create(struct fsl_mc_io *mc_io,
1974 + uint32_t cmd_flags,
1975 + const struct dpdmai_cfg *cfg,
1976 + uint16_t *token)
1977 +{
1978 + struct fsl_mc_command cmd = { 0 };
1979 + int err;
1980 +
1981 + /* prepare command */
1982 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
1983 + cmd_flags,
1984 + 0);
1985 + DPDMAI_CMD_CREATE(cmd, cfg);
1986 +
1987 + /* send command to mc*/
1988 + err = mc_send_command(mc_io, &cmd);
1989 + if (err)
1990 + return err;
1991 +
1992 + /* retrieve response parameters */
1993 + *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
1994 +
1995 + return 0;
1996 +}
1997 +
1998 +int dpdmai_destroy(struct fsl_mc_io *mc_io,
1999 + uint32_t cmd_flags,
2000 + uint16_t token)
2001 +{
2002 + struct fsl_mc_command cmd = { 0 };
2003 +
2004 + /* prepare command */
2005 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
2006 + cmd_flags,
2007 + token);
2008 +
2009 + /* send command to mc*/
2010 + return mc_send_command(mc_io, &cmd);
2011 +}
2012 +
2013 +int dpdmai_enable(struct fsl_mc_io *mc_io,
2014 + uint32_t cmd_flags,
2015 + uint16_t token)
2016 +{
2017 + struct fsl_mc_command cmd = { 0 };
2018 +
2019 + /* prepare command */
2020 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE,
2021 + cmd_flags,
2022 + token);
2023 +
2024 + /* send command to mc*/
2025 + return mc_send_command(mc_io, &cmd);
2026 +}
2027 +
2028 +int dpdmai_disable(struct fsl_mc_io *mc_io,
2029 + uint32_t cmd_flags,
2030 + uint16_t token)
2031 +{
2032 + struct fsl_mc_command cmd = { 0 };
2033 +
2034 + /* prepare command */
2035 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE,
2036 + cmd_flags,
2037 + token);
2038 +
2039 + /* send command to mc*/
2040 + return mc_send_command(mc_io, &cmd);
2041 +}
2042 +
2043 +int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
2044 + uint32_t cmd_flags,
2045 + uint16_t token,
2046 + int *en)
2047 +{
2048 + struct fsl_mc_command cmd = { 0 };
2049 + int err;
2050 + /* prepare command */
2051 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED,
2052 + cmd_flags,
2053 + token);
2054 +
2055 + /* send command to mc*/
2056 + err = mc_send_command(mc_io, &cmd);
2057 + if (err)
2058 + return err;
2059 +
2060 + /* retrieve response parameters */
2061 + DPDMAI_RSP_IS_ENABLED(cmd, *en);
2062 +
2063 + return 0;
2064 +}
2065 +
2066 +int dpdmai_reset(struct fsl_mc_io *mc_io,
2067 + uint32_t cmd_flags,
2068 + uint16_t token)
2069 +{
2070 + struct fsl_mc_command cmd = { 0 };
2071 +
2072 + /* prepare command */
2073 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET,
2074 + cmd_flags,
2075 + token);
2076 +
2077 + /* send command to mc*/
2078 + return mc_send_command(mc_io, &cmd);
2079 +}
2080 +
2081 +int dpdmai_get_irq(struct fsl_mc_io *mc_io,
2082 + uint32_t cmd_flags,
2083 + uint16_t token,
2084 + uint8_t irq_index,
2085 + int *type,
2086 + struct dpdmai_irq_cfg *irq_cfg)
2087 +{
2088 + struct fsl_mc_command cmd = { 0 };
2089 + int err;
2090 +
2091 + /* prepare command */
2092 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ,
2093 + cmd_flags,
2094 + token);
2095 + DPDMAI_CMD_GET_IRQ(cmd, irq_index);
2096 +
2097 + /* send command to mc*/
2098 + err = mc_send_command(mc_io, &cmd);
2099 + if (err)
2100 + return err;
2101 +
2102 + /* retrieve response parameters */
2103 + DPDMAI_RSP_GET_IRQ(cmd, *type, irq_cfg);
2104 +
2105 + return 0;
2106 +}
2107 +
2108 +int dpdmai_set_irq(struct fsl_mc_io *mc_io,
2109 + uint32_t cmd_flags,
2110 + uint16_t token,
2111 + uint8_t irq_index,
2112 + struct dpdmai_irq_cfg *irq_cfg)
2113 +{
2114 + struct fsl_mc_command cmd = { 0 };
2115 +
2116 + /* prepare command */
2117 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ,
2118 + cmd_flags,
2119 + token);
2120 + DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
2121 +
2122 + /* send command to mc*/
2123 + return mc_send_command(mc_io, &cmd);
2124 +}
2125 +
2126 +int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
2127 + uint32_t cmd_flags,
2128 + uint16_t token,
2129 + uint8_t irq_index,
2130 + uint8_t *en)
2131 +{
2132 + struct fsl_mc_command cmd = { 0 };
2133 + int err;
2134 +
2135 + /* prepare command */
2136 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_ENABLE,
2137 + cmd_flags,
2138 + token);
2139 + DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
2140 +
2141 + /* send command to mc*/
2142 + err = mc_send_command(mc_io, &cmd);
2143 + if (err)
2144 + return err;
2145 +
2146 + /* retrieve response parameters */
2147 + DPDMAI_RSP_GET_IRQ_ENABLE(cmd, *en);
2148 +
2149 + return 0;
2150 +}
2151 +
2152 +int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
2153 + uint32_t cmd_flags,
2154 + uint16_t token,
2155 + uint8_t irq_index,
2156 + uint8_t en)
2157 +{
2158 + struct fsl_mc_command cmd = { 0 };
2159 +
2160 + /* prepare command */
2161 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_ENABLE,
2162 + cmd_flags,
2163 + token);
2164 + DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
2165 +
2166 + /* send command to mc*/
2167 + return mc_send_command(mc_io, &cmd);
2168 +}
2169 +
2170 +int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
2171 + uint32_t cmd_flags,
2172 + uint16_t token,
2173 + uint8_t irq_index,
2174 + uint32_t *mask)
2175 +{
2176 + struct fsl_mc_command cmd = { 0 };
2177 + int err;
2178 +
2179 + /* prepare command */
2180 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_MASK,
2181 + cmd_flags,
2182 + token);
2183 + DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index);
2184 +
2185 + /* send command to mc*/
2186 + err = mc_send_command(mc_io, &cmd);
2187 + if (err)
2188 + return err;
2189 +
2190 + /* retrieve response parameters */
2191 + DPDMAI_RSP_GET_IRQ_MASK(cmd, *mask);
2192 +
2193 + return 0;
2194 +}
2195 +
2196 +int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
2197 + uint32_t cmd_flags,
2198 + uint16_t token,
2199 + uint8_t irq_index,
2200 + uint32_t mask)
2201 +{
2202 + struct fsl_mc_command cmd = { 0 };
2203 +
2204 + /* prepare command */
2205 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_MASK,
2206 + cmd_flags,
2207 + token);
2208 + DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
2209 +
2210 + /* send command to mc*/
2211 + return mc_send_command(mc_io, &cmd);
2212 +}
2213 +
2214 +int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
2215 + uint32_t cmd_flags,
2216 + uint16_t token,
2217 + uint8_t irq_index,
2218 + uint32_t *status)
2219 +{
2220 + struct fsl_mc_command cmd = { 0 };
2221 + int err;
2222 +
2223 + /* prepare command */
2224 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_STATUS,
2225 + cmd_flags,
2226 + token);
2227 + DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
2228 +
2229 + /* send command to mc*/
2230 + err = mc_send_command(mc_io, &cmd);
2231 + if (err)
2232 + return err;
2233 +
2234 + /* retrieve response parameters */
2235 + DPDMAI_RSP_GET_IRQ_STATUS(cmd, *status);
2236 +
2237 + return 0;
2238 +}
2239 +
2240 +int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
2241 + uint32_t cmd_flags,
2242 + uint16_t token,
2243 + uint8_t irq_index,
2244 + uint32_t status)
2245 +{
2246 + struct fsl_mc_command cmd = { 0 };
2247 +
2248 + /* prepare command */
2249 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLEAR_IRQ_STATUS,
2250 + cmd_flags,
2251 + token);
2252 + DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
2253 +
2254 + /* send command to mc*/
2255 + return mc_send_command(mc_io, &cmd);
2256 +}
2257 +
2258 +int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
2259 + uint32_t cmd_flags,
2260 + uint16_t token,
2261 + struct dpdmai_attr *attr)
2262 +{
2263 + struct fsl_mc_command cmd = { 0 };
2264 + int err;
2265 + struct dpdmai_rsp_get_attributes *rsp_params;
2266 +
2267 + /* prepare command */
2268 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR,
2269 + cmd_flags,
2270 + token);
2271 +
2272 + /* send command to mc*/
2273 + err = mc_send_command(mc_io, &cmd);
2274 + if (err)
2275 + return err;
2276 +
2277 + /* retrieve response parameters */
2278 + rsp_params = (struct dpdmai_rsp_get_attributes *)cmd.params;
2279 + attr->id = le32_to_cpu(rsp_params->id);
2280 + attr->version.major = le16_to_cpu(rsp_params->major);
2281 + attr->version.minor = le16_to_cpu(rsp_params->minor);
2282 + attr->num_of_priorities = rsp_params->num_of_priorities;
2283 +
2284 +
2285 + return 0;
2286 +}
2287 +
2288 +int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
2289 + uint32_t cmd_flags,
2290 + uint16_t token,
2291 + uint8_t priority,
2292 + const struct dpdmai_rx_queue_cfg *cfg)
2293 +{
2294 + struct fsl_mc_command cmd = { 0 };
2295 + struct dpdmai_cmd_queue *cmd_params;
2296 +
2297 + /* prepare command */
2298 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE,
2299 + cmd_flags,
2300 + token);
2301 +
2302 + cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
2303 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
2304 + cmd_params->priority = cfg->dest_cfg.priority;
2305 + cmd_params->queue = priority;
2306 + cmd_params->dest_type = cfg->dest_cfg.dest_type;
2307 + cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
2308 + cmd_params->options = cpu_to_le32(cfg->options);
2309 +
2310 +
2311 + /* send command to mc*/
2312 + return mc_send_command(mc_io, &cmd);
2313 +}
2314 +
2315 +int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
2316 + uint32_t cmd_flags,
2317 + uint16_t token,
2318 + uint8_t priority, struct dpdmai_rx_queue_attr *attr)
2319 +{
2320 + struct fsl_mc_command cmd = { 0 };
2321 + struct dpdmai_cmd_queue *cmd_params;
2322 + int err;
2323 +
2324 + /* prepare command */
2325 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE,
2326 + cmd_flags,
2327 + token);
2328 +
2329 + cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
2330 + cmd_params->queue = priority;
2331 +
2332 + /* send command to mc*/
2333 + err = mc_send_command(mc_io, &cmd);
2334 + if (err)
2335 + return err;
2336 +
2337 + /* retrieve response parameters */
2338 + attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
2339 + attr->dest_cfg.priority = cmd_params->priority;
2340 + attr->dest_cfg.dest_type = cmd_params->dest_type;
2341 + attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
2342 + attr->fqid = le32_to_cpu(cmd_params->fqid);
2343 +
2344 + return 0;
2345 +}
2346 +
2347 +int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
2348 + uint32_t cmd_flags,
2349 + uint16_t token,
2350 + uint8_t priority,
2351 + struct dpdmai_tx_queue_attr *attr)
2352 +{
2353 + struct fsl_mc_command cmd = { 0 };
2354 + struct dpdmai_cmd_queue *cmd_params;
2355 + struct dpdmai_rsp_get_tx_queue *rsp_params;
2356 + int err;
2357 +
2358 + /* prepare command */
2359 + cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE,
2360 + cmd_flags,
2361 + token);
2362 +
2363 + cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
2364 + cmd_params->queue = priority;
2365 +
2366 + /* send command to mc*/
2367 + err = mc_send_command(mc_io, &cmd);
2368 + if (err)
2369 + return err;
2370 +
2371 + /* retrieve response parameters */
2372 +
2373 + rsp_params = (struct dpdmai_rsp_get_tx_queue *)cmd.params;
2374 + attr->fqid = le32_to_cpu(rsp_params->fqid);
2375 +
2376 + return 0;
2377 +}
2378 --- /dev/null
2379 +++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai.h
2380 @@ -0,0 +1,521 @@
2381 +/* Copyright 2013-2015 Freescale Semiconductor Inc.
2382 + *
2383 + * Redistribution and use in source and binary forms, with or without
2384 + * modification, are permitted provided that the following conditions are met:
2385 + * * Redistributions of source code must retain the above copyright
2386 + * notice, this list of conditions and the following disclaimer.
2387 + * * Redistributions in binary form must reproduce the above copyright
2388 + * notice, this list of conditions and the following disclaimer in the
2389 + * documentation and/or other materials provided with the distribution.
2390 + * * Neither the name of the above-listed copyright holders nor the
2391 + * names of any contributors may be used to endorse or promote products
2392 + * derived from this software without specific prior written permission.
2393 + *
2394 + *
2395 + * ALTERNATIVELY, this software may be distributed under the terms of the
2396 + * GNU General Public License ("GPL") as published by the Free Software
2397 + * Foundation, either version 2 of that License or (at your option) any
2398 + * later version.
2399 + *
2400 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
2401 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2402 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2403 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
2404 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2405 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2406 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2407 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
2408 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
2409 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
2410 + * POSSIBILITY OF SUCH DAMAGE.
2411 + */
2412 +#ifndef __FSL_DPDMAI_H
2413 +#define __FSL_DPDMAI_H
2414 +
2415 +struct fsl_mc_io;
2416 +
2417 +/* Data Path DMA Interface API
2418 + * Contains initialization APIs and runtime control APIs for DPDMAI
2419 + */
2420 +
2421 +/* General DPDMAI macros */
2422 +
2423 +/**
2424 + * Maximum number of Tx/Rx priorities per DPDMAI object
2425 + */
2426 +#define DPDMAI_PRIO_NUM 2
2427 +
2428 +/**
2429 + * All queues considered; see dpdmai_set_rx_queue()
2430 + */
2431 +#define DPDMAI_ALL_QUEUES (uint8_t)(-1)
2432 +
2433 +/**
2434 + * dpdmai_open() - Open a control session for the specified object
2435 + * @mc_io: Pointer to MC portal's I/O object
2436 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2437 + * @dpdmai_id: DPDMAI unique ID
2438 + * @token: Returned token; use in subsequent API calls
2439 + *
2440 + * This function can be used to open a control session for an
2441 + * already created object; an object may have been declared in
2442 + * the DPL or by calling the dpdmai_create() function.
2443 + * This function returns a unique authentication token,
2444 + * associated with the specific object ID and the specific MC
2445 + * portal; this token must be used in all subsequent commands for
2446 + * this specific object.
2447 + *
2448 + * Return: '0' on Success; Error code otherwise.
2449 + */
2450 +int dpdmai_open(struct fsl_mc_io *mc_io,
2451 + uint32_t cmd_flags,
2452 + int dpdmai_id,
2453 + uint16_t *token);
2454 +
2455 +/**
2456 + * dpdmai_close() - Close the control session of the object
2457 + * @mc_io: Pointer to MC portal's I/O object
2458 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2459 + * @token: Token of DPDMAI object
2460 + *
2461 + * After this function is called, no further operations are
2462 + * allowed on the object without opening a new control session.
2463 + *
2464 + * Return: '0' on Success; Error code otherwise.
2465 + */
2466 +int dpdmai_close(struct fsl_mc_io *mc_io,
2467 + uint32_t cmd_flags,
2468 + uint16_t token);
2469 +
2470 +/**
2471 + * struct dpdmai_cfg - Structure representing DPDMAI configuration
2472 + * @priorities: Priorities for the DMA hardware processing; valid priorities are
2473 + * configured with values 1-8; the entry following the last valid entry
2474 + * should be configured with 0
2475 + */
2476 +struct dpdmai_cfg {
2477 + uint8_t priorities[DPDMAI_PRIO_NUM];
2478 +};
2479 +
2480 +/**
2481 + * dpdmai_create() - Create the DPDMAI object
2482 + * @mc_io: Pointer to MC portal's I/O object
2483 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2484 + * @cfg: Configuration structure
2485 + * @token: Returned token; use in subsequent API calls
2486 + *
2487 + * Create the DPDMAI object, allocate required resources and
2488 + * perform required initialization.
2489 + *
2490 + * The object can be created either by declaring it in the
2491 + * DPL file, or by calling this function.
2492 + *
2493 + * This function returns a unique authentication token,
2494 + * associated with the specific object ID and the specific MC
2495 + * portal; this token must be used in all subsequent calls to
2496 + * this specific object. For objects that are created using the
2497 + * DPL file, call dpdmai_open() function to get an authentication
2498 + * token first.
2499 + *
2500 + * Return: '0' on Success; Error code otherwise.
2501 + */
2502 +int dpdmai_create(struct fsl_mc_io *mc_io,
2503 + uint32_t cmd_flags,
2504 + const struct dpdmai_cfg *cfg,
2505 + uint16_t *token);
2506 +
2507 +/**
2508 + * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
2509 + * @mc_io: Pointer to MC portal's I/O object
2510 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2511 + * @token: Token of DPDMAI object
2512 + *
2513 + * Return: '0' on Success; error code otherwise.
2514 + */
2515 +int dpdmai_destroy(struct fsl_mc_io *mc_io,
2516 + uint32_t cmd_flags,
2517 + uint16_t token);
2518 +
2519 +/**
2520 + * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
2521 + * @mc_io: Pointer to MC portal's I/O object
2522 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2523 + * @token: Token of DPDMAI object
2524 + *
2525 + * Return: '0' on Success; Error code otherwise.
2526 + */
2527 +int dpdmai_enable(struct fsl_mc_io *mc_io,
2528 + uint32_t cmd_flags,
2529 + uint16_t token);
2530 +
2531 +/**
2532 + * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames.
2533 + * @mc_io: Pointer to MC portal's I/O object
2534 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2535 + * @token: Token of DPDMAI object
2536 + *
2537 + * Return: '0' on Success; Error code otherwise.
2538 + */
2539 +int dpdmai_disable(struct fsl_mc_io *mc_io,
2540 + uint32_t cmd_flags,
2541 + uint16_t token);
2542 +
2543 +/**
2544 + * dpdmai_is_enabled() - Check if the DPDMAI is enabled.
2545 + * @mc_io: Pointer to MC portal's I/O object
2546 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2547 + * @token: Token of DPDMAI object
2548 + * @en: Returns '1' if object is enabled; '0' otherwise
2549 + *
2550 + * Return: '0' on Success; Error code otherwise.
2551 + */
2552 +int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
2553 + uint32_t cmd_flags,
2554 + uint16_t token,
2555 + int *en);
2556 +
2557 +/**
2558 + * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state.
2559 + * @mc_io: Pointer to MC portal's I/O object
2560 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2561 + * @token: Token of DPDMAI object
2562 + *
2563 + * Return: '0' on Success; Error code otherwise.
2564 + */
2565 +int dpdmai_reset(struct fsl_mc_io *mc_io,
2566 + uint32_t cmd_flags,
2567 + uint16_t token);
2568 +
2569 +/**
2570 + * struct dpdmai_irq_cfg - IRQ configuration
2571 + * @addr: Address that must be written to signal a message-based interrupt
2572 + * @val: Value to write into irq_addr address
2573 + * @irq_num: A user defined number associated with this IRQ
2574 + */
2575 +struct dpdmai_irq_cfg {
2576 + uint64_t addr;
2577 + uint32_t val;
2578 + int irq_num;
2579 +};
2580 +
2581 +/**
2582 + * dpdmai_set_irq() - Set IRQ information for the DPDMAI to trigger an interrupt.
2583 + * @mc_io: Pointer to MC portal's I/O object
2584 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2585 + * @token: Token of DPDMAI object
2586 + * @irq_index: Identifies the interrupt index to configure
2587 + * @irq_cfg: IRQ configuration
2588 + *
2589 + * Return: '0' on Success; Error code otherwise.
2590 + */
2591 +int dpdmai_set_irq(struct fsl_mc_io *mc_io,
2592 + uint32_t cmd_flags,
2593 + uint16_t token,
2594 + uint8_t irq_index,
2595 + struct dpdmai_irq_cfg *irq_cfg);
2596 +
2597 +/**
2598 + * dpdmai_get_irq() - Get IRQ information from the DPDMAI
2599 + *
2600 + * @mc_io: Pointer to MC portal's I/O object
2601 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2602 + * @token: Token of DPDMAI object
2603 + * @irq_index: The interrupt index to configure
2604 + * @type: Interrupt type: 0 represents message interrupt
2605 + * type (both irq_addr and irq_val are valid)
2606 + * @irq_cfg: IRQ attributes
2607 + *
2608 + * Return: '0' on Success; Error code otherwise.
2609 + */
2610 +int dpdmai_get_irq(struct fsl_mc_io *mc_io,
2611 + uint32_t cmd_flags,
2612 + uint16_t token,
2613 + uint8_t irq_index,
2614 + int *type,
2615 + struct dpdmai_irq_cfg *irq_cfg);
2616 +
2617 +/**
2618 + * dpdmai_set_irq_enable() - Set overall interrupt state.
2619 + * @mc_io: Pointer to MC portal's I/O object
2620 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2621 + * @token: Token of DPDMAI object
2622 + * @irq_index: The interrupt index to configure
2623 + * @en: Interrupt state - enable = 1, disable = 0
2624 + *
2625 + * Allows GPP software to control when interrupts are generated.
2626 + * Each interrupt can have up to 32 causes. The enable/disable setting controls
2627 + * the overall interrupt state. If the interrupt is disabled, no cause can
2628 + * trigger an interrupt.
2629 + *
2630 + * Return: '0' on Success; Error code otherwise.
2631 + */
2632 +int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
2633 + uint32_t cmd_flags,
2634 + uint16_t token,
2635 + uint8_t irq_index,
2636 + uint8_t en);
2637 +
2638 +/**
2639 + * dpdmai_get_irq_enable() - Get overall interrupt state
2640 + * @mc_io: Pointer to MC portal's I/O object
2641 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2642 + * @token: Token of DPDMAI object
2643 + * @irq_index: The interrupt index to configure
2644 + * @en: Returned Interrupt state - enable = 1, disable = 0
2645 + *
2646 + * Return: '0' on Success; Error code otherwise.
2647 + */
2648 +int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
2649 + uint32_t cmd_flags,
2650 + uint16_t token,
2651 + uint8_t irq_index,
2652 + uint8_t *en);
2653 +
2654 +/**
2655 + * dpdmai_set_irq_mask() - Set interrupt mask.
2656 + * @mc_io: Pointer to MC portal's I/O object
2657 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2658 + * @token: Token of DPDMAI object
2659 + * @irq_index: The interrupt index to configure
2660 + * @mask: event mask to trigger interrupt;
2661 + * each bit:
2662 + * 0 = ignore event
2663 + * 1 = consider event for asserting IRQ
2664 + *
2665 + * Every interrupt can have up to 32 causes and the interrupt model supports
2666 + * masking/unmasking each cause independently
2667 + *
2668 + * Return: '0' on Success; Error code otherwise.
2669 + */
2670 +int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
2671 + uint32_t cmd_flags,
2672 + uint16_t token,
2673 + uint8_t irq_index,
2674 + uint32_t mask);
2675 +
2676 +/**
2677 + * dpdmai_get_irq_mask() - Get interrupt mask.
2678 + * @mc_io: Pointer to MC portal's I/O object
2679 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2680 + * @token: Token of DPDMAI object
2681 + * @irq_index: The interrupt index to configure
2682 + * @mask: Returned event mask to trigger interrupt
2683 + *
2684 + * Every interrupt can have up to 32 causes and the interrupt model supports
2685 + * masking/unmasking each cause independently
2686 + *
2687 + * Return: '0' on Success; Error code otherwise.
2688 + */
2689 +int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
2690 + uint32_t cmd_flags,
2691 + uint16_t token,
2692 + uint8_t irq_index,
2693 + uint32_t *mask);
2694 +
2695 +/**
2696 + * dpdmai_get_irq_status() - Get the current status of any pending interrupts
2697 + * @mc_io: Pointer to MC portal's I/O object
2698 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2699 + * @token: Token of DPDMAI object
2700 + * @irq_index: The interrupt index to configure
2701 + * @status: Returned interrupts status - one bit per cause:
2702 + * 0 = no interrupt pending
2703 + * 1 = interrupt pending
2704 + *
2705 + * Return: '0' on Success; Error code otherwise.
2706 + */
2707 +int dpdmai_get_irq_status(struct fsl_mc_io *mc_io,
2708 + uint32_t cmd_flags,
2709 + uint16_t token,
2710 + uint8_t irq_index,
2711 + uint32_t *status);
2712 +
2713 +/**
2714 + * dpdmai_clear_irq_status() - Clear a pending interrupt's status
2715 + * @mc_io: Pointer to MC portal's I/O object
2716 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2717 + * @token: Token of DPDMAI object
2718 + * @irq_index: The interrupt index to configure
2719 + * @status: bits to clear (W1C) - one bit per cause:
2720 + * 0 = don't change
2721 + * 1 = clear status bit
2722 + *
2723 + * Return: '0' on Success; Error code otherwise.
2724 + */
2725 +int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io,
2726 + uint32_t cmd_flags,
2727 + uint16_t token,
2728 + uint8_t irq_index,
2729 + uint32_t status);
2730 +
2731 +/**
2732 + * struct dpdmai_attr - Structure representing DPDMAI attributes
2733 + * @id: DPDMAI object ID
2734 + * @version: DPDMAI version
2735 + * @num_of_priorities: number of priorities
2736 + */
2737 +struct dpdmai_attr {
2738 + int id;
2739 + /**
2740 + * struct version - DPDMAI version
2741 + * @major: DPDMAI major version
2742 + * @minor: DPDMAI minor version
2743 + */
2744 + struct {
2745 + uint16_t major;
2746 + uint16_t minor;
2747 + } version;
2748 + uint8_t num_of_priorities;
2749 +};
2750 +
2751 +/**
2752 + * dpdmai_get_attributes() - Retrieve DPDMAI attributes.
2753 + * @mc_io: Pointer to MC portal's I/O object
2754 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2755 + * @token: Token of DPDMAI object
2756 + * @attr: Returned object's attributes
2757 + *
2758 + * Return: '0' on Success; Error code otherwise.
2759 + */
2760 +int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
2761 + uint32_t cmd_flags,
2762 + uint16_t token,
2763 + struct dpdmai_attr *attr);
2764 +
2765 +/**
2766 + * enum dpdmai_dest - DPDMAI destination types
2767 + * @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode
2768 + * and does not generate FQDAN notifications; user is expected to dequeue
2769 + * from the queue based on polling or other user-defined method
2770 + * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
2771 + * notifications to the specified DPIO; user is expected to dequeue
2772 + * from the queue only after notification is received
2773 + * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate
2774 + * FQDAN notifications, but is connected to the specified DPCON object;
2775 + * user is expected to dequeue from the DPCON channel
2776 + */
2777 +enum dpdmai_dest {
2778 + DPDMAI_DEST_NONE = 0,
2779 + DPDMAI_DEST_DPIO = 1,
2780 + DPDMAI_DEST_DPCON = 2
2781 +};
2782 +
2783 +/**
2784 + * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters
2785 + * @dest_type: Destination type
2786 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
2787 + * @priority: Priority selection within the DPIO or DPCON channel; valid values
2788 + * are 0-1 or 0-7, depending on the number of priorities in that
2789 + * channel; not relevant for 'DPDMAI_DEST_NONE' option
2790 + */
2791 +struct dpdmai_dest_cfg {
2792 + enum dpdmai_dest dest_type;
2793 + int dest_id;
2794 + uint8_t priority;
2795 +};
2796 +
2797 +/* DPDMAI queue modification options */
2798 +
2799 +/**
2800 + * Select to modify the user's context associated with the queue
2801 + */
2802 +#define DPDMAI_QUEUE_OPT_USER_CTX 0x00000001
2803 +
2804 +/**
2805 + * Select to modify the queue's destination
2806 + */
2807 +#define DPDMAI_QUEUE_OPT_DEST 0x00000002
2808 +
2809 +/**
2810 + * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration
2811 + * @options: Flags representing the suggested modifications to the queue;
2812 + * Use any combination of 'DPDMAI_QUEUE_OPT_<X>' flags
2813 + * @user_ctx: User context value provided in the frame descriptor of each
2814 + * dequeued frame;
2815 + * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained in 'options'
2816 + * @dest_cfg: Queue destination parameters;
2817 + * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options'
2818 + */
2819 +struct dpdmai_rx_queue_cfg {
2820 + uint32_t options;
2821 + uint64_t user_ctx;
2822 + struct dpdmai_dest_cfg dest_cfg;
2823 +
2824 +};
2825 +
2826 +/**
2827 + * dpdmai_set_rx_queue() - Set Rx queue configuration
2828 + * @mc_io: Pointer to MC portal's I/O object
2829 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2830 + * @token: Token of DPDMAI object
2831 + * @priority: Select the queue relative to number of
2832 + * priorities configured at DPDMAI creation; use
2833 + * DPDMAI_ALL_QUEUES to configure all Rx queues
2834 + * identically.
2835 + * @cfg: Rx queue configuration
2836 + *
2837 + * Return: '0' on Success; Error code otherwise.
2838 + */
2839 +int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
2840 + uint32_t cmd_flags,
2841 + uint16_t token,
2842 + uint8_t priority,
2843 + const struct dpdmai_rx_queue_cfg *cfg);
2844 +
2845 +/**
2846 + * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues
2847 + * @user_ctx: User context value provided in the frame descriptor of each
2848 + * dequeued frame
2849 + * @dest_cfg: Queue destination configuration
2850 + * @fqid: Virtual FQID value to be used for dequeue operations
2851 + */
2852 +struct dpdmai_rx_queue_attr {
2853 + uint64_t user_ctx;
2854 + struct dpdmai_dest_cfg dest_cfg;
2855 + uint32_t fqid;
2856 +};
2857 +
2858 +/**
2859 + * dpdmai_get_rx_queue() - Retrieve Rx queue attributes.
2860 + * @mc_io: Pointer to MC portal's I/O object
2861 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2862 + * @token: Token of DPDMAI object
2863 + * @priority: Select the queue relative to number of
2864 + * priorities configured at DPDMAI creation
2865 + * @attr: Returned Rx queue attributes
2866 + *
2867 + * Return: '0' on Success; Error code otherwise.
2868 + */
2869 +int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
2870 + uint32_t cmd_flags,
2871 + uint16_t token,
2872 + uint8_t priority,
2873 + struct dpdmai_rx_queue_attr *attr);
2874 +
2875 +/**
2876 + * struct dpdmai_tx_queue_attr - Structure representing attributes of Tx queues
2877 + * @fqid: Virtual FQID to be used for sending frames to DMA hardware
2878 + */
2879 +
2880 +struct dpdmai_tx_queue_attr {
2881 + uint32_t fqid;
2882 +};
2883 +
2884 +/**
2885 + * dpdmai_get_tx_queue() - Retrieve Tx queue attributes.
2886 + * @mc_io: Pointer to MC portal's I/O object
2887 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
2888 + * @token: Token of DPDMAI object
2889 + * @priority: Select the queue relative to number of
2890 + * priorities configured at DPDMAI creation
2891 + * @attr: Returned Tx queue attributes
2892 + *
2893 + * Return: '0' on Success; Error code otherwise.
2894 + */
2895 +int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
2896 + uint32_t cmd_flags,
2897 + uint16_t token,
2898 + uint8_t priority,
2899 + struct dpdmai_tx_queue_attr *attr);
2900 +
2901 +#endif /* __FSL_DPDMAI_H */
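Taken together, the calls declared above form a straightforward bring-up sequence: open a session to obtain a token, read the attributes to learn how many priorities exist, configure each Rx queue, then enable the object. A condensed sketch of that flow, assuming the fsl_mc_io portal and DPDMAI id have already been obtained from the fsl-mc bus, using 0 for cmd_flags and trimming most error handling:

    static int dpdmai_bringup(struct fsl_mc_io *mc_io, int dpdmai_id,
    			      uint16_t *out_token)
    {
    	struct dpdmai_rx_queue_cfg rx_cfg = { 0 };
    	struct dpdmai_attr attr;
    	uint16_t token;
    	uint8_t i;
    	int err;

    	err = dpdmai_open(mc_io, 0, dpdmai_id, &token);
    	if (err)
    		return err;

    	err = dpdmai_get_attributes(mc_io, 0, token, &attr);
    	if (err)
    		goto out_close;

    	/* Park every Rx queue; a real user would point it at a DPIO or DPCON. */
    	rx_cfg.options = DPDMAI_QUEUE_OPT_DEST;
    	rx_cfg.dest_cfg.dest_type = DPDMAI_DEST_NONE;
    	for (i = 0; i < attr.num_of_priorities; i++) {
    		err = dpdmai_set_rx_queue(mc_io, 0, token, i, &rx_cfg);
    		if (err)
    			goto out_close;
    	}

    	err = dpdmai_enable(mc_io, 0, token);
    	if (err)
    		goto out_close;

    	*out_token = token;	/* keep the session open for later use */
    	return 0;

    out_close:
    	dpdmai_close(mc_io, 0, token);
    	return err;
    }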
2902 --- /dev/null
2903 +++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
2904 @@ -0,0 +1,222 @@
2905 +/* Copyright 2013-2016 Freescale Semiconductor Inc.
2906 + *
2907 + * Redistribution and use in source and binary forms, with or without
2908 + * modification, are permitted provided that the following conditions are met:
2909 + * * Redistributions of source code must retain the above copyright
2910 + * notice, this list of conditions and the following disclaimer.
2911 + * * Redistributions in binary form must reproduce the above copyright
2912 + * notice, this list of conditions and the following disclaimer in the
2913 + * documentation and/or other materials provided with the distribution.
2914 + * * Neither the name of the above-listed copyright holders nor the
2915 + * names of any contributors may be used to endorse or promote products
2916 + * derived from this software without specific prior written permission.
2917 + *
2918 + *
2919 + * ALTERNATIVELY, this software may be distributed under the terms of the
2920 + * GNU General Public License ("GPL") as published by the Free Software
2921 + * Foundation, either version 2 of that License or (at your option) any
2922 + * later version.
2923 + *
2924 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
2925 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
2926 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
2927 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
2928 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2929 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2930 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
2931 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
2932 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
2933 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
2934 + * POSSIBILITY OF SUCH DAMAGE.
2935 + */
2936 +#ifndef _FSL_DPDMAI_CMD_H
2937 +#define _FSL_DPDMAI_CMD_H
2938 +
2939 +/* DPDMAI Version */
2940 +#define DPDMAI_VER_MAJOR 2
2941 +#define DPDMAI_VER_MINOR 2
2942 +
2943 +#define DPDMAI_CMD_BASE_VERSION 0
2944 +#define DPDMAI_CMD_ID_OFFSET 4
2945 +
2946 +/* Command IDs */
2947 +#define DPDMAI_CMDID_CLOSE ((0x800 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2948 +#define DPDMAI_CMDID_OPEN ((0x80E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2949 +#define DPDMAI_CMDID_CREATE ((0x90E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2950 +#define DPDMAI_CMDID_DESTROY ((0x900 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2951 +
2952 +#define DPDMAI_CMDID_ENABLE ((0x002 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2953 +#define DPDMAI_CMDID_DISABLE ((0x003 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2954 +#define DPDMAI_CMDID_GET_ATTR ((0x004 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2955 +#define DPDMAI_CMDID_RESET ((0x005 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2956 +#define DPDMAI_CMDID_IS_ENABLED ((0x006 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2957 +
2958 +#define DPDMAI_CMDID_SET_IRQ ((0x010 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2959 +#define DPDMAI_CMDID_GET_IRQ ((0x011 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2960 +#define DPDMAI_CMDID_SET_IRQ_ENABLE ((0x012 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2961 +#define DPDMAI_CMDID_GET_IRQ_ENABLE ((0x013 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2962 +#define DPDMAI_CMDID_SET_IRQ_MASK ((0x014 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2963 +#define DPDMAI_CMDID_GET_IRQ_MASK ((0x015 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2964 +#define DPDMAI_CMDID_GET_IRQ_STATUS ((0x016 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2965 +#define DPDMAI_CMDID_CLEAR_IRQ_STATUS ((0x017 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2966 +
2967 +#define DPDMAI_CMDID_SET_RX_QUEUE ((0x1A0 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2968 +#define DPDMAI_CMDID_GET_RX_QUEUE ((0x1A1 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2969 +#define DPDMAI_CMDID_GET_TX_QUEUE ((0x1A2 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
2970 +
2971 +
2972 +#define MC_CMD_HDR_TOKEN_O 32 /* Token field offset */
2973 +#define MC_CMD_HDR_TOKEN_S 16 /* Token field size */
2974 +
2975 +
2976 +#define MAKE_UMASK64(_width) \
2977 + ((uint64_t)((_width) < 64 ? ((uint64_t)1 << (_width)) - 1 : \
2978 + (uint64_t)-1))
2979 +
2980 +static inline uint64_t mc_enc(int lsoffset, int width, uint64_t val)
2981 +{
2982 + return (uint64_t)(((uint64_t)val & MAKE_UMASK64(width)) << lsoffset);
2983 +}
2984 +
2985 +static inline uint64_t mc_dec(uint64_t val, int lsoffset, int width)
2986 +{
2987 + return (uint64_t)((val >> lsoffset) & MAKE_UMASK64(width));
2988 +}
2989 +
2990 +#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
2991 + ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
2992 +
2993 +#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \
2994 + (_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width)))
2995 +
2996 +#define MC_CMD_HDR_READ_TOKEN(_hdr) \
2997 + ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S))
2998 +
2999 +/* cmd, param, offset, width, type, arg_name */
3000 +#define DPDMAI_CMD_OPEN(cmd, dpdmai_id) \
3001 + MC_CMD_OP(cmd, 0, 0, 32, int, dpdmai_id)
3002 +
3003 +/* cmd, param, offset, width, type, arg_name */
3004 +#define DPDMAI_CMD_CREATE(cmd, cfg) \
3005 +do { \
3006 + MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->priorities[0]);\
3007 + MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priorities[1]);\
3008 +} while (0)
3009 +
3010 +/* cmd, param, offset, width, type, arg_name */
3011 +#define DPDMAI_RSP_IS_ENABLED(cmd, en) \
3012 + MC_RSP_OP(cmd, 0, 0, 1, int, en)
3013 +
3014 +/* cmd, param, offset, width, type, arg_name */
3015 +#define DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
3016 +do { \
3017 + MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
3018 + MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
3019 + MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
3020 + MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
3021 +} while (0)
3022 +
3023 +/* cmd, param, offset, width, type, arg_name */
3024 +#define DPDMAI_CMD_GET_IRQ(cmd, irq_index) \
3025 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
3026 +
3027 +/* cmd, param, offset, width, type, arg_name */
3028 +#define DPDMAI_RSP_GET_IRQ(cmd, type, irq_cfg) \
3029 +do { \
3030 + MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
3031 + MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
3032 + MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
3033 + MC_RSP_OP(cmd, 2, 32, 32, int, type); \
3034 +} while (0)
3035 +
3036 +/* cmd, param, offset, width, type, arg_name */
3037 +#define DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \
3038 +do { \
3039 + MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \
3040 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
3041 +} while (0)
3042 +
3043 +/* cmd, param, offset, width, type, arg_name */
3044 +#define DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
3045 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
3046 +
3047 +/* cmd, param, offset, width, type, arg_name */
3048 +#define DPDMAI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \
3049 + MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state)
3050 +
3051 +/* cmd, param, offset, width, type, arg_name */
3052 +#define DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
3053 +do { \
3054 + MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
3055 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
3056 +} while (0)
3057 +
3058 +/* cmd, param, offset, width, type, arg_name */
3059 +#define DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index) \
3060 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
3061 +
3062 +/* cmd, param, offset, width, type, arg_name */
3063 +#define DPDMAI_RSP_GET_IRQ_MASK(cmd, mask) \
3064 + MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
3065 +
3066 +/* cmd, param, offset, width, type, arg_name */
3067 +#define DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
3068 +do { \
3069 + MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
3070 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
3071 +} while (0)
3072 +
3073 +/* cmd, param, offset, width, type, arg_name */
3074 +#define DPDMAI_RSP_GET_IRQ_STATUS(cmd, status) \
3075 + MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
3076 +
3077 +/* cmd, param, offset, width, type, arg_name */
3078 +#define DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
3079 +do { \
3080 + MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
3081 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
3082 +} while (0)
3083 +
3084 +/* cmd, param, offset, width, type, arg_name */
3085 +#define DPDMAI_RSP_GET_ATTR(cmd, attr) \
3086 +do { \
3087 + MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \
3088 + MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->num_of_priorities); \
3089 + MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\
3090 + MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\
3091 +} while (0)
3092 +
3093 +/* cmd, param, offset, width, type, arg_name */
3094 +#define DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg) \
3095 +do { \
3096 + MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
3097 + MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \
3098 + MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority); \
3099 + MC_CMD_OP(cmd, 0, 48, 4, enum dpdmai_dest, cfg->dest_cfg.dest_type); \
3100 + MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
3101 + MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
3102 +} while (0)
3103 +
3104 +/* cmd, param, offset, width, type, arg_name */
3105 +#define DPDMAI_CMD_GET_RX_QUEUE(cmd, priority) \
3106 + MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
3107 +
3108 +/* cmd, param, offset, width, type, arg_name */
3109 +#define DPDMAI_RSP_GET_RX_QUEUE(cmd, attr) \
3110 +do { \
3111 + MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\
3112 + MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
3113 + MC_RSP_OP(cmd, 0, 48, 4, enum dpdmai_dest, attr->dest_cfg.dest_type);\
3114 + MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx);\
3115 + MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\
3116 +} while (0)
3117 +
3118 +/* cmd, param, offset, width, type, arg_name */
3119 +#define DPDMAI_CMD_GET_TX_QUEUE(cmd, priority) \
3120 + MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority)
3121 +
3122 +/* cmd, param, offset, width, type, arg_name */
3123 +#define DPDMAI_RSP_GET_TX_QUEUE(cmd, attr) \
3124 + MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->fqid)
3125 +
3126 +#endif /* _FSL_DPDMAI_CMD_H */
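The MC_CMD_OP()/MC_RSP_OP() wrappers above are plain shift-and-mask helpers over the 64-bit words in cmd.params[]. A small worked example of what DPDMAI_CMD_GET_RX_QUEUE expands to, assuming a priority of 1 and a struct fsl_mc_command from linux/fsl/mc.h (illustrative fragment, not a complete function):

    struct fsl_mc_command cmd = { 0 };
    uint8_t priority = 1;

    /* DPDMAI_CMD_GET_RX_QUEUE(cmd, priority) becomes: */
    cmd.params[0] |= mc_enc(40, 8, priority);
    /* i.e. params[0] |= ((uint64_t)priority & 0xff) << 40, giving 0x0000010000000000 */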
3127 --- /dev/null
3128 +++ b/drivers/dma/fsl-qdma.c
3129 @@ -0,0 +1,1243 @@
3130 +/*
3131 + * drivers/dma/fsl-qdma.c
3132 + *
3133 + * Copyright 2014-2015 Freescale Semiconductor, Inc.
3134 + *
3135 + * Driver for the Freescale qDMA engine with software command queue mode.
3136 + * Channel virtualization is supported through enqueuing of DMA jobs to,
3137 + * or dequeuing DMA jobs from, different work queues.
3138 + * This module can be found on Freescale LS SoCs.
3139 + *
3140 + * This program is free software; you can redistribute it and/or modify it
3141 + * under the terms of the GNU General Public License as published by the
3142 + * Free Software Foundation; either version 2 of the License, or (at your
3143 + * option) any later version.
3144 + */
3145 +
3146 +#include <asm/cacheflush.h>
3147 +#include <linux/clk.h>
3148 +#include <linux/delay.h>
3149 +#include <linux/dma-mapping.h>
3150 +#include <linux/dmapool.h>
3151 +#include <linux/init.h>
3152 +#include <linux/interrupt.h>
3153 +#include <linux/module.h>
3154 +#include <linux/of.h>
3155 +#include <linux/of_address.h>
3156 +#include <linux/of_device.h>
3157 +#include <linux/of_dma.h>
3158 +#include <linux/of_irq.h>
3159 +#include <linux/slab.h>
3160 +#include <linux/spinlock.h>
3161 +
3162 +#include "virt-dma.h"
3163 +
3164 +#define FSL_QDMA_DMR 0x0
3165 +#define FSL_QDMA_DSR 0x4
3166 +#define FSL_QDMA_DEIER 0xe00
3167 +#define FSL_QDMA_DEDR 0xe04
3168 +#define FSL_QDMA_DECFDW0R 0xe10
3169 +#define FSL_QDMA_DECFDW1R 0xe14
3170 +#define FSL_QDMA_DECFDW2R 0xe18
3171 +#define FSL_QDMA_DECFDW3R 0xe1c
3172 +#define FSL_QDMA_DECFQIDR 0xe30
3173 +#define FSL_QDMA_DECBR 0xe34
3174 +
3175 +#define FSL_QDMA_BCQMR(x) (0xc0 + 0x100 * (x))
3176 +#define FSL_QDMA_BCQSR(x) (0xc4 + 0x100 * (x))
3177 +#define FSL_QDMA_BCQEDPA_SADDR(x) (0xc8 + 0x100 * (x))
3178 +#define FSL_QDMA_BCQDPA_SADDR(x) (0xcc + 0x100 * (x))
3179 +#define FSL_QDMA_BCQEEPA_SADDR(x) (0xd0 + 0x100 * (x))
3180 +#define FSL_QDMA_BCQEPA_SADDR(x) (0xd4 + 0x100 * (x))
3181 +#define FSL_QDMA_BCQIER(x) (0xe0 + 0x100 * (x))
3182 +#define FSL_QDMA_BCQIDR(x) (0xe4 + 0x100 * (x))
3183 +
3184 +#define FSL_QDMA_SQDPAR 0x80c
3185 +#define FSL_QDMA_SQEPAR 0x814
3186 +#define FSL_QDMA_BSQMR 0x800
3187 +#define FSL_QDMA_BSQSR 0x804
3188 +#define FSL_QDMA_BSQICR 0x828
3189 +#define FSL_QDMA_CQMR 0xa00
3190 +#define FSL_QDMA_CQDSCR1 0xa08
3191 +#define FSL_QDMA_CQDSCR2 0xa0c
3192 +#define FSL_QDMA_CQIER 0xa10
3193 +#define FSL_QDMA_CQEDR 0xa14
3194 +#define FSL_QDMA_SQCCMR 0xa20
3195 +
3196 +#define FSL_QDMA_SQICR_ICEN
3197 +
3198 +#define FSL_QDMA_CQIDR_CQT 0xff000000
3199 +#define FSL_QDMA_CQIDR_SQPE 0x800000
3200 +#define FSL_QDMA_CQIDR_SQT 0x8000
3201 +
3202 +#define FSL_QDMA_BCQIER_CQTIE 0x8000
3203 +#define FSL_QDMA_BCQIER_CQPEIE 0x800000
3204 +#define FSL_QDMA_BSQICR_ICEN 0x80000000
3205 +#define FSL_QDMA_BSQICR_ICST(x) ((x) << 16)
3206 +#define FSL_QDMA_CQIER_MEIE 0x80000000
3207 +#define FSL_QDMA_CQIER_TEIE 0x1
3208 +#define FSL_QDMA_SQCCMR_ENTER_WM 0x200000
3209 +
3210 +#define FSL_QDMA_QUEUE_MAX 8
3211 +
3212 +#define FSL_QDMA_BCQMR_EN 0x80000000
3213 +#define FSL_QDMA_BCQMR_EI 0x40000000
3214 +#define FSL_QDMA_BCQMR_CD_THLD(x) ((x) << 20)
3215 +#define FSL_QDMA_BCQMR_CQ_SIZE(x) ((x) << 16)
3216 +
3217 +#define FSL_QDMA_BCQSR_QF 0x10000
3218 +#define FSL_QDMA_BCQSR_XOFF 0x1
3219 +
3220 +#define FSL_QDMA_BSQMR_EN 0x80000000
3221 +#define FSL_QDMA_BSQMR_DI 0x40000000
3222 +#define FSL_QDMA_BSQMR_CQ_SIZE(x) ((x) << 16)
3223 +
3224 +#define FSL_QDMA_BSQSR_QE 0x20000
3225 +
3226 +#define FSL_QDMA_DMR_DQD 0x40000000
3227 +#define FSL_QDMA_DSR_DB 0x80000000
3228 +
3229 +#define FSL_QDMA_BASE_BUFFER_SIZE 96
3230 +#define FSL_QDMA_EXPECT_SG_ENTRY_NUM 16
3231 +#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN 64
3232 +#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX 16384
3233 +#define FSL_QDMA_QUEUE_NUM_MAX 8
3234 +
3235 +#define FSL_QDMA_CMD_RWTTYPE 0x4
3236 +#define FSL_QDMA_CMD_LWC 0x2
3237 +
3238 +#define FSL_QDMA_CMD_RWTTYPE_OFFSET 28
3239 +#define FSL_QDMA_CMD_NS_OFFSET 27
3240 +#define FSL_QDMA_CMD_DQOS_OFFSET 24
3241 +#define FSL_QDMA_CMD_WTHROTL_OFFSET 20
3242 +#define FSL_QDMA_CMD_DSEN_OFFSET 19
3243 +#define FSL_QDMA_CMD_LWC_OFFSET 16
3244 +
3245 +#define FSL_QDMA_E_SG_TABLE 1
3246 +#define FSL_QDMA_E_DATA_BUFFER 0
3247 +#define FSL_QDMA_F_LAST_ENTRY 1
3248 +
3249 +u64 pre_addr, pre_queue;
3250 +
3251 +/* qDMA Command Descriptor Formats */
3252 +
3253 +/* Compound Command Descriptor Format */
3254 +struct fsl_qdma_ccdf {
3255 + __le32 status; /* ser, status */
3256 + __le32 cfg; /* format, offset */
3257 + union {
3258 + struct {
3259 + __le32 addr_lo; /* low 32-bits of 40-bit address */
3260 + u8 addr_hi; /* high 8-bits of 40-bit address */
3261 + u8 __reserved1[2];
3262 + u8 cfg8b_w1; /* dd, queue*/
3263 + } __packed;
3264 + __le64 data;
3265 + };
3266 +} __packed;
3267 +
3268 +#define QDMA_CCDF_STATUS 20
3269 +#define QDMA_CCDF_OFFSET 20
3270 +#define QDMA_CCDF_MASK GENMASK(28, 20)
3271 +#define QDMA_CCDF_FOTMAT BIT(29)
3272 +#define QDMA_CCDF_SER BIT(30)
3273 +
3274 +static inline u64 qdma_ccdf_addr_get64(const struct fsl_qdma_ccdf *ccdf)
3275 +{
3276 + return le64_to_cpu(ccdf->data) & 0xffffffffffLLU;
3277 +}
3278 +static inline u64 qdma_ccdf_get_queue(const struct fsl_qdma_ccdf *ccdf)
3279 +{
3280 + return ccdf->cfg8b_w1 & 0xff;
3281 +}
3282 +static inline void qdma_ccdf_addr_set64(struct fsl_qdma_ccdf *ccdf, u64 addr)
3283 +{
3284 + ccdf->addr_hi = upper_32_bits(addr);
3285 + ccdf->addr_lo = cpu_to_le32(lower_32_bits(addr));
3286 +}
3287 +static inline int qdma_ccdf_get_offset(const struct fsl_qdma_ccdf *ccdf)
3288 +{
3289 + return (le32_to_cpu(ccdf->cfg) & QDMA_CCDF_MASK) >> QDMA_CCDF_OFFSET;
3290 +}
3291 +static inline void qdma_ccdf_set_format(struct fsl_qdma_ccdf *ccdf, int offset)
3292 +{
3293 + ccdf->cfg = cpu_to_le32(QDMA_CCDF_FOTMAT | offset);
3294 +}
3295 +static inline int qdma_ccdf_get_status(const struct fsl_qdma_ccdf *ccdf)
3296 +{
3297 + return (le32_to_cpu(ccdf->status) & QDMA_CCDF_MASK) >> QDMA_CCDF_STATUS;
3298 +}
3299 +static inline void qdma_ccdf_set_ser(struct fsl_qdma_ccdf *ccdf, int status)
3300 +{
3301 + ccdf->status = cpu_to_le32(QDMA_CCDF_SER | status);
3302 +}
3303 +/* qDMA Compound S/G Format */
3304 +struct fsl_qdma_csgf {
3305 + __le32 offset; /* offset */
3306 + __le32 cfg; /* E bit, F bit, length */
3307 + union {
3308 + struct {
3309 + __le32 addr_lo; /* low 32-bits of 40-bit address */
3310 + u8 addr_hi; /* high 8-bits of 40-bit address */
3311 + u8 __reserved1[3];
3312 + };
3313 + __le64 data;
3314 + };
3315 +} __packed;
3316 +
3317 +#define QDMA_SG_FIN BIT(30)
3318 +#define QDMA_SG_EXT BIT(31)
3319 +#define QDMA_SG_LEN_MASK GENMASK(29, 0)
3320 +static inline u64 qdma_csgf_addr_get64(const struct fsl_qdma_csgf *sg)
3321 +{
3322 + return le64_to_cpu(sg->data) & 0xffffffffffLLU;
3323 +}
3324 +static inline void qdma_csgf_addr_set64(struct fsl_qdma_csgf *sg, u64 addr)
3325 +{
3326 + sg->addr_hi = upper_32_bits(addr);
3327 + sg->addr_lo = cpu_to_le32(lower_32_bits(addr));
3328 +}
3329 +static inline void qdma_csgf_set_len(struct fsl_qdma_csgf *csgf, int len)
3330 +{
3331 + csgf->cfg = cpu_to_le32(len & QDMA_SG_LEN_MASK);
3332 +}
3333 +static inline void qdma_csgf_set_f(struct fsl_qdma_csgf *csgf, int len)
3334 +{
3335 + csgf->cfg = cpu_to_le32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
3336 +}
3337 +static inline void qdma_csgf_set_e(struct fsl_qdma_csgf *csgf, int len)
3338 +{
3339 + csgf->cfg = cpu_to_le32(QDMA_SG_EXT | (len & QDMA_SG_LEN_MASK));
3340 +}
3341 +
3342 +/* qDMA Source Descriptor Format */
3343 +struct fsl_qdma_sdf {
3344 + __le32 rev3;
3345 + __le32 cfg; /* rev4, bit[0-11] - ssd, bit[12-23] sss */
3346 + __le32 rev5;
3347 + __le32 cmd;
3348 +} __packed;
3349 +
3350 +/* qDMA Destination Descriptor Format */
3351 +struct fsl_qdma_ddf {
3352 + __le32 rev1;
3353 + __le32 cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
3354 + __le32 rev3;
3355 + __le32 cmd;
3356 +} __packed;
3357 +
3358 +struct fsl_qdma_chan {
3359 + struct virt_dma_chan vchan;
3360 + struct virt_dma_desc vdesc;
3361 + enum dma_status status;
3362 + u32 slave_id;
3363 + struct fsl_qdma_engine *qdma;
3364 + struct fsl_qdma_queue *queue;
3365 + struct list_head qcomp;
3366 +};
3367 +
3368 +struct fsl_qdma_queue {
3369 + struct fsl_qdma_ccdf *virt_head;
3370 + struct fsl_qdma_ccdf *virt_tail;
3371 + struct list_head comp_used;
3372 + struct list_head comp_free;
3373 + struct dma_pool *comp_pool;
3374 + struct dma_pool *sg_pool;
3375 + spinlock_t queue_lock;
3376 + dma_addr_t bus_addr;
3377 + u32 n_cq;
3378 + u32 id;
3379 + struct fsl_qdma_ccdf *cq;
3380 +};
3381 +
3382 +struct fsl_qdma_sg {
3383 + dma_addr_t bus_addr;
3384 + void *virt_addr;
3385 +};
3386 +
3387 +struct fsl_qdma_comp {
3388 + dma_addr_t bus_addr;
3389 + void *virt_addr;
3390 + struct fsl_qdma_chan *qchan;
3391 + struct fsl_qdma_sg *sg_block;
3392 + struct virt_dma_desc vdesc;
3393 + struct list_head list;
3394 + u32 sg_block_src;
3395 + u32 sg_block_dst;
3396 +};
3397 +
3398 +struct fsl_qdma_engine {
3399 + struct dma_device dma_dev;
3400 + void __iomem *ctrl_base;
3401 + void __iomem *status_base;
3402 + void __iomem *block_base;
3403 + u32 n_chans;
3404 + u32 n_queues;
3405 + struct mutex fsl_qdma_mutex;
3406 + int error_irq;
3407 + int queue_irq;
3408 + bool big_endian;
3409 + struct fsl_qdma_queue *queue;
3410 + struct fsl_qdma_queue *status;
3411 + struct fsl_qdma_chan chans[];
3412 +
3413 +};
3414 +
3415 +static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
3416 +{
3417 + if (qdma->big_endian)
3418 + return ioread32be(addr);
3419 + else
3420 + return ioread32(addr);
3421 +}
3422 +
3423 +static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
3424 + void __iomem *addr)
3425 +{
3426 + if (qdma->big_endian)
3427 + iowrite32be(val, addr);
3428 + else
3429 + iowrite32(val, addr);
3430 +}
3431 +
3432 +static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
3433 +{
3434 + return container_of(chan, struct fsl_qdma_chan, vchan.chan);
3435 +}
3436 +
3437 +static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
3438 +{
3439 + return container_of(vd, struct fsl_qdma_comp, vdesc);
3440 +}
3441 +
3442 +static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
3443 +{
3444 + /*
3445 + * In QDMA mode, we don't need to do anything.
3446 + */
3447 + return 0;
3448 +}
3449 +
3450 +static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
3451 +{
3452 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
3453 + unsigned long flags;
3454 + LIST_HEAD(head);
3455 +
3456 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
3457 + vchan_get_all_descriptors(&fsl_chan->vchan, &head);
3458 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
3459 +
3460 + vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
3461 +}
3462 +
3463 +static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
3464 + dma_addr_t dst, dma_addr_t src, u32 len)
3465 +{
3466 + struct fsl_qdma_ccdf *ccdf;
3467 + struct fsl_qdma_csgf *csgf_desc, *csgf_src, *csgf_dest;
3468 + struct fsl_qdma_sdf *sdf;
3469 + struct fsl_qdma_ddf *ddf;
3470 +
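+ /*
+ * The 96-byte completion buffer is carved into 16-byte slots: the CCDF
+ * at offset 0, the frame-list entries (desc/src/dst) at offsets
+ * 16/32/48, and the SDF/DDF descriptor buffer at offsets 64/80.
+ */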
3471 + ccdf = (struct fsl_qdma_ccdf *)fsl_comp->virt_addr;
3472 + csgf_desc = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 1;
3473 + csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 2;
3474 + csgf_dest = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 3;
3475 + sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4;
3476 + ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5;
3477 +
3478 + memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
3479 + /* Head Command Descriptor (Frame Descriptor) */
3480 + qdma_ccdf_addr_set64(ccdf, fsl_comp->bus_addr + 16);
3481 + qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
3482 + qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
3483 + /* Status notification is enqueued to the status queue. */
3484 + /* Compound Command Descriptor (Frame List Table) */
3485 + qdma_csgf_addr_set64(csgf_desc, fsl_comp->bus_addr + 64);
3486 + /* The length must be 32 as this is a Compound S/G Descriptor. */
3487 + qdma_csgf_set_len(csgf_desc, 32);
3488 + qdma_csgf_addr_set64(csgf_src, src);
3489 + qdma_csgf_set_len(csgf_src, len);
3490 + qdma_csgf_addr_set64(csgf_dest, dst);
3491 + qdma_csgf_set_len(csgf_dest, len);
3492 + /* This entry is the last entry. */
3493 + qdma_csgf_set_f(csgf_dest, len);
3494 + /* Descriptor Buffer */
3495 + sdf->cmd = cpu_to_le32(
3496 + FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
3497 + ddf->cmd = cpu_to_le32(
3498 + FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
3499 + ddf->cmd |= cpu_to_le32(
3500 + FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
3501 +}
3502 +
3503 +static void fsl_qdma_comp_fill_sg(
3504 + struct fsl_qdma_comp *fsl_comp,
3505 + struct scatterlist *dst_sg, unsigned int dst_nents,
3506 + struct scatterlist *src_sg, unsigned int src_nents)
3507 +{
3508 + struct fsl_qdma_ccdf *ccdf;
3509 + struct fsl_qdma_csgf *csgf_desc, *csgf_src, *csgf_dest, *csgf_sg;
3510 + struct fsl_qdma_sdf *sdf;
3511 + struct fsl_qdma_ddf *ddf;
3512 + struct fsl_qdma_sg *sg_block, *temp;
3513 + struct scatterlist *sg;
3514 + u64 total_src_len = 0;
3515 + u64 total_dst_len = 0;
3516 + u32 i;
3517 +
3518 + ccdf = (struct fsl_qdma_ccdf *)fsl_comp->virt_addr;
3519 + csgf_desc = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 1;
3520 + csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 2;
3521 + csgf_dest = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 3;
3522 + sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4;
3523 + ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5;
3524 + memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
3525 + /* Head Command Descriptor (Frame Descriptor) */
3526 + qdma_ccdf_addr_set64(ccdf, fsl_comp->bus_addr + 16);
3527 + qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
3528 + /* Status notification is enqueued to the status queue. */
3529 + qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
3530 +
3531 + /* Compound Command Descriptor (Frame List Table) */
3532 + qdma_csgf_addr_set64(csgf_desc, fsl_comp->bus_addr + 64);
3533 + /* The length must be 32 as this is a Compound S/G Descriptor. */
3534 + qdma_csgf_set_len(csgf_desc, 32);
3535 +
3536 + sg_block = fsl_comp->sg_block;
3537 + qdma_csgf_addr_set64(csgf_src, sg_block->bus_addr);
3538 + /* This entry links to the s/g entry. */
3539 + qdma_csgf_set_e(csgf_src, 32);
3540 +
3541 + temp = sg_block + fsl_comp->sg_block_src;
3542 + qdma_csgf_addr_set64(csgf_dest, temp->bus_addr);
3543 + /* This entry is the last entry. */
3544 + qdma_csgf_set_f(csgf_dest, 32);
3545 + /* This entry links to the s/g entry. */
3546 + qdma_csgf_set_e(csgf_dest, 32);
3547 +
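+ /*
+ * Fill the hardware S/G tables. Each sg_block holds
+ * FSL_QDMA_EXPECT_SG_ENTRY_NUM 16-byte entries; the first NUM - 1
+ * describe data buffers and the last one is an extension entry
+ * chaining to the next block.
+ */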
3548 + for_each_sg(src_sg, sg, src_nents, i) {
3549 + temp = sg_block + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
3550 + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
3551 + i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
3552 + qdma_csgf_addr_set64(csgf_sg, sg_dma_address(sg));
3553 + qdma_csgf_set_len(csgf_sg, sg_dma_len(sg));
3554 + total_src_len += sg_dma_len(sg);
3555 +
3556 + if (i == src_nents - 1)
3557 + qdma_csgf_set_f(csgf_sg, sg_dma_len(sg));
3558 + if (i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) ==
3559 + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 2) {
3560 + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
3561 + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1;
3562 + temp = sg_block +
3563 + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
3564 + qdma_csgf_addr_set64(csgf_sg, temp->bus_addr);
3565 + qdma_csgf_set_e(csgf_sg, sg_dma_len(sg));
3566 + }
3567 + }
3568 +
3569 + sg_block += fsl_comp->sg_block_src;
3570 + for_each_sg(dst_sg, sg, dst_nents, i) {
3571 + temp = sg_block + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
3572 + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
3573 + i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
3574 + qdma_csgf_addr_set64(csgf_sg, sg_dma_address(sg));
3575 + qdma_csgf_set_len(csgf_sg, sg_dma_len(sg));
3576 + total_dst_len += sg_dma_len(sg);
3577 +
3578 + if (i == dst_nents - 1)
3579 + qdma_csgf_set_f(csgf_sg, sg_dma_len(sg));
3580 + if (i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) ==
3581 + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 2) {
3582 + csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
3583 + FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1;
3584 + temp = sg_block +
3585 + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
3586 + qdma_csgf_addr_set64(csgf_sg, temp->bus_addr);
3587 + qdma_csgf_set_e(csgf_sg, sg_dma_len(sg));
3588 + }
3589 + }
3590 +
3591 + if (total_src_len != total_dst_len)
3592 + dev_err(&fsl_comp->qchan->vchan.chan.dev->device,
3593 + "The data lengths for src and dst don't match.\n");
3594 +
3595 + qdma_csgf_set_len(csgf_src, total_src_len);
3596 + qdma_csgf_set_len(csgf_dest, total_dst_len);
3597 +
3598 + /* Descriptor Buffer */
3599 +}
3600 +
3601 +/*
3602 + * Pre-request full command descriptors for enqueue.
3603 + */
3604 +static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
3605 +{
3606 + struct fsl_qdma_comp *comp_temp;
3607 + int i;
3608 +
3609 + for (i = 0; i < queue->n_cq; i++) {
3610 + comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
3611 + if (!comp_temp)
3612 + return -1;
3613 + comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
3614 + GFP_NOWAIT,
3615 + &comp_temp->bus_addr);
3616 + if (!comp_temp->virt_addr)
3617 + return -1;
3618 + list_add_tail(&comp_temp->list, &queue->comp_free);
3619 + }
3620 + return 0;
3621 +}
3622 +
3623 +/*
3624 + * Request a command descriptor for enqueue.
3625 + */
3626 +static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc(
3627 + struct fsl_qdma_chan *fsl_chan,
3628 + unsigned int dst_nents,
3629 + unsigned int src_nents)
3630 +{
3631 + struct fsl_qdma_comp *comp_temp;
3632 + struct fsl_qdma_sg *sg_block;
3633 + struct fsl_qdma_queue *queue = fsl_chan->queue;
3634 + unsigned long flags;
3635 + unsigned int dst_sg_entry_block, src_sg_entry_block, sg_entry_total, i;
3636 +
3637 + spin_lock_irqsave(&queue->queue_lock, flags);
3638 + if (list_empty(&queue->comp_free)) {
3639 + spin_unlock_irqrestore(&queue->queue_lock, flags);
3640 + comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
3641 + if (!comp_temp)
3642 + return NULL;
3643 + comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
3644 + GFP_NOWAIT,
3645 + &comp_temp->bus_addr);
3646 + if (!comp_temp->virt_addr)
3647 + return NULL;
3648 + } else {
3649 + comp_temp = list_first_entry(&queue->comp_free,
3650 + struct fsl_qdma_comp,
3651 + list);
3652 + list_del(&comp_temp->list);
3653 + spin_unlock_irqrestore(&queue->queue_lock, flags);
3654 + }
3655 +
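+ /*
+ * Reserve one S/G block per (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1)
+ * entries: the last entry of every block is used to chain to the
+ * following block rather than to describe data.
+ */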
3656 + if (dst_nents != 0)
3657 + dst_sg_entry_block = dst_nents /
3658 + (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
3659 + else
3660 + dst_sg_entry_block = 0;
3661 +
3662 + if (src_nents != 0)
3663 + src_sg_entry_block = src_nents /
3664 + (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
3665 + else
3666 + src_sg_entry_block = 0;
3667 +
3668 + sg_entry_total = dst_sg_entry_block + src_sg_entry_block;
3669 + if (sg_entry_total) {
3670 + sg_block = kzalloc(sizeof(*sg_block) *
3671 + sg_entry_total,
3672 + GFP_KERNEL);
3673 + if (!sg_block)
3674 + return NULL;
3675 + comp_temp->sg_block = sg_block;
3676 + for (i = 0; i < sg_entry_total; i++) {
3677 + sg_block->virt_addr = dma_pool_alloc(queue->sg_pool,
3678 + GFP_NOWAIT,
3679 + &sg_block->bus_addr);
3680 + memset(sg_block->virt_addr, 0,
3681 + FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16);
3682 + sg_block++;
3683 + }
3684 + }
3685 +
3686 + comp_temp->sg_block_src = src_sg_entry_block;
3687 + comp_temp->sg_block_dst = dst_sg_entry_block;
3688 + comp_temp->qchan = fsl_chan;
3689 +
3690 + return comp_temp;
3691 +}
3692 +
3693 +static struct fsl_qdma_queue *fsl_qdma_alloc_queue_resources(
3694 + struct platform_device *pdev,
3695 + unsigned int queue_num)
3696 +{
3697 + struct device_node *np = pdev->dev.of_node;
3698 + struct fsl_qdma_queue *queue_head, *queue_temp;
3699 + int ret, len, i;
3700 + unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
3701 +
3702 + if (queue_num > FSL_QDMA_QUEUE_MAX)
3703 + queue_num = FSL_QDMA_QUEUE_MAX;
3704 + len = sizeof(*queue_head) * queue_num;
3705 + queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
3706 + if (!queue_head)
3707 + return NULL;
3708 +
3709 + ret = of_property_read_u32_array(np, "queue-sizes", queue_size,
3710 + queue_num);
3711 + if (ret) {
3712 + dev_err(&pdev->dev, "Can't get queue-sizes.\n");
3713 + return NULL;
3714 + }
3715 +
3716 + for (i = 0; i < queue_num; i++) {
3717 + if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
3718 + || queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
3719 + dev_err(&pdev->dev, "Get wrong queue-sizes.\n");
3720 + return NULL;
3721 + }
3722 + queue_temp = queue_head + i;
3723 + queue_temp->cq = dma_alloc_coherent(&pdev->dev,
3724 + sizeof(struct fsl_qdma_ccdf) *
3725 + queue_size[i],
3726 + &queue_temp->bus_addr,
3727 + GFP_KERNEL);
3728 + if (!queue_temp->cq)
3729 + return NULL;
3730 + queue_temp->n_cq = queue_size[i];
3731 + queue_temp->id = i;
3732 + queue_temp->virt_head = queue_temp->cq;
3733 + queue_temp->virt_tail = queue_temp->cq;
3734 + /*
3735 + * The dma pool for queue command buffer
3736 + */
3737 + queue_temp->comp_pool = dma_pool_create("comp_pool",
3738 + &pdev->dev,
3739 + FSL_QDMA_BASE_BUFFER_SIZE,
3740 + 16, 0);
3741 + if (!queue_temp->comp_pool) {
3742 + dma_free_coherent(&pdev->dev,
3743 + sizeof(struct fsl_qdma_ccdf) *
3744 + queue_size[i],
3745 + queue_temp->cq,
3746 + queue_temp->bus_addr);
3747 + return NULL;
3748 + }
3749 + /*
3750 + * The dma pool for the queue's S/G entry buffers
3751 + */
3752 + queue_temp->sg_pool = dma_pool_create("sg_pool",
3753 + &pdev->dev,
3754 + FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16,
3755 + 64, 0);
3756 + if (!queue_temp->sg_pool) {
3757 + dma_free_coherent(&pdev->dev,
3758 + sizeof(struct fsl_qdma_ccdf) *
3759 + queue_size[i],
3760 + queue_temp->cq,
3761 + queue_temp->bus_addr);
3762 + dma_pool_destroy(queue_temp->comp_pool);
3763 + return NULL;
3764 + }
3765 + /*
3766 + * List for queue command buffer
3767 + */
3768 + INIT_LIST_HEAD(&queue_temp->comp_used);
3769 + INIT_LIST_HEAD(&queue_temp->comp_free);
3770 + spin_lock_init(&queue_temp->queue_lock);
3771 + }
3772 +
3773 + return queue_head;
3774 +}
3775 +
3776 +static struct fsl_qdma_queue *fsl_qdma_prep_status_queue(
3777 + struct platform_device *pdev)
3778 +{
3779 + struct device_node *np = pdev->dev.of_node;
3780 + struct fsl_qdma_queue *status_head;
3781 + unsigned int status_size;
3782 + int ret;
3783 +
3784 + ret = of_property_read_u32(np, "status-sizes", &status_size);
3785 + if (ret) {
3786 + dev_err(&pdev->dev, "Can't get status-sizes.\n");
3787 + return NULL;
3788 + }
3789 + if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
3790 + || status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
3791 + dev_err(&pdev->dev, "Get wrong status_size.\n");
3792 + return NULL;
3793 + }
3794 + status_head = devm_kzalloc(&pdev->dev, sizeof(*status_head),
3795 + GFP_KERNEL);
3796 + if (!status_head)
3797 + return NULL;
3798 +
3799 + /*
3800 + * Buffer for queue command
3801 + */
3802 + status_head->cq = dma_alloc_coherent(&pdev->dev,
3803 + sizeof(struct fsl_qdma_ccdf) *
3804 + status_size,
3805 + &status_head->bus_addr,
3806 + GFP_KERNEL);
3807 + if (!status_head->cq)
3808 + return NULL;
3809 + status_head->n_cq = status_size;
3810 + status_head->virt_head = status_head->cq;
3811 + status_head->virt_tail = status_head->cq;
3812 + status_head->comp_pool = NULL;
3813 +
3814 + return status_head;
3815 +}
3816 +
3817 +static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
3818 +{
3819 + void __iomem *ctrl = fsl_qdma->ctrl_base;
3820 + void __iomem *block = fsl_qdma->block_base;
3821 + int i, count = 5;
3822 + u32 reg;
3823 +
3824 + /* Disable the command queue and wait for idle state. */
3825 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
3826 + reg |= FSL_QDMA_DMR_DQD;
3827 + qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
3828 + for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
3829 + qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
3830 +
3831 + while (1) {
3832 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
3833 + if (!(reg & FSL_QDMA_DSR_DB))
3834 + break;
3835 + if (count-- < 0)
3836 + return -EBUSY;
3837 + udelay(100);
3838 + }
3839 +
3840 + /* Disable status queue. */
3841 + qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);
3842 +
3843 + /*
3844 + * Clear the command queue interrupt detect register for all queues.
3845 + */
3846 + qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
3847 +
3848 + return 0;
3849 +}
3850 +
3851 +static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma)
3852 +{
3853 + struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
3854 + struct fsl_qdma_queue *fsl_status = fsl_qdma->status;
3855 + struct fsl_qdma_queue *temp_queue;
3856 + struct fsl_qdma_comp *fsl_comp;
3857 + struct fsl_qdma_ccdf *status_addr;
3858 + struct fsl_qdma_csgf *csgf_src;
3859 + void __iomem *block = fsl_qdma->block_base;
3860 + u32 reg, i;
3861 + bool duplicate, duplicate_handle;
3862 +
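+ /*
+ * Walk the status queue. Entries matching the previously consumed
+ * (queue, address) pair are treated as duplicates and are only
+ * acknowledged, not completed a second time.
+ */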
3863 + while (1) {
3864 + duplicate = 0;
3865 + duplicate_handle = 0;
3866 + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
3867 + if (reg & FSL_QDMA_BSQSR_QE)
3868 + return 0;
3869 + status_addr = fsl_status->virt_head;
3870 + if (qdma_ccdf_get_queue(status_addr) == pre_queue &&
3871 + qdma_ccdf_addr_get64(status_addr) == pre_addr)
3872 + duplicate = 1;
3873 + i = qdma_ccdf_get_queue(status_addr);
3874 + pre_queue = qdma_ccdf_get_queue(status_addr);
3875 + pre_addr = qdma_ccdf_addr_get64(status_addr);
3876 + temp_queue = fsl_queue + i;
3877 + spin_lock(&temp_queue->queue_lock);
3878 + if (list_empty(&temp_queue->comp_used)) {
3879 + if (duplicate)
3880 + duplicate_handle = 1;
3881 + else {
3882 + spin_unlock(&temp_queue->queue_lock);
3883 + return -1;
3884 + }
3885 + } else {
3886 + fsl_comp = list_first_entry(&temp_queue->comp_used,
3887 + struct fsl_qdma_comp,
3888 + list);
3889 + csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr
3890 + + 2;
3891 + if (fsl_comp->bus_addr + 16 != pre_addr) {
3892 + if (duplicate)
3893 + duplicate_handle = 1;
3894 + else {
3895 + spin_unlock(&temp_queue->queue_lock);
3896 + return -1;
3897 + }
3898 + }
3899 + }
3900 +
3901 + if (duplicate_handle) {
3902 + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
3903 + reg |= FSL_QDMA_BSQMR_DI;
3904 + qdma_ccdf_addr_set64(status_addr, 0x0);
3905 + fsl_status->virt_head++;
3906 + if (fsl_status->virt_head == fsl_status->cq
3907 + + fsl_status->n_cq)
3908 + fsl_status->virt_head = fsl_status->cq;
3909 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
3910 + spin_unlock(&temp_queue->queue_lock);
3911 + continue;
3912 + }
3913 + list_del(&fsl_comp->list);
3914 +
3915 + reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
3916 + reg |= FSL_QDMA_BSQMR_DI;
3917 + qdma_ccdf_addr_set64(status_addr, 0x0);
3918 + fsl_status->virt_head++;
3919 + if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
3920 + fsl_status->virt_head = fsl_status->cq;
3921 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
3922 + spin_unlock(&temp_queue->queue_lock);
3923 +
3924 + spin_lock(&fsl_comp->qchan->vchan.lock);
3925 + vchan_cookie_complete(&fsl_comp->vdesc);
3926 + fsl_comp->qchan->status = DMA_COMPLETE;
3927 + spin_unlock(&fsl_comp->qchan->vchan.lock);
3928 + }
3929 + return 0;
3930 +}
3931 +
3932 +static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
3933 +{
3934 + struct fsl_qdma_engine *fsl_qdma = dev_id;
3935 + unsigned int intr;
3936 + void __iomem *status = fsl_qdma->status_base;
3937 +
3938 + intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
3939 +
3940 + if (intr)
3941 + dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");
3942 +
3943 + qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
3944 + return IRQ_HANDLED;
3945 +}
3946 +
3947 +static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
3948 +{
3949 + struct fsl_qdma_engine *fsl_qdma = dev_id;
3950 + unsigned int intr, reg;
3951 + void __iomem *block = fsl_qdma->block_base;
3952 + void __iomem *ctrl = fsl_qdma->ctrl_base;
3953 +
3954 + intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));
3955 +
3956 + if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
3957 + intr = fsl_qdma_queue_transfer_complete(fsl_qdma);
3958 +
3959 + if (intr != 0) {
3960 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
3961 + reg |= FSL_QDMA_DMR_DQD;
3962 + qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
3963 + qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
3964 + dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
3965 + }
3966 +
3967 + qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
3968 +
3969 + return IRQ_HANDLED;
3970 +}
3971 +
3972 +static int
3973 +fsl_qdma_irq_init(struct platform_device *pdev,
3974 + struct fsl_qdma_engine *fsl_qdma)
3975 +{
3976 + int ret;
3977 +
3978 + fsl_qdma->error_irq = platform_get_irq_byname(pdev,
3979 + "qdma-error");
3980 + if (fsl_qdma->error_irq < 0) {
3981 + dev_err(&pdev->dev, "Can't get qdma controller irq.\n");
3982 + return fsl_qdma->error_irq;
3983 + }
3984 +
3985 + fsl_qdma->queue_irq = platform_get_irq_byname(pdev, "qdma-queue");
3986 + if (fsl_qdma->queue_irq < 0) {
3987 + dev_err(&pdev->dev, "Can't get qdma queue irq.\n");
3988 + return fsl_qdma->queue_irq;
3989 + }
3990 +
3991 + ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
3992 + fsl_qdma_error_handler, 0, "qDMA error", fsl_qdma);
3993 + if (ret) {
3994 + dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
3995 + return ret;
3996 + }
3997 + ret = devm_request_irq(&pdev->dev, fsl_qdma->queue_irq,
3998 + fsl_qdma_queue_handler, 0, "qDMA queue", fsl_qdma);
3999 + if (ret) {
4000 + dev_err(&pdev->dev, "Can't register qDMA queue IRQ.\n");
4001 + return ret;
4002 + }
4003 +
4004 + return 0;
4005 +}
4006 +
4007 +static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
4008 +{
4009 + struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
4010 + struct fsl_qdma_queue *temp;
4011 + void __iomem *ctrl = fsl_qdma->ctrl_base;
4012 + void __iomem *status = fsl_qdma->status_base;
4013 + void __iomem *block = fsl_qdma->block_base;
4014 + int i, ret;
4015 + u32 reg;
4016 +
4017 + /* Try to halt the qDMA engine first. */
4018 + ret = fsl_qdma_halt(fsl_qdma);
4019 + if (ret) {
4020 + dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");
4021 + return ret;
4022 + }
4023 +
4024 + /*
4025 + * Clear the command queue interrupt detect register for all queues.
4026 + */
4027 + qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
4028 +
4029 + for (i = 0; i < fsl_qdma->n_queues; i++) {
4030 + temp = fsl_queue + i;
4031 + /*
4032 + * Initialize Command Queue registers to point to the first
4033 + * command descriptor in memory.
4034 + * Dequeue Pointer Address Registers
4035 + * Enqueue Pointer Address Registers
4036 + */
4037 + qdma_writel(fsl_qdma, temp->bus_addr,
4038 + block + FSL_QDMA_BCQDPA_SADDR(i));
4039 + qdma_writel(fsl_qdma, temp->bus_addr,
4040 + block + FSL_QDMA_BCQEPA_SADDR(i));
4041 +
4042 + /* Initialize the queue mode. */
4043 + reg = FSL_QDMA_BCQMR_EN;
4044 + reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq)-4);
4045 + reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq)-6);
4046 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
4047 + }
4048 +
4049 + /*
4050 + * Workaround for erratum: ERR010812.
4051 + * We must enable XOFF to avoid enqueue rejections.
4052 + * Setting SQCCMR ENTER_WM to 0x20.
4053 + */
4054 + qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
4055 + block + FSL_QDMA_SQCCMR);
4056 + /*
4057 + * Initialize status queue registers to point to the first
4058 + * command descriptor in memory.
4059 + * Dequeue Pointer Address Registers
4060 + * Enqueue Pointer Address Registers
4061 + */
4062 + qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
4063 + block + FSL_QDMA_SQEPAR);
4064 + qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
4065 + block + FSL_QDMA_SQDPAR);
4066 + /* Initialize status queue interrupt. */
4067 + qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
4068 + block + FSL_QDMA_BCQIER(0));
4069 + qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN | FSL_QDMA_BSQICR_ICST(5)
4070 + | 0x8000,
4071 + block + FSL_QDMA_BSQICR);
4072 + qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE | FSL_QDMA_CQIER_TEIE,
4073 + block + FSL_QDMA_CQIER);
4074 + /* Initialize controller interrupt register. */
4075 + qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
4076 + qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEIER);
4077 +
4078 + /* Initialize the status queue mode. */
4079 + reg = FSL_QDMA_BSQMR_EN;
4080 + reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2(fsl_qdma->status->n_cq)-6);
4081 + qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
4082 +
4083 + reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
4084 + reg &= ~FSL_QDMA_DMR_DQD;
4085 + qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
4086 +
4087 + return 0;
4088 +}
4089 +
4090 +static struct dma_async_tx_descriptor *fsl_qdma_prep_dma_sg(
4091 + struct dma_chan *chan,
4092 + struct scatterlist *dst_sg, unsigned int dst_nents,
4093 + struct scatterlist *src_sg, unsigned int src_nents,
4094 + unsigned long flags)
4095 +{
4096 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
4097 + struct fsl_qdma_comp *fsl_comp;
4098 +
4099 + fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan,
4100 + dst_nents,
4101 + src_nents);
4102 + fsl_qdma_comp_fill_sg(fsl_comp, dst_sg, dst_nents, src_sg, src_nents);
4103 +
4104 + return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
4105 +}
4106 +
4107 +static struct dma_async_tx_descriptor *
4108 +fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
4109 + dma_addr_t src, size_t len, unsigned long flags)
4110 +{
4111 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
4112 + struct fsl_qdma_comp *fsl_comp;
4113 +
4114 + fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan, 0, 0);
4115 + fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
4116 +
4117 + return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
4118 +}
4119 +
4120 +static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
4121 +{
4122 + void __iomem *block = fsl_chan->qdma->block_base;
4123 + struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
4124 + struct fsl_qdma_comp *fsl_comp;
4125 + struct virt_dma_desc *vdesc;
4126 + u32 reg;
4127 +
4128 + reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
4129 + if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
4130 + return;
4131 + vdesc = vchan_next_desc(&fsl_chan->vchan);
4132 + if (!vdesc)
4133 + return;
4134 + list_del(&vdesc->node);
4135 + fsl_comp = to_fsl_qdma_comp(vdesc);
4136 +
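+ /*
+ * Copy the 16-byte command descriptor into the channel's circular
+ * command queue, then set FSL_QDMA_BCQMR_EI to notify the engine of
+ * the new entry.
+ */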
4137 + memcpy(fsl_queue->virt_head++, fsl_comp->virt_addr, 16);
4138 + if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
4139 + fsl_queue->virt_head = fsl_queue->cq;
4140 +
4141 + list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
4142 + barrier();
4143 + reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
4144 + reg |= FSL_QDMA_BCQMR_EI;
4145 + qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
4146 + fsl_chan->status = DMA_IN_PROGRESS;
4147 +}
4148 +
4149 +static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan,
4150 + dma_cookie_t cookie, struct dma_tx_state *txstate)
4151 +{
4152 + return dma_cookie_status(chan, cookie, txstate);
4153 +}
4154 +
4155 +static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
4156 +{
4157 + struct fsl_qdma_comp *fsl_comp;
4158 + struct fsl_qdma_queue *fsl_queue;
4159 + struct fsl_qdma_sg *sg_block;
4160 + unsigned long flags;
4161 + unsigned int i;
4162 +
4163 + fsl_comp = to_fsl_qdma_comp(vdesc);
4164 + fsl_queue = fsl_comp->qchan->queue;
4165 +
4166 + if (fsl_comp->sg_block) {
4167 + for (i = 0; i < fsl_comp->sg_block_src +
4168 + fsl_comp->sg_block_dst; i++) {
4169 + sg_block = fsl_comp->sg_block + i;
4170 + dma_pool_free(fsl_queue->sg_pool,
4171 + sg_block->virt_addr,
4172 + sg_block->bus_addr);
4173 + }
4174 + kfree(fsl_comp->sg_block);
4175 + }
4176 +
4177 + spin_lock_irqsave(&fsl_queue->queue_lock, flags);
4178 + list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
4179 + spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
4180 +}
4181 +
4182 +static void fsl_qdma_issue_pending(struct dma_chan *chan)
4183 +{
4184 + struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
4185 + struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
4186 + unsigned long flags;
4187 +
4188 + spin_lock_irqsave(&fsl_queue->queue_lock, flags);
4189 + spin_lock(&fsl_chan->vchan.lock);
4190 + if (vchan_issue_pending(&fsl_chan->vchan))
4191 + fsl_qdma_enqueue_desc(fsl_chan);
4192 + spin_unlock(&fsl_chan->vchan.lock);
4193 + spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
4194 +}
4195 +
4196 +static int fsl_qdma_probe(struct platform_device *pdev)
4197 +{
4198 + struct device_node *np = pdev->dev.of_node;
4199 + struct fsl_qdma_engine *fsl_qdma;
4200 + struct fsl_qdma_chan *fsl_chan;
4201 + struct resource *res;
4202 + unsigned int len, chans, queues;
4203 + int ret, i;
4204 +
4205 + ret = of_property_read_u32(np, "channels", &chans);
4206 + if (ret) {
4207 + dev_err(&pdev->dev, "Can't get channels.\n");
4208 + return ret;
4209 + }
4210 +
4211 + len = sizeof(*fsl_qdma) + sizeof(*fsl_chan) * chans;
4212 + fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
4213 + if (!fsl_qdma)
4214 + return -ENOMEM;
4215 +
4216 + ret = of_property_read_u32(np, "queues", &queues);
4217 + if (ret) {
4218 + dev_err(&pdev->dev, "Can't get queues.\n");
4219 + return ret;
4220 + }
4221 +
4222 + fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, queues);
4223 + if (!fsl_qdma->queue)
4224 + return -ENOMEM;
4225 +
4226 + fsl_qdma->status = fsl_qdma_prep_status_queue(pdev);
4227 + if (!fsl_qdma->status)
4228 + return -ENOMEM;
4229 +
4230 + fsl_qdma->n_chans = chans;
4231 + fsl_qdma->n_queues = queues;
4232 + mutex_init(&fsl_qdma->fsl_qdma_mutex);
4233 +
4234 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4235 + fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
4236 + if (IS_ERR(fsl_qdma->ctrl_base))
4237 + return PTR_ERR(fsl_qdma->ctrl_base);
4238 +
4239 + res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
4240 + fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
4241 + if (IS_ERR(fsl_qdma->status_base))
4242 + return PTR_ERR(fsl_qdma->status_base);
4243 +
4244 + res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
4245 + fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
4246 + if (IS_ERR(fsl_qdma->block_base))
4247 + return PTR_ERR(fsl_qdma->block_base);
4248 +
4249 + ret = fsl_qdma_irq_init(pdev, fsl_qdma);
4250 + if (ret)
4251 + return ret;
4252 +
4253 + fsl_qdma->big_endian = of_property_read_bool(np, "big-endian");
4254 + INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
4255 + for (i = 0; i < fsl_qdma->n_chans; i++) {
4256 + struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
4257 +
4258 + fsl_chan->qdma = fsl_qdma;
4259 + fsl_chan->queue = fsl_qdma->queue + i % fsl_qdma->n_queues;
4260 + fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
4261 + INIT_LIST_HEAD(&fsl_chan->qcomp);
4262 + vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
4263 + }
4264 + for (i = 0; i < fsl_qdma->n_queues; i++)
4265 + fsl_qdma_pre_request_enqueue_desc(fsl_qdma->queue + i);
4266 +
4267 + dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);
4268 + dma_cap_set(DMA_SG, fsl_qdma->dma_dev.cap_mask);
4269 +
4270 + fsl_qdma->dma_dev.dev = &pdev->dev;
4271 + fsl_qdma->dma_dev.device_alloc_chan_resources
4272 + = fsl_qdma_alloc_chan_resources;
4273 + fsl_qdma->dma_dev.device_free_chan_resources
4274 + = fsl_qdma_free_chan_resources;
4275 + fsl_qdma->dma_dev.device_tx_status = fsl_qdma_tx_status;
4276 + fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
4277 + fsl_qdma->dma_dev.device_prep_dma_sg = fsl_qdma_prep_dma_sg;
4278 + fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
4279 +
4280 + dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
4281 +
4282 + platform_set_drvdata(pdev, fsl_qdma);
4283 +
4284 + ret = dma_async_device_register(&fsl_qdma->dma_dev);
4285 + if (ret) {
4286 + dev_err(&pdev->dev, "Can't register Freescale qDMA engine.\n");
4287 + return ret;
4288 + }
4289 +
4290 + ret = fsl_qdma_reg_init(fsl_qdma);
4291 + if (ret) {
4292 + dev_err(&pdev->dev, "Can't initialize the qDMA engine.\n");
4293 + return ret;
4294 + }
4295 +
4296 +
4297 + return 0;
4298 +}
4299 +
4300 +static int fsl_qdma_remove(struct platform_device *pdev)
4301 +{
4302 + struct device_node *np = pdev->dev.of_node;
4303 + struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
4304 + struct fsl_qdma_queue *queue_temp;
4305 + struct fsl_qdma_queue *status = fsl_qdma->status;
4306 + struct fsl_qdma_comp *comp_temp, *_comp_temp;
4307 + int i;
4308 +
4309 + of_dma_controller_free(np);
4310 + dma_async_device_unregister(&fsl_qdma->dma_dev);
4311 +
4312 + /* Free descriptor areas */
4313 + for (i = 0; i < fsl_qdma->n_queues; i++) {
4314 + queue_temp = fsl_qdma->queue + i;
4315 + list_for_each_entry_safe(comp_temp, _comp_temp,
4316 + &queue_temp->comp_used, list) {
4317 + dma_pool_free(queue_temp->comp_pool,
4318 + comp_temp->virt_addr,
4319 + comp_temp->bus_addr);
4320 + list_del(&comp_temp->list);
4321 + kfree(comp_temp);
4322 + }
4323 + list_for_each_entry_safe(comp_temp, _comp_temp,
4324 + &queue_temp->comp_free, list) {
4325 + dma_pool_free(queue_temp->comp_pool,
4326 + comp_temp->virt_addr,
4327 + comp_temp->bus_addr);
4328 + list_del(&comp_temp->list);
4329 + kfree(comp_temp);
4330 + }
4331 + dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) *
4332 + queue_temp->n_cq, queue_temp->cq,
4333 + queue_temp->bus_addr);
4334 + dma_pool_destroy(queue_temp->comp_pool);
4335 + }
4336 +
4337 + dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) *
4338 + status->n_cq, status->cq, status->bus_addr);
4339 + return 0;
4340 +}
4341 +
4342 +static const struct of_device_id fsl_qdma_dt_ids[] = {
4343 + { .compatible = "fsl,ls1021a-qdma", },
4344 + { /* sentinel */ }
4345 +};
4346 +MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);
4347 +
4348 +static struct platform_driver fsl_qdma_driver = {
4349 + .driver = {
4350 + .name = "fsl-qdma",
4351 + .owner = THIS_MODULE,
4352 + .of_match_table = fsl_qdma_dt_ids,
4353 + },
4354 + .probe = fsl_qdma_probe,
4355 + .remove = fsl_qdma_remove,
4356 +};
4357 +
4358 +static int __init fsl_qdma_init(void)
4359 +{
4360 + return platform_driver_register(&fsl_qdma_driver);
4361 +}
4362 +subsys_initcall(fsl_qdma_init);
4363 +
4364 +static void __exit fsl_qdma_exit(void)
4365 +{
4366 + platform_driver_unregister(&fsl_qdma_driver);
4367 +}
4368 +module_exit(fsl_qdma_exit);
4369 +
4370 +MODULE_ALIAS("platform:fsl-qdma");
4371 +MODULE_DESCRIPTION("Freescale qDMA engine driver");
4372 +MODULE_LICENSE("GPL v2");