target/linux/layerscape/patches-4.14/711-dpaa-bqman-support-layerscape.patch
1 From 371e99a257cb714f9a6027d6571cb1a43855d926 Mon Sep 17 00:00:00 2001
2 From: Biwen Li <biwen.li@nxp.com>
3 Date: Wed, 17 Apr 2019 18:58:24 +0800
4 Subject: [PATCH] dpaa-bqman: support layerscape
5
6 This is an integrated patch adding dpaa-bqman support for layerscape
7
8 Signed-off-by: Biwen Li <biwen.li@nxp.com>
9 Signed-off-by: Claudiu Manoil <claudiu.manoil@nxp.com>
10 Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
11 Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
12 Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
13 Signed-off-by: Stuart Yoder <stuart.yoder@nxp.com>
14 Signed-off-by: Valentin Rothberg <valentinrothberg@gmail.com>
15 ---
16 drivers/soc/fsl/qbman/Kconfig | 2 +-
17 drivers/soc/fsl/qbman/bman.c | 24 +++-
18 drivers/soc/fsl/qbman/bman_ccsr.c | 57 +++++++++-
19 drivers/soc/fsl/qbman/bman_portal.c | 44 ++++++--
20 drivers/soc/fsl/qbman/bman_priv.h | 3 +
21 drivers/soc/fsl/qbman/dpaa_sys.h | 8 +-
22 drivers/soc/fsl/qbman/qman.c | 46 +++++++-
23 drivers/soc/fsl/qbman/qman_ccsr.c | 168 +++++++++++++++++++++++-----
24 drivers/soc/fsl/qbman/qman_portal.c | 60 ++++++++--
25 drivers/soc/fsl/qbman/qman_priv.h | 5 +-
26 drivers/soc/fsl/qbman/qman_test.h | 2 -
27 include/soc/fsl/bman.h | 16 +++
28 include/soc/fsl/qman.h | 17 +++
29 13 files changed, 390 insertions(+), 62 deletions(-)
30
31 --- a/drivers/soc/fsl/qbman/Kconfig
32 +++ b/drivers/soc/fsl/qbman/Kconfig
33 @@ -1,6 +1,6 @@
34 menuconfig FSL_DPAA
35 bool "Freescale DPAA 1.x support"
36 - depends on FSL_SOC_BOOKE
37 + depends on (FSL_SOC_BOOKE || ARCH_LAYERSCAPE)
38 select GENERIC_ALLOCATOR
39 help
40 The Freescale Data Path Acceleration Architecture (DPAA) is a set of
41 --- a/drivers/soc/fsl/qbman/bman.c
42 +++ b/drivers/soc/fsl/qbman/bman.c
43 @@ -35,6 +35,27 @@
44
45 /* Portal register assists */
46
47 +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
48 +/* Cache-inhibited register offsets */
49 +#define BM_REG_RCR_PI_CINH 0x3000
50 +#define BM_REG_RCR_CI_CINH 0x3100
51 +#define BM_REG_RCR_ITR 0x3200
52 +#define BM_REG_CFG 0x3300
53 +#define BM_REG_SCN(n) (0x3400 + ((n) << 6))
54 +#define BM_REG_ISR 0x3e00
55 +#define BM_REG_IER 0x3e40
56 +#define BM_REG_ISDR 0x3e80
57 +#define BM_REG_IIR 0x3ec0
58 +
59 +/* Cache-enabled register offsets */
60 +#define BM_CL_CR 0x0000
61 +#define BM_CL_RR0 0x0100
62 +#define BM_CL_RR1 0x0140
63 +#define BM_CL_RCR 0x1000
64 +#define BM_CL_RCR_PI_CENA 0x3000
65 +#define BM_CL_RCR_CI_CENA 0x3100
66 +
67 +#else
68 /* Cache-inhibited register offsets */
69 #define BM_REG_RCR_PI_CINH 0x0000
70 #define BM_REG_RCR_CI_CINH 0x0004
71 @@ -53,6 +74,7 @@
72 #define BM_CL_RCR 0x1000
73 #define BM_CL_RCR_PI_CENA 0x3000
74 #define BM_CL_RCR_CI_CENA 0x3100
75 +#endif
76
77 /*
78 * Portal modes.
79 @@ -607,7 +629,7 @@ int bman_p_irqsource_add(struct bman_por
80 unsigned long irqflags;
81
82 local_irq_save(irqflags);
83 - set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
84 + p->irq_sources |= bits & BM_PIRQ_VISIBLE;
85 bm_out(&p->p, BM_REG_IER, p->irq_sources);
86 local_irq_restore(irqflags);
87 return 0;
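
The set_bits()/clear_bits() helpers dropped above are PowerPC bitops with no generic counterpart, so the patch open-codes them; the plain read-modify-write is safe because irq_sources is per-portal state that is only ever touched with local interrupts disabled on the owning CPU. The same conversion appears in qman.c further down. A minimal sketch of the resulting pattern (portal_irq_enable is a hypothetical name; the body mirrors the hunk above and assumes the driver's internal types):

static void portal_irq_enable(struct bman_portal *p, u32 bits)
{
	unsigned long flags;

	local_irq_save(flags);                     /* no IRQs on this CPU */
	p->irq_sources |= bits & BM_PIRQ_VISIBLE;  /* plain RMW, no racing writer */
	bm_out(&p->p, BM_REG_IER, p->irq_sources); /* propagate mask to hardware */
	local_irq_restore(flags);
}
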
88 --- a/drivers/soc/fsl/qbman/bman_ccsr.c
89 +++ b/drivers/soc/fsl/qbman/bman_ccsr.c
90 @@ -29,6 +29,7 @@
91 */
92
93 #include "bman_priv.h"
94 +#include <linux/iommu.h>
95
96 u16 bman_ip_rev;
97 EXPORT_SYMBOL(bman_ip_rev);
98 @@ -120,6 +121,7 @@ static void bm_set_memory(u64 ba, u32 si
99 */
100 static dma_addr_t fbpr_a;
101 static size_t fbpr_sz;
102 +static int __bman_probed;
103
104 static int bman_fbpr(struct reserved_mem *rmem)
105 {
106 @@ -166,14 +168,24 @@ static irqreturn_t bman_isr(int irq, voi
107 return IRQ_HANDLED;
108 }
109
110 +int bman_is_probed(void)
111 +{
112 + return __bman_probed;
113 +}
114 +EXPORT_SYMBOL_GPL(bman_is_probed);
115 +
116 static int fsl_bman_probe(struct platform_device *pdev)
117 {
118 int ret, err_irq;
119 struct device *dev = &pdev->dev;
120 - struct device_node *node = dev->of_node;
121 + struct device_node *mem_node, *node = dev->of_node;
122 + struct iommu_domain *domain;
123 struct resource *res;
124 u16 id, bm_pool_cnt;
125 u8 major, minor;
126 + u64 size;
127 +
128 + __bman_probed = -1;
129
130 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
131 if (!res) {
132 @@ -201,6 +213,47 @@ static int fsl_bman_probe(struct platfor
133 return -ENODEV;
134 }
135
136 + /*
137 +	 * If FBPR memory wasn't defined using the qbman compatible string,
138 + * try using the of_reserved_mem_device method
139 + */
140 + if (!fbpr_a) {
141 + ret = of_reserved_mem_device_init(dev);
142 + if (ret) {
143 + dev_err(dev, "of_reserved_mem_device_init() failed 0x%x\n",
144 + ret);
145 + return -ENODEV;
146 + }
147 + mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
148 + if (mem_node) {
149 + ret = of_property_read_u64(mem_node, "size", &size);
150 + if (ret) {
151 +			dev_err(dev, "FBPR: of_property_read_u64 fails 0x%x\n",
152 + ret);
153 + return -ENODEV;
154 + }
155 + fbpr_sz = size;
156 + } else {
157 + dev_err(dev, "No memory-region found for FBPR\n");
158 + return -ENODEV;
159 + }
160 + if (!dma_zalloc_coherent(dev, fbpr_sz, &fbpr_a, 0)) {
161 + dev_err(dev, "Alloc FBPR memory failed\n");
162 + return -ENODEV;
163 + }
164 + }
165 +
166 + dev_dbg(dev, "Allocated FBPR 0x%llx 0x%zx\n", fbpr_a, fbpr_sz);
167 +
168 +	/* Create a 1-to-1 iommu mapping for the FBPR area */
169 + domain = iommu_get_domain_for_dev(dev);
170 + if (domain) {
171 + ret = iommu_map(domain, fbpr_a, fbpr_a, PAGE_ALIGN(fbpr_sz),
172 + IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
173 + if (ret)
174 + dev_warn(dev, "failed to iommu_map() %d\n", ret);
175 + }
176 +
177 bm_set_memory(fbpr_a, fbpr_sz);
178
179 err_irq = platform_get_irq(pdev, 0);
180 @@ -240,6 +293,8 @@ static int fsl_bman_probe(struct platfor
181 return ret;
182 }
183
184 + __bman_probed = 1;
185 +
186 return 0;
187 };
188
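
The iommu_map() call above gives the block a 1-to-1 (identity) mapping of the FBPR area, so the physical address written into the CCSR registers is also a valid IOVA when an SMMU fronts BMan. A hedged sketch of that step in isolation (map_identity is a hypothetical helper; the calls are the same ones used in the hunk):

#include <linux/iommu.h>
#include <linux/mm.h>		/* PAGE_ALIGN */

static int map_identity(struct device *dev, dma_addr_t pa, size_t sz)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		return 0;	/* no IOMMU in the path, nothing to map */

	/* IOVA == physical address, so device- and CPU-side views agree */
	return iommu_map(domain, pa, pa, PAGE_ALIGN(sz),
			 IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
}
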
189 --- a/drivers/soc/fsl/qbman/bman_portal.c
190 +++ b/drivers/soc/fsl/qbman/bman_portal.c
191 @@ -32,6 +32,7 @@
192
193 static struct bman_portal *affine_bportals[NR_CPUS];
194 static struct cpumask portal_cpus;
195 +static int __bman_portals_probed;
196 /* protect bman global registers and global data shared among portals */
197 static DEFINE_SPINLOCK(bman_lock);
198
199 @@ -85,6 +86,12 @@ static int bman_online_cpu(unsigned int
200 return 0;
201 }
202
203 +int bman_portals_probed(void)
204 +{
205 + return __bman_portals_probed;
206 +}
207 +EXPORT_SYMBOL_GPL(bman_portals_probed);
208 +
209 static int bman_portal_probe(struct platform_device *pdev)
210 {
211 struct device *dev = &pdev->dev;
212 @@ -92,11 +99,21 @@ static int bman_portal_probe(struct plat
213 struct bm_portal_config *pcfg;
214 struct resource *addr_phys[2];
215 void __iomem *va;
216 - int irq, cpu;
217 + int irq, cpu, err;
218 +
219 + err = bman_is_probed();
220 + if (!err)
221 + return -EPROBE_DEFER;
222 + if (err < 0) {
223 + dev_err(&pdev->dev, "failing probe due to bman probe error\n");
224 + return -ENODEV;
225 + }
226
227 pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
228 - if (!pcfg)
229 + if (!pcfg) {
230 + __bman_portals_probed = -1;
231 return -ENOMEM;
232 + }
233
234 pcfg->dev = dev;
235
236 @@ -104,14 +121,14 @@ static int bman_portal_probe(struct plat
237 DPAA_PORTAL_CE);
238 if (!addr_phys[0]) {
239 dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
240 - return -ENXIO;
241 + goto err_ioremap1;
242 }
243
244 addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
245 DPAA_PORTAL_CI);
246 if (!addr_phys[1]) {
247 dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
248 - return -ENXIO;
249 + goto err_ioremap1;
250 }
251
252 pcfg->cpu = -1;
253 @@ -119,11 +136,18 @@ static int bman_portal_probe(struct plat
254 irq = platform_get_irq(pdev, 0);
255 if (irq <= 0) {
256 dev_err(dev, "Can't get %pOF IRQ'\n", node);
257 - return -ENXIO;
258 + goto err_ioremap1;
259 }
260 pcfg->irq = irq;
261
262 - va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
263 +#ifdef CONFIG_PPC
264 + /* PPC requires a cacheable/non-coherent mapping of the portal */
265 + va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]),
266 + (pgprot_val(PAGE_KERNEL) & ~_PAGE_COHERENT));
267 +#else
268 +	/* For ARM we can use a write-combine mapping. */
269 + va = ioremap_wc(addr_phys[0]->start, resource_size(addr_phys[0]));
270 +#endif
271 if (!va) {
272 dev_err(dev, "ioremap::CE failed\n");
273 goto err_ioremap1;
274 @@ -131,8 +155,7 @@ static int bman_portal_probe(struct plat
275
276 pcfg->addr_virt[DPAA_PORTAL_CE] = va;
277
278 - va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
279 - _PAGE_GUARDED | _PAGE_NO_CACHE);
280 + va = ioremap(addr_phys[1]->start, resource_size(addr_phys[1]));
281 if (!va) {
282 dev_err(dev, "ioremap::CI failed\n");
283 goto err_ioremap2;
284 @@ -149,6 +172,9 @@ static int bman_portal_probe(struct plat
285 }
286
287 cpumask_set_cpu(cpu, &portal_cpus);
288 + if (!__bman_portals_probed &&
289 + cpumask_weight(&portal_cpus) == num_online_cpus())
290 + __bman_portals_probed = 1;
291 spin_unlock(&bman_lock);
292 pcfg->cpu = cpu;
293
294 @@ -168,6 +194,8 @@ err_portal_init:
295 err_ioremap2:
296 iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
297 err_ioremap1:
298 + __bman_portals_probed = -1;
299 +
300 return -ENXIO;
301 }
302
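
The probe change above splits the portal mappings by architecture: PowerPC keeps the cacheable/non-coherent view of the cache-enabled (CE) area it requires, ARM/ARM64 uses a write-combining mapping, and the cache-inhibited (CI) area becomes a plain device mapping on both. A condensed sketch of the two mappings (map_portal, res_ce and res_ci are hypothetical names):

static int map_portal(struct resource *res_ce, struct resource *res_ci,
		      void __iomem **ce, void __iomem **ci)
{
#ifdef CONFIG_PPC
	/* cacheable but non-coherent, as the hunk above requires for PPC */
	*ce = ioremap_prot(res_ce->start, resource_size(res_ce),
			   pgprot_val(PAGE_KERNEL) & ~_PAGE_COHERENT);
#else
	/* ARM/ARM64: write-combining normal memory is sufficient */
	*ce = ioremap_wc(res_ce->start, resource_size(res_ce));
#endif
	if (!*ce)
		return -ENXIO;

	/* the cache-inhibited registers always use an uncached mapping */
	*ci = ioremap(res_ci->start, resource_size(res_ci));
	return *ci ? 0 : -ENXIO;
}
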
303 --- a/drivers/soc/fsl/qbman/bman_priv.h
304 +++ b/drivers/soc/fsl/qbman/bman_priv.h
305 @@ -33,6 +33,9 @@
306 #include "dpaa_sys.h"
307
308 #include <soc/fsl/bman.h>
309 +#include <linux/dma-contiguous.h>
310 +#include <linux/of_address.h>
311 +#include <linux/dma-mapping.h>
312
313 /* Portal processing (interrupt) sources */
314 #define BM_PIRQ_RCRI 0x00000002 /* RCR Ring (below threshold) */
315 --- a/drivers/soc/fsl/qbman/dpaa_sys.h
316 +++ b/drivers/soc/fsl/qbman/dpaa_sys.h
317 @@ -44,20 +44,18 @@
318 #include <linux/prefetch.h>
319 #include <linux/genalloc.h>
320 #include <asm/cacheflush.h>
321 +#include <linux/io.h>
322 +#include <linux/delay.h>
323
324 /* For 2-element tables related to cache-inhibited and cache-enabled mappings */
325 #define DPAA_PORTAL_CE 0
326 #define DPAA_PORTAL_CI 1
327
328 -#if (L1_CACHE_BYTES != 32) && (L1_CACHE_BYTES != 64)
329 -#error "Unsupported Cacheline Size"
330 -#endif
331 -
332 static inline void dpaa_flush(void *p)
333 {
334 #ifdef CONFIG_PPC
335 flush_dcache_range((unsigned long)p, (unsigned long)p+64);
336 -#elif defined(CONFIG_ARM32)
337 +#elif defined(CONFIG_ARM)
338 __cpuc_flush_dcache_area(p, 64);
339 #elif defined(CONFIG_ARM64)
340 __flush_dcache_area(p, 64);
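
Whichever branch is taken, dpaa_flush() cleans exactly one 64-byte cache line starting at p, so a caller that needs to flush a larger object walks it line by line. A hypothetical helper (not in the driver) built on top of it:

static inline void dpaa_flush_range(void *start, size_t len)
{
	char *p = start;
	char *end = p + len;

	/* one dpaa_flush() per 64-byte cache line */
	for (; p < end; p += 64)
		dpaa_flush(p);
}
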
341 --- a/drivers/soc/fsl/qbman/qman.c
342 +++ b/drivers/soc/fsl/qbman/qman.c
343 @@ -41,6 +41,43 @@
344
345 /* Portal register assists */
346
347 +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
348 +/* Cache-inhibited register offsets */
349 +#define QM_REG_EQCR_PI_CINH 0x3000
350 +#define QM_REG_EQCR_CI_CINH 0x3040
351 +#define QM_REG_EQCR_ITR 0x3080
352 +#define QM_REG_DQRR_PI_CINH 0x3100
353 +#define QM_REG_DQRR_CI_CINH 0x3140
354 +#define QM_REG_DQRR_ITR 0x3180
355 +#define QM_REG_DQRR_DCAP 0x31C0
356 +#define QM_REG_DQRR_SDQCR 0x3200
357 +#define QM_REG_DQRR_VDQCR 0x3240
358 +#define QM_REG_DQRR_PDQCR 0x3280
359 +#define QM_REG_MR_PI_CINH 0x3300
360 +#define QM_REG_MR_CI_CINH 0x3340
361 +#define QM_REG_MR_ITR 0x3380
362 +#define QM_REG_CFG 0x3500
363 +#define QM_REG_ISR 0x3600
364 +#define QM_REG_IER 0x3640
365 +#define QM_REG_ISDR 0x3680
366 +#define QM_REG_IIR 0x36C0
367 +#define QM_REG_ITPR 0x3740
368 +
369 +/* Cache-enabled register offsets */
370 +#define QM_CL_EQCR 0x0000
371 +#define QM_CL_DQRR 0x1000
372 +#define QM_CL_MR 0x2000
373 +#define QM_CL_EQCR_PI_CENA 0x3000
374 +#define QM_CL_EQCR_CI_CENA 0x3040
375 +#define QM_CL_DQRR_PI_CENA 0x3100
376 +#define QM_CL_DQRR_CI_CENA 0x3140
377 +#define QM_CL_MR_PI_CENA 0x3300
378 +#define QM_CL_MR_CI_CENA 0x3340
379 +#define QM_CL_CR 0x3800
380 +#define QM_CL_RR0 0x3900
381 +#define QM_CL_RR1 0x3940
382 +
383 +#else
384 /* Cache-inhibited register offsets */
385 #define QM_REG_EQCR_PI_CINH 0x0000
386 #define QM_REG_EQCR_CI_CINH 0x0004
387 @@ -75,6 +112,7 @@
388 #define QM_CL_CR 0x3800
389 #define QM_CL_RR0 0x3900
390 #define QM_CL_RR1 0x3940
391 +#endif
392
393 /*
394 * BTW, the drivers (and h/w programming model) already obtain the required
395 @@ -909,12 +947,12 @@ static inline int qm_mc_result_timeout(s
396
397 static inline void fq_set(struct qman_fq *fq, u32 mask)
398 {
399 - set_bits(mask, &fq->flags);
400 + fq->flags |= mask;
401 }
402
403 static inline void fq_clear(struct qman_fq *fq, u32 mask)
404 {
405 - clear_bits(mask, &fq->flags);
406 + fq->flags &= ~mask;
407 }
408
409 static inline int fq_isset(struct qman_fq *fq, u32 mask)
410 @@ -1567,7 +1605,7 @@ void qman_p_irqsource_add(struct qman_po
411 unsigned long irqflags;
412
413 local_irq_save(irqflags);
414 - set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
415 + p->irq_sources |= bits & QM_PIRQ_VISIBLE;
416 qm_out(&p->p, QM_REG_IER, p->irq_sources);
417 local_irq_restore(irqflags);
418 }
419 @@ -1590,7 +1628,7 @@ void qman_p_irqsource_remove(struct qman
420 */
421 local_irq_save(irqflags);
422 bits &= QM_PIRQ_VISIBLE;
423 - clear_bits(bits, &p->irq_sources);
424 + p->irq_sources &= ~bits;
425 qm_out(&p->p, QM_REG_IER, p->irq_sources);
426 ier = qm_in(&p->p, QM_REG_IER);
427 /*
428 --- a/drivers/soc/fsl/qbman/qman_ccsr.c
429 +++ b/drivers/soc/fsl/qbman/qman_ccsr.c
430 @@ -29,6 +29,7 @@
431 */
432
433 #include "qman_priv.h"
434 +#include <linux/iommu.h>
435
436 u16 qman_ip_rev;
437 EXPORT_SYMBOL(qman_ip_rev);
438 @@ -273,6 +274,7 @@ static const struct qman_error_info_mdat
439 static u32 __iomem *qm_ccsr_start;
440 /* A SDQCR mask comprising all the available/visible pool channels */
441 static u32 qm_pools_sdqcr;
442 +static int __qman_probed;
443
444 static inline u32 qm_ccsr_in(u32 offset)
445 {
446 @@ -401,21 +403,42 @@ static int qm_init_pfdr(struct device *d
447 }
448
449 /*
450 - * Ideally we would use the DMA API to turn rmem->base into a DMA address
451 - * (especially if iommu translations ever get involved). Unfortunately, the
452 - * DMA API currently does not allow mapping anything that is not backed with
453 - * a struct page.
454 + * QMan needs two global memory areas initialized at boot time:
455 + * 1) FQD: Frame Queue Descriptors used to manage frame queues
456 + * 2) PFDR: Packed Frame Queue Descriptor Records used to store frames
457 + * Both areas are reserved using the device tree reserved memory framework
458 + * and their addresses and sizes are initialized when the QMan device is probed.
459 */
460 static dma_addr_t fqd_a, pfdr_a;
461 static size_t fqd_sz, pfdr_sz;
462
463 +#ifdef CONFIG_PPC
464 +/*
465 + * Support for PPC Device Tree backward compatibility when the compatible
466 + * string is set to fsl,qman-fqd and fsl,qman-pfdr
467 + */
468 +static int zero_priv_mem(phys_addr_t addr, size_t sz)
469 +{
470 + /* map as cacheable, non-guarded */
471 + void __iomem *tmpp = ioremap_prot(addr, sz, 0);
472 +
473 + if (!tmpp)
474 + return -ENOMEM;
475 +
476 + memset_io(tmpp, 0, sz);
477 + flush_dcache_range((unsigned long)tmpp,
478 + (unsigned long)tmpp + sz);
479 + iounmap(tmpp);
480 +
481 + return 0;
482 +}
483 +
484 static int qman_fqd(struct reserved_mem *rmem)
485 {
486 fqd_a = rmem->base;
487 fqd_sz = rmem->size;
488
489 WARN_ON(!(fqd_a && fqd_sz));
490 -
491 return 0;
492 }
493 RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
494 @@ -431,32 +454,13 @@ static int qman_pfdr(struct reserved_mem
495 }
496 RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
497
498 +#endif
499 +
500 static unsigned int qm_get_fqid_maxcnt(void)
501 {
502 return fqd_sz / 64;
503 }
504
505 -/*
506 - * Flush this memory range from data cache so that QMAN originated
507 - * transactions for this memory region could be marked non-coherent.
508 - */
509 -static int zero_priv_mem(struct device *dev, struct device_node *node,
510 - phys_addr_t addr, size_t sz)
511 -{
512 - /* map as cacheable, non-guarded */
513 - void __iomem *tmpp = ioremap_prot(addr, sz, 0);
514 -
515 - if (!tmpp)
516 - return -ENOMEM;
517 -
518 - memset_io(tmpp, 0, sz);
519 - flush_dcache_range((unsigned long)tmpp,
520 - (unsigned long)tmpp + sz);
521 - iounmap(tmpp);
522 -
523 - return 0;
524 -}
525 -
526 static void log_edata_bits(struct device *dev, u32 bit_count)
527 {
528 u32 i, j, mask = 0xffffffff;
529 @@ -595,6 +599,7 @@ static int qman_init_ccsr(struct device
530 #define LIO_CFG_LIODN_MASK 0x0fff0000
531 void qman_liodn_fixup(u16 channel)
532 {
533 +#ifdef CONFIG_PPC
534 static int done;
535 static u32 liodn_offset;
536 u32 before, after;
537 @@ -614,6 +619,7 @@ void qman_liodn_fixup(u16 channel)
538 qm_ccsr_out(REG_REV3_QCSP_LIO_CFG(idx), after);
539 else
540 qm_ccsr_out(REG_QCSP_LIO_CFG(idx), after);
541 +#endif
542 }
543
544 #define IO_CFG_SDEST_MASK 0x00ff0000
545 @@ -684,14 +690,24 @@ static int qman_resource_init(struct dev
546 return 0;
547 }
548
549 +int qman_is_probed(void)
550 +{
551 + return __qman_probed;
552 +}
553 +EXPORT_SYMBOL_GPL(qman_is_probed);
554 +
555 static int fsl_qman_probe(struct platform_device *pdev)
556 {
557 struct device *dev = &pdev->dev;
558 - struct device_node *node = dev->of_node;
559 + struct device_node *mem_node, *node = dev->of_node;
560 + struct iommu_domain *domain;
561 struct resource *res;
562 int ret, err_irq;
563 u16 id;
564 u8 major, minor;
565 + u64 size;
566 +
567 + __qman_probed = -1;
568
569 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
570 if (!res) {
571 @@ -717,6 +733,8 @@ static int fsl_qman_probe(struct platfor
572 qman_ip_rev = QMAN_REV30;
573 else if (major == 3 && minor == 1)
574 qman_ip_rev = QMAN_REV31;
575 + else if (major == 3 && minor == 2)
576 + qman_ip_rev = QMAN_REV32;
577 else {
578 dev_err(dev, "Unknown QMan version\n");
579 return -ENODEV;
580 @@ -727,10 +745,96 @@ static int fsl_qman_probe(struct platfor
581 qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
582 }
583
584 - ret = zero_priv_mem(dev, node, fqd_a, fqd_sz);
585 - WARN_ON(ret);
586 - if (ret)
587 - return -ENODEV;
588 + if (fqd_a) {
589 +#ifdef CONFIG_PPC
590 + /*
591 +		 * For PPC backward DT compatibility,
592 +		 * FQD memory MUST be zeroed by software
593 + */
594 + zero_priv_mem(fqd_a, fqd_sz);
595 +#else
596 +		WARN(1, "Unexpected architecture using non shared-dma-mem reservations");
597 +#endif
598 + } else {
599 + /*
600 +		 * The memory regions are assumed to be ordered FQD first, then
601 +		 * PFDR. To ensure allocations come from the correct region, the
602 +		 * driver initializes and then allocates each piece in order.
603 + */
604 + ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 0);
605 + if (ret) {
606 + dev_err(dev, "of_reserved_mem_device_init_by_idx(0) failed 0x%x\n",
607 + ret);
608 + return -ENODEV;
609 + }
610 + mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
611 + if (mem_node) {
612 + ret = of_property_read_u64(mem_node, "size", &size);
613 + if (ret) {
614 +				dev_err(dev, "FQD: of_property_read_u64 fails 0x%x\n",
615 + ret);
616 + return -ENODEV;
617 + }
618 + fqd_sz = size;
619 + } else {
620 + dev_err(dev, "No memory-region found for FQD\n");
621 + return -ENODEV;
622 + }
623 + if (!dma_zalloc_coherent(dev, fqd_sz, &fqd_a, 0)) {
624 + dev_err(dev, "Alloc FQD memory failed\n");
625 + return -ENODEV;
626 + }
627 +
628 + /*
629 + * Disassociate the FQD reserved memory area from the device
630 + * because a device can only have one DMA memory area. This
631 + * should be fine since the memory is allocated and initialized
632 + * and only ever accessed by the QMan device from now on
633 + */
634 + of_reserved_mem_device_release(dev);
635 + }
636 + dev_dbg(dev, "Allocated FQD 0x%llx 0x%zx\n", fqd_a, fqd_sz);
637 +
638 + if (!pfdr_a) {
639 + /* Setup PFDR memory */
640 + ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 1);
641 + if (ret) {
642 +			dev_err(dev, "of_reserved_mem_device_init_by_idx(1) failed 0x%x\n",
643 + ret);
644 + return -ENODEV;
645 + }
646 + mem_node = of_parse_phandle(dev->of_node, "memory-region", 1);
647 + if (mem_node) {
648 + ret = of_property_read_u64(mem_node, "size", &size);
649 + if (ret) {
650 +				dev_err(dev, "PFDR: of_property_read_u64 fails 0x%x\n",
651 + ret);
652 + return -ENODEV;
653 + }
654 + pfdr_sz = size;
655 + } else {
656 + dev_err(dev, "No memory-region found for PFDR\n");
657 + return -ENODEV;
658 + }
659 + if (!dma_zalloc_coherent(dev, pfdr_sz, &pfdr_a, 0)) {
660 + dev_err(dev, "Alloc PFDR Failed size 0x%zx\n", pfdr_sz);
661 + return -ENODEV;
662 + }
663 + }
664 + dev_info(dev, "Allocated PFDR 0x%llx 0x%zx\n", pfdr_a, pfdr_sz);
665 +
666 +	/* Create a 1-to-1 iommu mapping for the FQD and PFDR areas */
667 + domain = iommu_get_domain_for_dev(dev);
668 + if (domain) {
669 + ret = iommu_map(domain, fqd_a, fqd_a, PAGE_ALIGN(fqd_sz),
670 + IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
671 + if (ret)
672 + dev_warn(dev, "iommu_map(fqd) failed %d\n", ret);
673 + ret = iommu_map(domain, pfdr_a, pfdr_a, PAGE_ALIGN(pfdr_sz),
674 + IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
675 + if (ret)
676 + dev_warn(dev, "iommu_map(pfdr) failed %d\n", ret);
677 + }
678
679 ret = qman_init_ccsr(dev);
680 if (ret) {
681 @@ -793,6 +897,8 @@ static int fsl_qman_probe(struct platfor
682 if (ret)
683 return ret;
684
685 + __qman_probed = 1;
686 +
687 return 0;
688 }
689
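
A frame-queue descriptor occupies 64 bytes, which is why qm_get_fqid_maxcnt() above is simply fqd_sz / 64: the size read from the FQD memory-region node directly bounds the number of usable FQIDs. A small worked example (the 2 MiB figure is illustrative, not mandated by the driver):

#include <stdio.h>

int main(void)
{
	unsigned long long fqd_sz = 2ULL << 20;		/* 2 MiB FQD reservation */

	/* one 64-byte descriptor per frame queue */
	printf("max FQIDs: %llu\n", fqd_sz / 64);	/* prints 32768 */
	return 0;
}
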
690 --- a/drivers/soc/fsl/qbman/qman_portal.c
691 +++ b/drivers/soc/fsl/qbman/qman_portal.c
692 @@ -29,6 +29,7 @@
693 */
694
695 #include "qman_priv.h"
696 +#include <linux/iommu.h>
697
698 struct qman_portal *qman_dma_portal;
699 EXPORT_SYMBOL(qman_dma_portal);
700 @@ -38,6 +39,7 @@ EXPORT_SYMBOL(qman_dma_portal);
701 #define CONFIG_FSL_DPA_PIRQ_FAST 1
702
703 static struct cpumask portal_cpus;
704 +static int __qman_portals_probed;
705 /* protect qman global registers and global data shared among portals */
706 static DEFINE_SPINLOCK(qman_lock);
707
708 @@ -218,19 +220,36 @@ static int qman_online_cpu(unsigned int
709 return 0;
710 }
711
712 +int qman_portals_probed(void)
713 +{
714 + return __qman_portals_probed;
715 +}
716 +EXPORT_SYMBOL_GPL(qman_portals_probed);
717 +
718 static int qman_portal_probe(struct platform_device *pdev)
719 {
720 struct device *dev = &pdev->dev;
721 struct device_node *node = dev->of_node;
722 + struct iommu_domain *domain;
723 struct qm_portal_config *pcfg;
724 struct resource *addr_phys[2];
725 void __iomem *va;
726 int irq, cpu, err;
727 u32 val;
728
729 + err = qman_is_probed();
730 + if (!err)
731 + return -EPROBE_DEFER;
732 + if (err < 0) {
733 + dev_err(&pdev->dev, "failing probe due to qman probe error\n");
734 + return -ENODEV;
735 + }
736 +
737 pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
738 - if (!pcfg)
739 + if (!pcfg) {
740 + __qman_portals_probed = -1;
741 return -ENOMEM;
742 + }
743
744 pcfg->dev = dev;
745
746 @@ -238,19 +257,20 @@ static int qman_portal_probe(struct plat
747 DPAA_PORTAL_CE);
748 if (!addr_phys[0]) {
749 dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
750 - return -ENXIO;
751 + goto err_ioremap1;
752 }
753
754 addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
755 DPAA_PORTAL_CI);
756 if (!addr_phys[1]) {
757 dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
758 - return -ENXIO;
759 + goto err_ioremap1;
760 }
761
762 err = of_property_read_u32(node, "cell-index", &val);
763 if (err) {
764 dev_err(dev, "Can't get %pOF property 'cell-index'\n", node);
765 + __qman_portals_probed = -1;
766 return err;
767 }
768 pcfg->channel = val;
769 @@ -258,11 +278,18 @@ static int qman_portal_probe(struct plat
770 irq = platform_get_irq(pdev, 0);
771 if (irq <= 0) {
772 dev_err(dev, "Can't get %pOF IRQ\n", node);
773 - return -ENXIO;
774 + goto err_ioremap1;
775 }
776 pcfg->irq = irq;
777
778 - va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
779 +#ifdef CONFIG_PPC
780 + /* PPC requires a cacheable/non-coherent mapping of the portal */
781 + va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]),
782 + (pgprot_val(PAGE_KERNEL) & ~_PAGE_COHERENT));
783 +#else
784 +	/* For ARM we can use a write-combine mapping. */
785 + va = ioremap_wc(addr_phys[0]->start, resource_size(addr_phys[0]));
786 +#endif
787 if (!va) {
788 dev_err(dev, "ioremap::CE failed\n");
789 goto err_ioremap1;
790 @@ -270,8 +297,7 @@ static int qman_portal_probe(struct plat
791
792 pcfg->addr_virt[DPAA_PORTAL_CE] = va;
793
794 - va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
795 - _PAGE_GUARDED | _PAGE_NO_CACHE);
796 + va = ioremap(addr_phys[1]->start, resource_size(addr_phys[1]));
797 if (!va) {
798 dev_err(dev, "ioremap::CI failed\n");
799 goto err_ioremap2;
800 @@ -279,6 +305,21 @@ static int qman_portal_probe(struct plat
801
802 pcfg->addr_virt[DPAA_PORTAL_CI] = va;
803
804 +	/* Create a 1-to-1 iommu mapping for the CENA portal area */
805 + domain = iommu_get_domain_for_dev(dev);
806 + if (domain) {
807 + /*
808 + * Note: not mapping this as cacheable triggers the infamous
809 + * QMan CIDE error.
810 + */
811 + err = iommu_map(domain,
812 + addr_phys[0]->start, addr_phys[0]->start,
813 + PAGE_ALIGN(resource_size(addr_phys[0])),
814 + IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
815 + if (err)
816 + dev_warn(dev, "failed to iommu_map() %d\n", err);
817 + }
818 +
819 pcfg->pools = qm_get_pools_sdqcr();
820
821 spin_lock(&qman_lock);
822 @@ -290,6 +331,9 @@ static int qman_portal_probe(struct plat
823 }
824
825 cpumask_set_cpu(cpu, &portal_cpus);
826 + if (!__qman_portals_probed &&
827 + cpumask_weight(&portal_cpus) == num_online_cpus())
828 + __qman_portals_probed = 1;
829 spin_unlock(&qman_lock);
830 pcfg->cpu = cpu;
831
832 @@ -314,6 +358,8 @@ err_portal_init:
833 err_ioremap2:
834 iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
835 err_ioremap1:
836 + __qman_portals_probed = -1;
837 +
838 return -ENXIO;
839 }
840
841 --- a/drivers/soc/fsl/qbman/qman_priv.h
842 +++ b/drivers/soc/fsl/qbman/qman_priv.h
843 @@ -28,13 +28,13 @@
844 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
845 */
846
847 -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
848 -
849 #include "dpaa_sys.h"
850
851 #include <soc/fsl/qman.h>
852 #include <linux/dma-mapping.h>
853 #include <linux/iommu.h>
854 +#include <linux/dma-contiguous.h>
855 +#include <linux/of_address.h>
856
857 #if defined(CONFIG_FSL_PAMU)
858 #include <asm/fsl_pamu_stash.h>
859 @@ -187,6 +187,7 @@ struct qm_portal_config {
860 #define QMAN_REV20 0x0200
861 #define QMAN_REV30 0x0300
862 #define QMAN_REV31 0x0301
863 +#define QMAN_REV32 0x0302
864 extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
865
866 #define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */
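
The QMAN_REVxx constants encode the IP revision as (major << 8) | minor, so the new QMAN_REV32 (0x0302) is exactly the major == 3, minor == 2 pair that fsl_qman_probe() now accepts. A quick check of the encoding:

#include <stdio.h>

int main(void)
{
	unsigned int major = 3, minor = 2;	/* as decoded in fsl_qman_probe() */
	unsigned int rev = (major << 8) | minor;

	printf("qman_ip_rev = 0x%04x\n", rev);	/* 0x0302 == QMAN_REV32 */
	return 0;
}
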
867 --- a/drivers/soc/fsl/qbman/qman_test.h
868 +++ b/drivers/soc/fsl/qbman/qman_test.h
869 @@ -30,7 +30,5 @@
870
871 #include "qman_priv.h"
872
873 -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
874 -
875 int qman_test_stash(void);
876 int qman_test_api(void);
877 --- a/include/soc/fsl/bman.h
878 +++ b/include/soc/fsl/bman.h
879 @@ -126,4 +126,20 @@ int bman_release(struct bman_pool *pool,
880 */
881 int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num);
882
883 +/**
884 + * bman_is_probed - Check if bman is probed
885 + *
886 + * Returns 1 if the bman driver successfully probed, -1 if the bman driver
887 + * failed to probe, or 0 if the bman driver has not been probed yet.
888 + */
889 +int bman_is_probed(void);
890 +/**
891 + * bman_portals_probed - Check if all cpu bound bman portals are probed
892 + *
893 + * Returns 1 if all the required cpu bound bman portals successfully probed,
894 + * -1 if probe errors appeared, or 0 if the bman portals have not yet
895 + * finished probing.
896 + */
897 +int bman_portals_probed(void);
898 +
899 #endif /* __FSL_BMAN_H */
900 --- a/include/soc/fsl/qman.h
901 +++ b/include/soc/fsl/qman.h
902 @@ -1186,4 +1186,21 @@ int qman_alloc_cgrid_range(u32 *result,
903 */
904 int qman_release_cgrid(u32 id);
905
906 +/**
907 + * qman_is_probed - Check if qman is probed
908 + *
909 + * Returns 1 if the qman driver successfully probed, -1 if the qman driver
910 + * failed to probe, or 0 if the qman driver has not been probed yet.
911 + */
912 +int qman_is_probed(void);
913 +
914 +/**
915 + * qman_portals_probed - Check if all cpu bound qman portals are probed
916 + *
917 + * Returns 1 if all the required cpu bound qman portals successfully probed,
918 + * -1 if probe errors appeared, or 0 if the qman portals have not yet
919 + * finished probing.
920 + */
921 +int qman_portals_probed(void);
922 +
923 #endif /* __FSL_QMAN_H */
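
Together, qman_is_probed() and qman_portals_probed() (like the bman counterparts above) give dependent drivers a three-state handshake: 0 means defer, -1 means fail, 1 means proceed. A hedged sketch of a consumer probe honoring that ordering, mirroring the pattern qman_portal_probe() itself uses (example_probe is a hypothetical driver callback):

#include <linux/platform_device.h>
#include <soc/fsl/qman.h>

static int example_probe(struct platform_device *pdev)
{
	int err;

	err = qman_is_probed();
	if (!err)
		return -EPROBE_DEFER;	/* QMan device not probed yet */
	if (err < 0)
		return -ENODEV;		/* QMan probe failed for good */

	err = qman_portals_probed();
	if (!err)
		return -EPROBE_DEFER;	/* cpu-bound portals still coming up */
	if (err < 0)
		return -ENODEV;

	/* safe to allocate and schedule frame queues from here on */
	return 0;
}
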