1 From 371e99a257cb714f9a6027d6571cb1a43855d926 Mon Sep 17 00:00:00 2001
2 From: Biwen Li <biwen.li@nxp.com>
3 Date: Wed, 17 Apr 2019 18:58:24 +0800
4 Subject: [PATCH] dpaa-bqman: support layerscape
6 This is an integrated patch of dpaa-bqman for layerscape
8 Signed-off-by: Biwen Li <biwen.li@nxp.com>
9 Signed-off-by: Claudiu Manoil <claudiu.manoil@nxp.com>
10 Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
11 Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
12 Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
13 Signed-off-by: Stuart Yoder <stuart.yoder@nxp.com>
14 Signed-off-by: Valentin Rothberg <valentinrothberg@gmail.com>
16 drivers/soc/fsl/qbman/Kconfig | 2 +-
17 drivers/soc/fsl/qbman/bman.c | 24 +++-
18 drivers/soc/fsl/qbman/bman_ccsr.c | 57 +++++++++-
19 drivers/soc/fsl/qbman/bman_portal.c | 44 ++++++--
20 drivers/soc/fsl/qbman/bman_priv.h | 3 +
21 drivers/soc/fsl/qbman/dpaa_sys.h | 8 +-
22 drivers/soc/fsl/qbman/qman.c | 46 +++++++-
23 drivers/soc/fsl/qbman/qman_ccsr.c | 168 +++++++++++++++++++++++-----
24 drivers/soc/fsl/qbman/qman_portal.c | 60 ++++++++--
25 drivers/soc/fsl/qbman/qman_priv.h | 5 +-
26 drivers/soc/fsl/qbman/qman_test.h | 2 -
27 include/soc/fsl/bman.h | 16 +++
28 include/soc/fsl/qman.h | 17 +++
29 13 files changed, 390 insertions(+), 62 deletions(-)
31 --- a/drivers/soc/fsl/qbman/Kconfig
32 +++ b/drivers/soc/fsl/qbman/Kconfig
35 bool "Freescale DPAA 1.x support"
36 - depends on FSL_SOC_BOOKE
37 + depends on (FSL_SOC_BOOKE || ARCH_LAYERSCAPE)
38 select GENERIC_ALLOCATOR
40 The Freescale Data Path Acceleration Architecture (DPAA) is a set of
41 --- a/drivers/soc/fsl/qbman/bman.c
42 +++ b/drivers/soc/fsl/qbman/bman.c
45 /* Portal register assists */
47 +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
48 +/* Cache-inhibited register offsets */
49 +#define BM_REG_RCR_PI_CINH 0x3000
50 +#define BM_REG_RCR_CI_CINH 0x3100
51 +#define BM_REG_RCR_ITR 0x3200
52 +#define BM_REG_CFG 0x3300
53 +#define BM_REG_SCN(n) (0x3400 + ((n) << 6))
54 +#define BM_REG_ISR 0x3e00
55 +#define BM_REG_IER 0x3e40
56 +#define BM_REG_ISDR 0x3e80
57 +#define BM_REG_IIR 0x3ec0
59 +/* Cache-enabled register offsets */
60 +#define BM_CL_CR 0x0000
61 +#define BM_CL_RR0 0x0100
62 +#define BM_CL_RR1 0x0140
63 +#define BM_CL_RCR 0x1000
64 +#define BM_CL_RCR_PI_CENA 0x3000
65 +#define BM_CL_RCR_CI_CENA 0x3100
68 /* Cache-inhibited register offsets */
69 #define BM_REG_RCR_PI_CINH 0x0000
70 #define BM_REG_RCR_CI_CINH 0x0004
72 #define BM_CL_RCR 0x1000
73 #define BM_CL_RCR_PI_CENA 0x3000
74 #define BM_CL_RCR_CI_CENA 0x3100
79 @@ -607,7 +629,7 @@ int bman_p_irqsource_add(struct bman_por
80 unsigned long irqflags;
82 local_irq_save(irqflags);
83 - set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
84 + p->irq_sources |= bits & BM_PIRQ_VISIBLE;
85 bm_out(&p->p, BM_REG_IER, p->irq_sources);
86 local_irq_restore(irqflags);
88 --- a/drivers/soc/fsl/qbman/bman_ccsr.c
89 +++ b/drivers/soc/fsl/qbman/bman_ccsr.c
93 #include "bman_priv.h"
94 +#include <linux/iommu.h>
97 EXPORT_SYMBOL(bman_ip_rev);
98 @@ -120,6 +121,7 @@ static void bm_set_memory(u64 ba, u32 si
100 static dma_addr_t fbpr_a;
101 static size_t fbpr_sz;
102 +static int __bman_probed;
104 static int bman_fbpr(struct reserved_mem *rmem)
106 @@ -166,14 +168,24 @@ static irqreturn_t bman_isr(int irq, voi
110 +int bman_is_probed(void)
112 + return __bman_probed;
114 +EXPORT_SYMBOL_GPL(bman_is_probed);
116 static int fsl_bman_probe(struct platform_device *pdev)
119 struct device *dev = &pdev->dev;
120 - struct device_node *node = dev->of_node;
121 + struct device_node *mem_node, *node = dev->of_node;
122 + struct iommu_domain *domain;
123 struct resource *res;
128 + __bman_probed = -1;
130 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
132 @@ -201,6 +213,47 @@ static int fsl_bman_probe(struct platfor
137 + * If FBPR memory wasn't defined using the qbman compatible string
138 + * try using the of_reserved_mem_device method
141 + ret = of_reserved_mem_device_init(dev);
143 + dev_err(dev, "of_reserved_mem_device_init() failed 0x%x\n",
147 + mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
149 + ret = of_property_read_u64(mem_node, "size", &size);
151 + dev_err(dev, "FBPR: of_address_to_resource fails 0x%x\n",
157 + dev_err(dev, "No memory-region found for FBPR\n");
160 + if (!dma_zalloc_coherent(dev, fbpr_sz, &fbpr_a, 0)) {
161 + dev_err(dev, "Alloc FBPR memory failed\n");
166 + dev_dbg(dev, "Allocated FBPR 0x%llx 0x%zx\n", fbpr_a, fbpr_sz);
168 + /* Create a 1-to-1 iommu mapping for FBPR area */
169 + domain = iommu_get_domain_for_dev(dev);
171 + ret = iommu_map(domain, fbpr_a, fbpr_a, PAGE_ALIGN(fbpr_sz),
172 + IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
174 + dev_warn(dev, "failed to iommu_map() %d\n", ret);
177 bm_set_memory(fbpr_a, fbpr_sz);
179 err_irq = platform_get_irq(pdev, 0);
180 @@ -240,6 +293,8 @@ static int fsl_bman_probe(struct platfor
189 --- a/drivers/soc/fsl/qbman/bman_portal.c
190 +++ b/drivers/soc/fsl/qbman/bman_portal.c
193 static struct bman_portal *affine_bportals[NR_CPUS];
194 static struct cpumask portal_cpus;
195 +static int __bman_portals_probed;
196 /* protect bman global registers and global data shared among portals */
197 static DEFINE_SPINLOCK(bman_lock);
199 @@ -85,6 +86,12 @@ static int bman_online_cpu(unsigned int
203 +int bman_portals_probed(void)
205 + return __bman_portals_probed;
207 +EXPORT_SYMBOL_GPL(bman_portals_probed);
209 static int bman_portal_probe(struct platform_device *pdev)
211 struct device *dev = &pdev->dev;
212 @@ -92,11 +99,21 @@ static int bman_portal_probe(struct plat
213 struct bm_portal_config *pcfg;
214 struct resource *addr_phys[2];
219 + err = bman_is_probed();
221 + return -EPROBE_DEFER;
223 + dev_err(&pdev->dev, "failing probe due to bman probe error\n");
227 pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
230 + __bman_portals_probed = -1;
236 @@ -104,14 +121,14 @@ static int bman_portal_probe(struct plat
239 dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
244 addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
247 dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
253 @@ -119,11 +136,18 @@ static int bman_portal_probe(struct plat
254 irq = platform_get_irq(pdev, 0);
256 dev_err(dev, "Can't get %pOF IRQ'\n", node);
262 - va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
264 + /* PPC requires a cacheable/non-coherent mapping of the portal */
265 + va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]),
266 + (pgprot_val(PAGE_KERNEL) & ~_PAGE_COHERENT));
268 + /* For ARM we can use write combine mapping. */
269 + va = ioremap_wc(addr_phys[0]->start, resource_size(addr_phys[0]));
272 dev_err(dev, "ioremap::CE failed\n");
274 @@ -131,8 +155,7 @@ static int bman_portal_probe(struct plat
276 pcfg->addr_virt[DPAA_PORTAL_CE] = va;
278 - va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
279 - _PAGE_GUARDED | _PAGE_NO_CACHE);
280 + va = ioremap(addr_phys[1]->start, resource_size(addr_phys[1]));
282 dev_err(dev, "ioremap::CI failed\n");
284 @@ -149,6 +172,9 @@ static int bman_portal_probe(struct plat
287 cpumask_set_cpu(cpu, &portal_cpus);
288 + if (!__bman_portals_probed &&
289 + cpumask_weight(&portal_cpus) == num_online_cpus())
290 + __bman_portals_probed = 1;
291 spin_unlock(&bman_lock);
294 @@ -168,6 +194,8 @@ err_portal_init:
296 iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
298 + __bman_portals_probed = -1;
303 --- a/drivers/soc/fsl/qbman/bman_priv.h
304 +++ b/drivers/soc/fsl/qbman/bman_priv.h
306 #include "dpaa_sys.h"
308 #include <soc/fsl/bman.h>
309 +#include <linux/dma-contiguous.h>
310 +#include <linux/of_address.h>
311 +#include <linux/dma-mapping.h>
313 /* Portal processing (interrupt) sources */
314 #define BM_PIRQ_RCRI 0x00000002 /* RCR Ring (below threshold) */
315 --- a/drivers/soc/fsl/qbman/dpaa_sys.h
316 +++ b/drivers/soc/fsl/qbman/dpaa_sys.h
318 #include <linux/prefetch.h>
319 #include <linux/genalloc.h>
320 #include <asm/cacheflush.h>
321 +#include <linux/io.h>
322 +#include <linux/delay.h>
324 /* For 2-element tables related to cache-inhibited and cache-enabled mappings */
325 #define DPAA_PORTAL_CE 0
326 #define DPAA_PORTAL_CI 1
328 -#if (L1_CACHE_BYTES != 32) && (L1_CACHE_BYTES != 64)
329 -#error "Unsupported Cacheline Size"
332 static inline void dpaa_flush(void *p)
335 flush_dcache_range((unsigned long)p, (unsigned long)p+64);
336 -#elif defined(CONFIG_ARM32)
337 +#elif defined(CONFIG_ARM)
338 __cpuc_flush_dcache_area(p, 64);
339 #elif defined(CONFIG_ARM64)
340 __flush_dcache_area(p, 64);
341 --- a/drivers/soc/fsl/qbman/qman.c
342 +++ b/drivers/soc/fsl/qbman/qman.c
345 /* Portal register assists */
347 +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
348 +/* Cache-inhibited register offsets */
349 +#define QM_REG_EQCR_PI_CINH 0x3000
350 +#define QM_REG_EQCR_CI_CINH 0x3040
351 +#define QM_REG_EQCR_ITR 0x3080
352 +#define QM_REG_DQRR_PI_CINH 0x3100
353 +#define QM_REG_DQRR_CI_CINH 0x3140
354 +#define QM_REG_DQRR_ITR 0x3180
355 +#define QM_REG_DQRR_DCAP 0x31C0
356 +#define QM_REG_DQRR_SDQCR 0x3200
357 +#define QM_REG_DQRR_VDQCR 0x3240
358 +#define QM_REG_DQRR_PDQCR 0x3280
359 +#define QM_REG_MR_PI_CINH 0x3300
360 +#define QM_REG_MR_CI_CINH 0x3340
361 +#define QM_REG_MR_ITR 0x3380
362 +#define QM_REG_CFG 0x3500
363 +#define QM_REG_ISR 0x3600
364 +#define QM_REG_IER 0x3640
365 +#define QM_REG_ISDR 0x3680
366 +#define QM_REG_IIR 0x36C0
367 +#define QM_REG_ITPR 0x3740
369 +/* Cache-enabled register offsets */
370 +#define QM_CL_EQCR 0x0000
371 +#define QM_CL_DQRR 0x1000
372 +#define QM_CL_MR 0x2000
373 +#define QM_CL_EQCR_PI_CENA 0x3000
374 +#define QM_CL_EQCR_CI_CENA 0x3040
375 +#define QM_CL_DQRR_PI_CENA 0x3100
376 +#define QM_CL_DQRR_CI_CENA 0x3140
377 +#define QM_CL_MR_PI_CENA 0x3300
378 +#define QM_CL_MR_CI_CENA 0x3340
379 +#define QM_CL_CR 0x3800
380 +#define QM_CL_RR0 0x3900
381 +#define QM_CL_RR1 0x3940
384 /* Cache-inhibited register offsets */
385 #define QM_REG_EQCR_PI_CINH 0x0000
386 #define QM_REG_EQCR_CI_CINH 0x0004
388 #define QM_CL_CR 0x3800
389 #define QM_CL_RR0 0x3900
390 #define QM_CL_RR1 0x3940
394 * BTW, the drivers (and h/w programming model) already obtain the required
395 @@ -909,12 +947,12 @@ static inline int qm_mc_result_timeout(s
397 static inline void fq_set(struct qman_fq *fq, u32 mask)
399 - set_bits(mask, &fq->flags);
403 static inline void fq_clear(struct qman_fq *fq, u32 mask)
405 - clear_bits(mask, &fq->flags);
406 + fq->flags &= ~mask;
409 static inline int fq_isset(struct qman_fq *fq, u32 mask)
410 @@ -1567,7 +1605,7 @@ void qman_p_irqsource_add(struct qman_po
411 unsigned long irqflags;
413 local_irq_save(irqflags);
414 - set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
415 + p->irq_sources |= bits & QM_PIRQ_VISIBLE;
416 qm_out(&p->p, QM_REG_IER, p->irq_sources);
417 local_irq_restore(irqflags);
419 @@ -1590,7 +1628,7 @@ void qman_p_irqsource_remove(struct qman
421 local_irq_save(irqflags);
422 bits &= QM_PIRQ_VISIBLE;
423 - clear_bits(bits, &p->irq_sources);
424 + p->irq_sources &= ~bits;
425 qm_out(&p->p, QM_REG_IER, p->irq_sources);
426 ier = qm_in(&p->p, QM_REG_IER);
428 --- a/drivers/soc/fsl/qbman/qman_ccsr.c
429 +++ b/drivers/soc/fsl/qbman/qman_ccsr.c
433 #include "qman_priv.h"
434 +#include <linux/iommu.h>
437 EXPORT_SYMBOL(qman_ip_rev);
438 @@ -273,6 +274,7 @@ static const struct qman_error_info_mdat
439 static u32 __iomem *qm_ccsr_start;
440 /* A SDQCR mask comprising all the available/visible pool channels */
441 static u32 qm_pools_sdqcr;
442 +static int __qman_probed;
444 static inline u32 qm_ccsr_in(u32 offset)
446 @@ -401,21 +403,42 @@ static int qm_init_pfdr(struct device *d
450 - * Ideally we would use the DMA API to turn rmem->base into a DMA address
451 - * (especially if iommu translations ever get involved). Unfortunately, the
452 - * DMA API currently does not allow mapping anything that is not backed with
454 + * QMan needs two global memory areas initialized at boot time:
455 + * 1) FQD: Frame Queue Descriptors used to manage frame queues
456 + * 2) PFDR: Packed Frame Queue Descriptor Records used to store frames
457 + * Both areas are reserved using the device tree reserved memory framework
458 + * and the addresses and sizes are initialized when the QMan device is probed
460 static dma_addr_t fqd_a, pfdr_a;
461 static size_t fqd_sz, pfdr_sz;
465 + * Support for PPC Device Tree backward compatibility when compatible
466 + * string is set to fsl-qman-fqd and fsl-qman-pfdr
468 +static int zero_priv_mem(phys_addr_t addr, size_t sz)
470 + /* map as cacheable, non-guarded */
471 + void __iomem *tmpp = ioremap_prot(addr, sz, 0);
476 + memset_io(tmpp, 0, sz);
477 + flush_dcache_range((unsigned long)tmpp,
478 + (unsigned long)tmpp + sz);
484 static int qman_fqd(struct reserved_mem *rmem)
489 WARN_ON(!(fqd_a && fqd_sz));
493 RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
494 @@ -431,32 +454,13 @@ static int qman_pfdr(struct reserved_mem
496 RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
500 static unsigned int qm_get_fqid_maxcnt(void)
506 - * Flush this memory range from data cache so that QMAN originated
507 - * transactions for this memory region could be marked non-coherent.
509 -static int zero_priv_mem(struct device *dev, struct device_node *node,
510 - phys_addr_t addr, size_t sz)
512 - /* map as cacheable, non-guarded */
513 - void __iomem *tmpp = ioremap_prot(addr, sz, 0);
518 - memset_io(tmpp, 0, sz);
519 - flush_dcache_range((unsigned long)tmpp,
520 - (unsigned long)tmpp + sz);
526 static void log_edata_bits(struct device *dev, u32 bit_count)
528 u32 i, j, mask = 0xffffffff;
529 @@ -595,6 +599,7 @@ static int qman_init_ccsr(struct device
530 #define LIO_CFG_LIODN_MASK 0x0fff0000
531 void qman_liodn_fixup(u16 channel)
535 static u32 liodn_offset;
537 @@ -614,6 +619,7 @@ void qman_liodn_fixup(u16 channel)
538 qm_ccsr_out(REG_REV3_QCSP_LIO_CFG(idx), after);
540 qm_ccsr_out(REG_QCSP_LIO_CFG(idx), after);
544 #define IO_CFG_SDEST_MASK 0x00ff0000
545 @@ -684,14 +690,24 @@ static int qman_resource_init(struct dev
549 +int qman_is_probed(void)
551 + return __qman_probed;
553 +EXPORT_SYMBOL_GPL(qman_is_probed);
555 static int fsl_qman_probe(struct platform_device *pdev)
557 struct device *dev = &pdev->dev;
558 - struct device_node *node = dev->of_node;
559 + struct device_node *mem_node, *node = dev->of_node;
560 + struct iommu_domain *domain;
561 struct resource *res;
567 + __qman_probed = -1;
569 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
571 @@ -717,6 +733,8 @@ static int fsl_qman_probe(struct platfor
572 qman_ip_rev = QMAN_REV30;
573 else if (major == 3 && minor == 1)
574 qman_ip_rev = QMAN_REV31;
575 + else if (major == 3 && minor == 2)
576 + qman_ip_rev = QMAN_REV32;
578 dev_err(dev, "Unknown QMan version\n");
580 @@ -727,10 +745,96 @@ static int fsl_qman_probe(struct platfor
581 qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
584 - ret = zero_priv_mem(dev, node, fqd_a, fqd_sz);
591 + * For PPC backward DT compatibility
592 + * FQD memory MUST be zero'd by software
594 + zero_priv_mem(fqd_a, fqd_sz);
596 + WARN(1, "Unexpected architecture using non shared-dma-mem reservations");
600 + * Order of memory regions is assumed as FQD followed by PFDR
601 + * in order to ensure allocations from the correct regions the
602 + * driver initializes then allocates each piece in order
604 + ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 0);
606 + dev_err(dev, "of_reserved_mem_device_init_by_idx(0) failed 0x%x\n",
610 + mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
612 + ret = of_property_read_u64(mem_node, "size", &size);
614 + dev_err(dev, "FQD: of_address_to_resource fails 0x%x\n",
620 + dev_err(dev, "No memory-region found for FQD\n");
623 + if (!dma_zalloc_coherent(dev, fqd_sz, &fqd_a, 0)) {
624 + dev_err(dev, "Alloc FQD memory failed\n");
629 + * Disassociate the FQD reserved memory area from the device
630 + * because a device can only have one DMA memory area. This
631 + * should be fine since the memory is allocated and initialized
632 + * and only ever accessed by the QMan device from now on
634 + of_reserved_mem_device_release(dev);
636 + dev_dbg(dev, "Allocated FQD 0x%llx 0x%zx\n", fqd_a, fqd_sz);
639 + /* Setup PFDR memory */
640 + ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 1);
642 + dev_err(dev, "of_reserved_mem_device_init(1) failed 0x%x\n",
646 + mem_node = of_parse_phandle(dev->of_node, "memory-region", 1);
648 + ret = of_property_read_u64(mem_node, "size", &size);
650 + dev_err(dev, "PFDR: of_address_to_resource fails 0x%x\n",
656 + dev_err(dev, "No memory-region found for PFDR\n");
659 + if (!dma_zalloc_coherent(dev, pfdr_sz, &pfdr_a, 0)) {
660 + dev_err(dev, "Alloc PFDR Failed size 0x%zx\n", pfdr_sz);
664 + dev_info(dev, "Allocated PFDR 0x%llx 0x%zx\n", pfdr_a, pfdr_sz);
666 + /* Create a 1-to-1 iommu mapping for fqd and pfdr areas */
667 + domain = iommu_get_domain_for_dev(dev);
669 + ret = iommu_map(domain, fqd_a, fqd_a, PAGE_ALIGN(fqd_sz),
670 + IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
672 + dev_warn(dev, "iommu_map(fqd) failed %d\n", ret);
673 + ret = iommu_map(domain, pfdr_a, pfdr_a, PAGE_ALIGN(pfdr_sz),
674 + IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
676 + dev_warn(dev, "iommu_map(pfdr) failed %d\n", ret);
679 ret = qman_init_ccsr(dev);
681 @@ -793,6 +897,8 @@ static int fsl_qman_probe(struct platfor
690 --- a/drivers/soc/fsl/qbman/qman_portal.c
691 +++ b/drivers/soc/fsl/qbman/qman_portal.c
695 #include "qman_priv.h"
696 +#include <linux/iommu.h>
698 struct qman_portal *qman_dma_portal;
699 EXPORT_SYMBOL(qman_dma_portal);
700 @@ -38,6 +39,7 @@ EXPORT_SYMBOL(qman_dma_portal);
701 #define CONFIG_FSL_DPA_PIRQ_FAST 1
703 static struct cpumask portal_cpus;
704 +static int __qman_portals_probed;
705 /* protect qman global registers and global data shared among portals */
706 static DEFINE_SPINLOCK(qman_lock);
708 @@ -218,19 +220,36 @@ static int qman_online_cpu(unsigned int
712 +int qman_portals_probed(void)
714 + return __qman_portals_probed;
716 +EXPORT_SYMBOL_GPL(qman_portals_probed);
718 static int qman_portal_probe(struct platform_device *pdev)
720 struct device *dev = &pdev->dev;
721 struct device_node *node = dev->of_node;
722 + struct iommu_domain *domain;
723 struct qm_portal_config *pcfg;
724 struct resource *addr_phys[2];
729 + err = qman_is_probed();
731 + return -EPROBE_DEFER;
733 + dev_err(&pdev->dev, "failing probe due to qman probe error\n");
737 pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
740 + __qman_portals_probed = -1;
746 @@ -238,19 +257,20 @@ static int qman_portal_probe(struct plat
749 dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
754 addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
757 dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
762 err = of_property_read_u32(node, "cell-index", &val);
764 dev_err(dev, "Can't get %pOF property 'cell-index'\n", node);
765 + __qman_portals_probed = -1;
769 @@ -258,11 +278,18 @@ static int qman_portal_probe(struct plat
770 irq = platform_get_irq(pdev, 0);
772 dev_err(dev, "Can't get %pOF IRQ\n", node);
778 - va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
780 + /* PPC requires a cacheable/non-coherent mapping of the portal */
781 + va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]),
782 + (pgprot_val(PAGE_KERNEL) & ~_PAGE_COHERENT));
784 + /* For ARM we can use write combine mapping. */
785 + va = ioremap_wc(addr_phys[0]->start, resource_size(addr_phys[0]));
788 dev_err(dev, "ioremap::CE failed\n");
790 @@ -270,8 +297,7 @@ static int qman_portal_probe(struct plat
792 pcfg->addr_virt[DPAA_PORTAL_CE] = va;
794 - va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
795 - _PAGE_GUARDED | _PAGE_NO_CACHE);
796 + va = ioremap(addr_phys[1]->start, resource_size(addr_phys[1]));
798 dev_err(dev, "ioremap::CI failed\n");
800 @@ -279,6 +305,21 @@ static int qman_portal_probe(struct plat
802 pcfg->addr_virt[DPAA_PORTAL_CI] = va;
804 + /* Create a 1-to-1 iommu mapping for cena portal area */
805 + domain = iommu_get_domain_for_dev(dev);
808 + * Note: not mapping this as cacheable triggers the infamous
811 + err = iommu_map(domain,
812 + addr_phys[0]->start, addr_phys[0]->start,
813 + PAGE_ALIGN(resource_size(addr_phys[0])),
814 + IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
816 + dev_warn(dev, "failed to iommu_map() %d\n", err);
819 pcfg->pools = qm_get_pools_sdqcr();
821 spin_lock(&qman_lock);
822 @@ -290,6 +331,9 @@ static int qman_portal_probe(struct plat
825 cpumask_set_cpu(cpu, &portal_cpus);
826 + if (!__qman_portals_probed &&
827 + cpumask_weight(&portal_cpus) == num_online_cpus())
828 + __qman_portals_probed = 1;
829 spin_unlock(&qman_lock);
832 @@ -314,6 +358,8 @@ err_portal_init:
834 iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
836 + __qman_portals_probed = -1;
841 --- a/drivers/soc/fsl/qbman/qman_priv.h
842 +++ b/drivers/soc/fsl/qbman/qman_priv.h
844 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
847 -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
849 #include "dpaa_sys.h"
851 #include <soc/fsl/qman.h>
852 #include <linux/dma-mapping.h>
853 #include <linux/iommu.h>
854 +#include <linux/dma-contiguous.h>
855 +#include <linux/of_address.h>
857 #if defined(CONFIG_FSL_PAMU)
858 #include <asm/fsl_pamu_stash.h>
859 @@ -187,6 +187,7 @@ struct qm_portal_config {
860 #define QMAN_REV20 0x0200
861 #define QMAN_REV30 0x0300
862 #define QMAN_REV31 0x0301
863 +#define QMAN_REV32 0x0302
864 extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
866 #define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */
867 --- a/drivers/soc/fsl/qbman/qman_test.h
868 +++ b/drivers/soc/fsl/qbman/qman_test.h
871 #include "qman_priv.h"
873 -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
875 int qman_test_stash(void);
876 int qman_test_api(void);
877 --- a/include/soc/fsl/bman.h
878 +++ b/include/soc/fsl/bman.h
879 @@ -126,4 +126,20 @@ int bman_release(struct bman_pool *pool,
881 int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num);
884 + * bman_is_probed - Check if bman is probed
886 + * Returns 1 if the bman driver successfully probed, -1 if the bman driver
887 + * failed to probe or 0 if the bman driver has not been probed yet.
889 +int bman_is_probed(void);
891 + * bman_portals_probed - Check if all cpu bound bman portals are probed
893 + * Returns 1 if all the required cpu bound bman portals successfully probed,
894 + * -1 if probe errors appeared or 0 if the bman portals have not yet finished
897 +int bman_portals_probed(void);
899 #endif /* __FSL_BMAN_H */
900 --- a/include/soc/fsl/qman.h
901 +++ b/include/soc/fsl/qman.h
902 @@ -1186,4 +1186,21 @@ int qman_alloc_cgrid_range(u32 *result,
904 int qman_release_cgrid(u32 id);
907 + * qman_is_probed - Check if qman is probed
909 + * Returns 1 if the qman driver successfully probed, -1 if the qman driver
910 + * failed to probe or 0 if the qman driver has not been probed yet.
912 +int qman_is_probed(void);
915 + * qman_portals_probed - Check if all cpu bound qman portals are probed
917 + * Returns 1 if all the required cpu bound qman portals successfully probed,
918 + * -1 if probe errors appeared or 0 if the qman portals have not yet finished
921 +int qman_portals_probed(void);
923 #endif /* __FSL_QMAN_H */