1 From 48dbe4b3a31795b8efdfff82f69eccd086052eed Mon Sep 17 00:00:00 2001
2 From: Biwen Li <biwen.li@nxp.com>
3 Date: Fri, 16 Nov 2018 10:27:30 +0800
4 Subject: [PATCH 16/39] dpaa-bqman: support layerscape
5 This is an integrated patch of dpaa-bqman for layerscape
7 Signed-off-by: Claudiu Manoil <claudiu.manoil@nxp.com>
8 Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
9 Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
10 Signed-off-by: Stuart Yoder <stuart.yoder@nxp.com>
11 Signed-off-by: Valentin Rothberg <valentinrothberg@gmail.com>
12 Signed-off-by: Biwen Li <biwen.li@nxp.com>
14 drivers/soc/fsl/qbman/Kconfig | 2 +-
15 drivers/soc/fsl/qbman/bman.c | 24 ++++-
16 drivers/soc/fsl/qbman/bman_ccsr.c | 35 ++++++-
17 drivers/soc/fsl/qbman/bman_portal.c | 12 ++-
18 drivers/soc/fsl/qbman/bman_priv.h | 3 +
19 drivers/soc/fsl/qbman/dpaa_sys.h | 8 +-
20 drivers/soc/fsl/qbman/qman.c | 46 ++++++++-
21 drivers/soc/fsl/qbman/qman_ccsr.c | 140 ++++++++++++++++++++++------
22 drivers/soc/fsl/qbman/qman_portal.c | 12 ++-
23 drivers/soc/fsl/qbman/qman_priv.h | 5 +-
24 drivers/soc/fsl/qbman/qman_test.h | 2 -
25 11 files changed, 236 insertions(+), 53 deletions(-)
27 --- a/drivers/soc/fsl/qbman/Kconfig
28 +++ b/drivers/soc/fsl/qbman/Kconfig
31 bool "Freescale DPAA 1.x support"
32 - depends on FSL_SOC_BOOKE
33 + depends on (FSL_SOC_BOOKE || ARCH_LAYERSCAPE)
34 select GENERIC_ALLOCATOR
36 The Freescale Data Path Acceleration Architecture (DPAA) is a set of
37 --- a/drivers/soc/fsl/qbman/bman.c
38 +++ b/drivers/soc/fsl/qbman/bman.c
41 /* Portal register assists */
43 +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
44 +/* Cache-inhibited register offsets */
45 +#define BM_REG_RCR_PI_CINH 0x3000
46 +#define BM_REG_RCR_CI_CINH 0x3100
47 +#define BM_REG_RCR_ITR 0x3200
48 +#define BM_REG_CFG 0x3300
49 +#define BM_REG_SCN(n) (0x3400 + ((n) << 6))
50 +#define BM_REG_ISR 0x3e00
51 +#define BM_REG_IER 0x3e40
52 +#define BM_REG_ISDR 0x3e80
53 +#define BM_REG_IIR 0x3ec0
55 +/* Cache-enabled register offsets */
56 +#define BM_CL_CR 0x0000
57 +#define BM_CL_RR0 0x0100
58 +#define BM_CL_RR1 0x0140
59 +#define BM_CL_RCR 0x1000
60 +#define BM_CL_RCR_PI_CENA 0x3000
61 +#define BM_CL_RCR_CI_CENA 0x3100
64 /* Cache-inhibited register offsets */
65 #define BM_REG_RCR_PI_CINH 0x0000
66 #define BM_REG_RCR_CI_CINH 0x0004
68 #define BM_CL_RCR 0x1000
69 #define BM_CL_RCR_PI_CENA 0x3000
70 #define BM_CL_RCR_CI_CENA 0x3100
75 @@ -607,7 +629,7 @@ int bman_p_irqsource_add(struct bman_por
76 unsigned long irqflags;
78 local_irq_save(irqflags);
79 - set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
80 + p->irq_sources |= bits & BM_PIRQ_VISIBLE;
81 bm_out(&p->p, BM_REG_IER, p->irq_sources);
82 local_irq_restore(irqflags);
84 --- a/drivers/soc/fsl/qbman/bman_ccsr.c
85 +++ b/drivers/soc/fsl/qbman/bman_ccsr.c
86 @@ -170,10 +170,11 @@ static int fsl_bman_probe(struct platfor
89 struct device *dev = &pdev->dev;
90 - struct device_node *node = dev->of_node;
91 + struct device_node *mem_node, *node = dev->of_node;
97 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
99 @@ -201,6 +202,38 @@ static int fsl_bman_probe(struct platfor
104 + * If FBPR memory wasn't defined using the qbman compatible string
105 + * try using the of_reserved_mem_device method
108 + ret = of_reserved_mem_device_init(dev);
110 + dev_err(dev, "of_reserved_mem_device_init() failed 0x%x\n",
114 + mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
116 + ret = of_property_read_u64(mem_node, "size", &size);
118 + dev_err(dev, "FBPR: of_address_to_resource fails 0x%x\n",
124 + dev_err(dev, "No memory-region found for FBPR\n");
127 + if (!dma_zalloc_coherent(dev, fbpr_sz, &fbpr_a, 0)) {
128 + dev_err(dev, "Alloc FBPR memory failed\n");
133 + dev_dbg(dev, "Allocated FBPR 0x%llx 0x%zx\n", fbpr_a, fbpr_sz);
135 bm_set_memory(fbpr_a, fbpr_sz);
137 err_irq = platform_get_irq(pdev, 0);
138 --- a/drivers/soc/fsl/qbman/bman_portal.c
139 +++ b/drivers/soc/fsl/qbman/bman_portal.c
140 @@ -123,7 +123,14 @@ static int bman_portal_probe(struct plat
144 - va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
146 + /* PPC requires a cacheable/non-coherent mapping of the portal */
147 + va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]),
148 + (pgprot_val(PAGE_KERNEL) & ~_PAGE_COHERENT));
150 + /* For ARM we can use write combine mapping. */
151 + va = ioremap_wc(addr_phys[0]->start, resource_size(addr_phys[0]));
154 dev_err(dev, "ioremap::CE failed\n");
156 @@ -131,8 +138,7 @@ static int bman_portal_probe(struct plat
158 pcfg->addr_virt[DPAA_PORTAL_CE] = va;
160 - va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
161 - _PAGE_GUARDED | _PAGE_NO_CACHE);
162 + va = ioremap(addr_phys[1]->start, resource_size(addr_phys[1]));
164 dev_err(dev, "ioremap::CI failed\n");
166 --- a/drivers/soc/fsl/qbman/bman_priv.h
167 +++ b/drivers/soc/fsl/qbman/bman_priv.h
169 #include "dpaa_sys.h"
171 #include <soc/fsl/bman.h>
172 +#include <linux/dma-contiguous.h>
173 +#include <linux/of_address.h>
174 +#include <linux/dma-mapping.h>
176 /* Portal processing (interrupt) sources */
177 #define BM_PIRQ_RCRI 0x00000002 /* RCR Ring (below threshold) */
178 --- a/drivers/soc/fsl/qbman/dpaa_sys.h
179 +++ b/drivers/soc/fsl/qbman/dpaa_sys.h
181 #include <linux/prefetch.h>
182 #include <linux/genalloc.h>
183 #include <asm/cacheflush.h>
184 +#include <linux/io.h>
185 +#include <linux/delay.h>
187 /* For 2-element tables related to cache-inhibited and cache-enabled mappings */
188 #define DPAA_PORTAL_CE 0
189 #define DPAA_PORTAL_CI 1
191 -#if (L1_CACHE_BYTES != 32) && (L1_CACHE_BYTES != 64)
192 -#error "Unsupported Cacheline Size"
195 static inline void dpaa_flush(void *p)
198 flush_dcache_range((unsigned long)p, (unsigned long)p+64);
199 -#elif defined(CONFIG_ARM32)
200 +#elif defined(CONFIG_ARM)
201 __cpuc_flush_dcache_area(p, 64);
202 #elif defined(CONFIG_ARM64)
203 __flush_dcache_area(p, 64);
204 --- a/drivers/soc/fsl/qbman/qman.c
205 +++ b/drivers/soc/fsl/qbman/qman.c
208 /* Portal register assists */
210 +#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
211 +/* Cache-inhibited register offsets */
212 +#define QM_REG_EQCR_PI_CINH 0x3000
213 +#define QM_REG_EQCR_CI_CINH 0x3040
214 +#define QM_REG_EQCR_ITR 0x3080
215 +#define QM_REG_DQRR_PI_CINH 0x3100
216 +#define QM_REG_DQRR_CI_CINH 0x3140
217 +#define QM_REG_DQRR_ITR 0x3180
218 +#define QM_REG_DQRR_DCAP 0x31C0
219 +#define QM_REG_DQRR_SDQCR 0x3200
220 +#define QM_REG_DQRR_VDQCR 0x3240
221 +#define QM_REG_DQRR_PDQCR 0x3280
222 +#define QM_REG_MR_PI_CINH 0x3300
223 +#define QM_REG_MR_CI_CINH 0x3340
224 +#define QM_REG_MR_ITR 0x3380
225 +#define QM_REG_CFG 0x3500
226 +#define QM_REG_ISR 0x3600
227 +#define QM_REG_IER 0x3640
228 +#define QM_REG_ISDR 0x3680
229 +#define QM_REG_IIR 0x36C0
230 +#define QM_REG_ITPR 0x3740
232 +/* Cache-enabled register offsets */
233 +#define QM_CL_EQCR 0x0000
234 +#define QM_CL_DQRR 0x1000
235 +#define QM_CL_MR 0x2000
236 +#define QM_CL_EQCR_PI_CENA 0x3000
237 +#define QM_CL_EQCR_CI_CENA 0x3040
238 +#define QM_CL_DQRR_PI_CENA 0x3100
239 +#define QM_CL_DQRR_CI_CENA 0x3140
240 +#define QM_CL_MR_PI_CENA 0x3300
241 +#define QM_CL_MR_CI_CENA 0x3340
242 +#define QM_CL_CR 0x3800
243 +#define QM_CL_RR0 0x3900
244 +#define QM_CL_RR1 0x3940
247 /* Cache-inhibited register offsets */
248 #define QM_REG_EQCR_PI_CINH 0x0000
249 #define QM_REG_EQCR_CI_CINH 0x0004
251 #define QM_CL_CR 0x3800
252 #define QM_CL_RR0 0x3900
253 #define QM_CL_RR1 0x3940
257 * BTW, the drivers (and h/w programming model) already obtain the required
258 @@ -909,12 +947,12 @@ static inline int qm_mc_result_timeout(s
260 static inline void fq_set(struct qman_fq *fq, u32 mask)
262 - set_bits(mask, &fq->flags);
266 static inline void fq_clear(struct qman_fq *fq, u32 mask)
268 - clear_bits(mask, &fq->flags);
269 + fq->flags &= ~mask;
272 static inline int fq_isset(struct qman_fq *fq, u32 mask)
273 @@ -1566,7 +1604,7 @@ void qman_p_irqsource_add(struct qman_po
274 unsigned long irqflags;
276 local_irq_save(irqflags);
277 - set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
278 + p->irq_sources |= bits & QM_PIRQ_VISIBLE;
279 qm_out(&p->p, QM_REG_IER, p->irq_sources);
280 local_irq_restore(irqflags);
282 @@ -1589,7 +1627,7 @@ void qman_p_irqsource_remove(struct qman
284 local_irq_save(irqflags);
285 bits &= QM_PIRQ_VISIBLE;
286 - clear_bits(bits, &p->irq_sources);
287 + p->irq_sources &= ~bits;
288 qm_out(&p->p, QM_REG_IER, p->irq_sources);
289 ier = qm_in(&p->p, QM_REG_IER);
291 --- a/drivers/soc/fsl/qbman/qman_ccsr.c
292 +++ b/drivers/soc/fsl/qbman/qman_ccsr.c
293 @@ -401,21 +401,42 @@ static int qm_init_pfdr(struct device *d
297 - * Ideally we would use the DMA API to turn rmem->base into a DMA address
298 - * (especially if iommu translations ever get involved). Unfortunately, the
299 - * DMA API currently does not allow mapping anything that is not backed with
301 + * QMan needs two global memory areas initialized at boot time:
302 + * 1) FQD: Frame Queue Descriptors used to manage frame queues
303 + * 2) PFDR: Packed Frame Queue Descriptor Records used to store frames
304 + * Both areas are reserved using the device tree reserved memory framework
305 + * and the addresses and sizes are initialized when the QMan device is probed
307 static dma_addr_t fqd_a, pfdr_a;
308 static size_t fqd_sz, pfdr_sz;
312 + * Support for PPC Device Tree backward compatibility when compatible
313 + * string is set to fsl-qman-fqd and fsl-qman-pfdr
315 +static int zero_priv_mem(phys_addr_t addr, size_t sz)
317 + /* map as cacheable, non-guarded */
318 + void __iomem *tmpp = ioremap_prot(addr, sz, 0);
323 + memset_io(tmpp, 0, sz);
324 + flush_dcache_range((unsigned long)tmpp,
325 + (unsigned long)tmpp + sz);
331 static int qman_fqd(struct reserved_mem *rmem)
336 WARN_ON(!(fqd_a && fqd_sz));
340 RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
341 @@ -431,32 +452,13 @@ static int qman_pfdr(struct reserved_mem
343 RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
347 static unsigned int qm_get_fqid_maxcnt(void)
353 - * Flush this memory range from data cache so that QMAN originated
354 - * transactions for this memory region could be marked non-coherent.
356 -static int zero_priv_mem(struct device *dev, struct device_node *node,
357 - phys_addr_t addr, size_t sz)
359 - /* map as cacheable, non-guarded */
360 - void __iomem *tmpp = ioremap_prot(addr, sz, 0);
365 - memset_io(tmpp, 0, sz);
366 - flush_dcache_range((unsigned long)tmpp,
367 - (unsigned long)tmpp + sz);
373 static void log_edata_bits(struct device *dev, u32 bit_count)
375 u32 i, j, mask = 0xffffffff;
376 @@ -687,11 +689,12 @@ static int qman_resource_init(struct dev
377 static int fsl_qman_probe(struct platform_device *pdev)
379 struct device *dev = &pdev->dev;
380 - struct device_node *node = dev->of_node;
381 + struct device_node *mem_node, *node = dev->of_node;
382 struct resource *res;
388 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
390 @@ -717,6 +720,8 @@ static int fsl_qman_probe(struct platfor
391 qman_ip_rev = QMAN_REV30;
392 else if (major == 3 && minor == 1)
393 qman_ip_rev = QMAN_REV31;
394 + else if (major == 3 && minor == 2)
395 + qman_ip_rev = QMAN_REV32;
397 dev_err(dev, "Unknown QMan version\n");
399 @@ -727,10 +732,83 @@ static int fsl_qman_probe(struct platfor
400 qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
403 - ret = zero_priv_mem(dev, node, fqd_a, fqd_sz);
410 + * For PPC backward DT compatibility
411 + * FQD memory MUST be zero'd by software
413 + zero_priv_mem(fqd_a, fqd_sz);
415 + WARN(1, "Unexpected architecture using non shared-dma-mem reservations");
419 + * Order of memory regions is assumed as FQD followed by PFDR
420 + * in order to ensure allocations from the correct regions the
421 + * driver initializes then allocates each piece in order
423 + ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 0);
425 + dev_err(dev, "of_reserved_mem_device_init_by_idx(0) failed 0x%x\n",
429 + mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
431 + ret = of_property_read_u64(mem_node, "size", &size);
433 + dev_err(dev, "FQD: of_address_to_resource fails 0x%x\n",
439 + dev_err(dev, "No memory-region found for FQD\n");
442 + if (!dma_zalloc_coherent(dev, fqd_sz, &fqd_a, 0)) {
443 + dev_err(dev, "Alloc FQD memory failed\n");
448 + * Disassociate the FQD reserved memory area from the device
449 + * because a device can only have one DMA memory area. This
450 + * should be fine since the memory is allocated and initialized
451 + * and only ever accessed by the QMan device from now on
453 + of_reserved_mem_device_release(dev);
455 + dev_dbg(dev, "Allocated FQD 0x%llx 0x%zx\n", fqd_a, fqd_sz);
458 + /* Setup PFDR memory */
459 + ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 1);
461 + dev_err(dev, "of_reserved_mem_device_init(1) failed 0x%x\n",
465 + mem_node = of_parse_phandle(dev->of_node, "memory-region", 1);
467 + ret = of_property_read_u64(mem_node, "size", &size);
469 + dev_err(dev, "PFDR: of_address_to_resource fails 0x%x\n",
475 + dev_err(dev, "No memory-region found for PFDR\n");
478 + if (!dma_zalloc_coherent(dev, pfdr_sz, &pfdr_a, 0)) {
479 + dev_err(dev, "Alloc PFDR Failed size 0x%zx\n", pfdr_sz);
483 + dev_info(dev, "Allocated PFDR 0x%llx 0x%zx\n", pfdr_a, pfdr_sz);
485 ret = qman_init_ccsr(dev);
487 --- a/drivers/soc/fsl/qbman/qman_portal.c
488 +++ b/drivers/soc/fsl/qbman/qman_portal.c
489 @@ -262,7 +262,14 @@ static int qman_portal_probe(struct plat
493 - va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
495 + /* PPC requires a cacheable/non-coherent mapping of the portal */
496 + va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]),
497 + (pgprot_val(PAGE_KERNEL) & ~_PAGE_COHERENT));
499 + /* For ARM we can use write combine mapping. */
500 + va = ioremap_wc(addr_phys[0]->start, resource_size(addr_phys[0]));
503 dev_err(dev, "ioremap::CE failed\n");
505 @@ -270,8 +277,7 @@ static int qman_portal_probe(struct plat
507 pcfg->addr_virt[DPAA_PORTAL_CE] = va;
509 - va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
510 - _PAGE_GUARDED | _PAGE_NO_CACHE);
511 + va = ioremap(addr_phys[1]->start, resource_size(addr_phys[1]));
513 dev_err(dev, "ioremap::CI failed\n");
515 --- a/drivers/soc/fsl/qbman/qman_priv.h
516 +++ b/drivers/soc/fsl/qbman/qman_priv.h
518 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
521 -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
523 #include "dpaa_sys.h"
525 #include <soc/fsl/qman.h>
526 #include <linux/dma-mapping.h>
527 #include <linux/iommu.h>
528 +#include <linux/dma-contiguous.h>
529 +#include <linux/of_address.h>
531 #if defined(CONFIG_FSL_PAMU)
532 #include <asm/fsl_pamu_stash.h>
533 @@ -187,6 +187,7 @@ struct qm_portal_config {
534 #define QMAN_REV20 0x0200
535 #define QMAN_REV30 0x0300
536 #define QMAN_REV31 0x0301
537 +#define QMAN_REV32 0x0302
538 extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
540 #define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */
541 --- a/drivers/soc/fsl/qbman/qman_test.h
542 +++ b/drivers/soc/fsl/qbman/qman_test.h
545 #include "qman_priv.h"
547 -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
549 int qman_test_stash(void);
550 int qman_test_api(void);