From 48dbe4b3a31795b8efdfff82f69eccd086052eed Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
Date: Fri, 16 Nov 2018 10:27:30 +0800
Subject: [PATCH 16/39] dpaa-bqman: support layerscape

This is an integrated patch of dpaa-bqman for layerscape.

Signed-off-by: Claudiu Manoil <claudiu.manoil@nxp.com>
Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
Signed-off-by: Stuart Yoder <stuart.yoder@nxp.com>
Signed-off-by: Valentin Rothberg <valentinrothberg@gmail.com>
Signed-off-by: Biwen Li <biwen.li@nxp.com>
---
 drivers/soc/fsl/qbman/Kconfig       |   2 +-
 drivers/soc/fsl/qbman/bman.c        |  24 ++++-
 drivers/soc/fsl/qbman/bman_ccsr.c   |  35 ++++++-
 drivers/soc/fsl/qbman/bman_portal.c |  12 ++-
 drivers/soc/fsl/qbman/bman_priv.h   |   3 +
 drivers/soc/fsl/qbman/dpaa_sys.h    |   8 +-
 drivers/soc/fsl/qbman/qman.c        |  46 ++++++++-
 drivers/soc/fsl/qbman/qman_ccsr.c   | 140 ++++++++++++++++++++++------
 drivers/soc/fsl/qbman/qman_portal.c |  12 ++-
 drivers/soc/fsl/qbman/qman_priv.h   |   5 +-
 drivers/soc/fsl/qbman/qman_test.h   |   2 -
 11 files changed, 236 insertions(+), 53 deletions(-)

--- a/drivers/soc/fsl/qbman/Kconfig
+++ b/drivers/soc/fsl/qbman/Kconfig
@@ -1,6 +1,6 @@
 menuconfig FSL_DPAA
 	bool "Freescale DPAA 1.x support"
-	depends on FSL_SOC_BOOKE
+	depends on (FSL_SOC_BOOKE || ARCH_LAYERSCAPE)
 	select GENERIC_ALLOCATOR
 	help
 	  The Freescale Data Path Acceleration Architecture (DPAA) is a set of
--- a/drivers/soc/fsl/qbman/bman.c
+++ b/drivers/soc/fsl/qbman/bman.c
@@ -35,6 +35,27 @@

 /* Portal register assists */

+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH	0x3000
+#define BM_REG_RCR_CI_CINH	0x3100
+#define BM_REG_RCR_ITR		0x3200
+#define BM_REG_CFG		0x3300
+#define BM_REG_SCN(n)		(0x3400 + ((n) << 6))
+#define BM_REG_ISR		0x3e00
+#define BM_REG_IER		0x3e40
+#define BM_REG_ISDR		0x3e80
+#define BM_REG_IIR		0x3ec0
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR		0x0000
+#define BM_CL_RR0		0x0100
+#define BM_CL_RR1		0x0140
+#define BM_CL_RCR		0x1000
+#define BM_CL_RCR_PI_CENA	0x3000
+#define BM_CL_RCR_CI_CENA	0x3100
+
+#else
 /* Cache-inhibited register offsets */
 #define BM_REG_RCR_PI_CINH	0x0000
 #define BM_REG_RCR_CI_CINH	0x0004
@@ -53,6 +74,7 @@
 #define BM_CL_RCR	0x1000
 #define BM_CL_RCR_PI_CENA	0x3000
 #define BM_CL_RCR_CI_CENA	0x3100
+#endif

 /*
  * Portal modes.
@@ -607,7 +629,7 @@ int bman_p_irqsource_add(struct bman_por
 	unsigned long irqflags;

 	local_irq_save(irqflags);
-	set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
+	p->irq_sources |= bits & BM_PIRQ_VISIBLE;
 	bm_out(&p->p, BM_REG_IER, p->irq_sources);
 	local_irq_restore(irqflags);
 	return 0;
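
The hunk above switches the portal register map by architecture: the ARM/ARM64 QBMan block exposes the cache-inhibited registers at 0x3000-based offsets, while PPC keeps them 0x0000-based, so only the offset table changes and the accessors stay common. A minimal sketch of how an accessor consumes these tables (the struct and names here are illustrative, loosely modeled on the driver's bm_out(), not its actual definition):

#include <linux/io.h>

/* Simplified stand-in for the driver's portal addressing. */
struct example_bm_portal_addr {
	void __iomem *ce;	/* cache-enabled region, BM_CL_* offsets */
	void __iomem *ci;	/* cache-inhibited region, BM_REG_* offsets */
};

/* A cache-inhibited register write is just base + offset; selecting the
 * 0x3000-based or 0x0000-based BM_REG_* values at compile time is all the
 * port needs, the write itself is architecture-independent. */
static inline void example_bm_out(struct example_bm_portal_addr *a,
				  u32 offset, u32 val)
{
	__raw_writel(val, a->ci + offset);
}
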
--- a/drivers/soc/fsl/qbman/bman_ccsr.c
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -170,10 +170,11 @@ static int fsl_bman_probe(struct platfor
 {
 	int ret, err_irq;
 	struct device *dev = &pdev->dev;
-	struct device_node *node = dev->of_node;
+	struct device_node *mem_node, *node = dev->of_node;
 	struct resource *res;
 	u16 id, bm_pool_cnt;
 	u8 major, minor;
+	u64 size;

 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
@@ -201,6 +202,38 @@ static int fsl_bman_probe(struct platfor
 		return -ENODEV;
 	}

+	/*
+	 * If FBPR memory wasn't defined using the qbman compatible string,
+	 * try using the of_reserved_mem_device method
+	 */
+	if (!fbpr_a) {
+		ret = of_reserved_mem_device_init(dev);
+		if (ret) {
+			dev_err(dev, "of_reserved_mem_device_init() failed 0x%x\n",
+				ret);
+			return -ENODEV;
+		}
+		mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
+		if (mem_node) {
+			ret = of_property_read_u64(mem_node, "size", &size);
+			if (ret) {
+				dev_err(dev, "FBPR: of_property_read_u64 fails 0x%x\n",
+					ret);
+				return -ENODEV;
+			}
+			fbpr_sz = size;
+		} else {
+			dev_err(dev, "No memory-region found for FBPR\n");
+			return -ENODEV;
+		}
+		if (!dma_zalloc_coherent(dev, fbpr_sz, &fbpr_a, 0)) {
+			dev_err(dev, "Alloc FBPR memory failed\n");
+			return -ENODEV;
+		}
+	}
+
+	dev_dbg(dev, "Allocated FBPR 0x%llx 0x%zx\n", fbpr_a, fbpr_sz);
+
 	bm_set_memory(fbpr_a, fbpr_sz);

 	err_irq = platform_get_irq(pdev, 0);
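
For context, the fallback added above follows the usual reserved-memory pattern: bind the device's memory-region, read the region size out of the device tree, then let the DMA API carve the FBPR out of the bound region. A condensed sketch under those assumptions (the helper name is hypothetical; error handling is trimmed to the essentials):

#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/dma-mapping.h>

/* Hypothetical condensation of the FBPR path in fsl_bman_probe() above. */
static int example_alloc_fbpr(struct device *dev, dma_addr_t *addr,
			      size_t *sz)
{
	struct device_node *mem_node;
	u64 size;

	/* Associate the device's first memory-region phandle with it */
	if (of_reserved_mem_device_init(dev))
		return -ENODEV;

	/* The region's size property tells us how big the FBPR must be */
	mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (!mem_node || of_property_read_u64(mem_node, "size", &size))
		return -ENODEV;
	*sz = size;

	/* The allocation is now satisfied from the reserved region */
	if (!dma_zalloc_coherent(dev, *sz, addr, 0))
		return -ENOMEM;
	return 0;
}
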
--- a/drivers/soc/fsl/qbman/bman_portal.c
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -123,7 +123,14 @@ static int bman_portal_probe(struct plat
 	}
 	pcfg->irq = irq;

-	va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
+#ifdef CONFIG_PPC
+	/* PPC requires a cacheable/non-coherent mapping of the portal */
+	va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]),
+			  (pgprot_val(PAGE_KERNEL) & ~_PAGE_COHERENT));
+#else
+	/* For ARM we can use write combine mapping. */
+	va = ioremap_wc(addr_phys[0]->start, resource_size(addr_phys[0]));
+#endif
 	if (!va) {
 		dev_err(dev, "ioremap::CE failed\n");
 		goto err_ioremap1;
@@ -131,8 +138,7 @@

 	pcfg->addr_virt[DPAA_PORTAL_CE] = va;

-	va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
-			  _PAGE_GUARDED | _PAGE_NO_CACHE);
+	va = ioremap(addr_phys[1]->start, resource_size(addr_phys[1]));
 	if (!va) {
 		dev_err(dev, "ioremap::CI failed\n");
 		goto err_ioremap2;
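
Both portal regions deliberately end up with weaker-than-device memory types: the cache-enabled (CE) half is mapped cacheable/non-coherent on PPC and write-combined on ARM, while the cache-inhibited (CI) half stays a plain uncached mapping. A sketch of that split, mirroring the hunk above rather than defining any driver API (helper names are illustrative):

#include <linux/io.h>

/* Illustrative helper: map the cache-enabled half of a portal. */
static void __iomem *example_map_portal_ce(phys_addr_t start, size_t len)
{
#ifdef CONFIG_PPC
	/* cacheable but non-coherent: clear the coherence attribute */
	return ioremap_prot(start, len,
			    pgprot_val(PAGE_KERNEL) & ~_PAGE_COHERENT);
#else
	/* ARM/ARM64: write-combine gives the required relaxed ordering */
	return ioremap_wc(start, len);
#endif
}

/* The cache-inhibited half is always a plain uncached device mapping. */
static void __iomem *example_map_portal_ci(phys_addr_t start, size_t len)
{
	return ioremap(start, len);
}
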
--- a/drivers/soc/fsl/qbman/bman_priv.h
+++ b/drivers/soc/fsl/qbman/bman_priv.h
@@ -33,6 +33,9 @@
 #include "dpaa_sys.h"

 #include <soc/fsl/bman.h>
+#include <linux/dma-contiguous.h>
+#include <linux/of_address.h>
+#include <linux/dma-mapping.h>

 /* Portal processing (interrupt) sources */
 #define BM_PIRQ_RCRI	0x00000002	/* RCR Ring (below threshold) */
--- a/drivers/soc/fsl/qbman/dpaa_sys.h
+++ b/drivers/soc/fsl/qbman/dpaa_sys.h
@@ -44,20 +44,18 @@
 #include <linux/prefetch.h>
 #include <linux/genalloc.h>
 #include <asm/cacheflush.h>
+#include <linux/io.h>
+#include <linux/delay.h>

 /* For 2-element tables related to cache-inhibited and cache-enabled mappings */
 #define DPAA_PORTAL_CE 0
 #define DPAA_PORTAL_CI 1

-#if (L1_CACHE_BYTES != 32) && (L1_CACHE_BYTES != 64)
-#error "Unsupported Cacheline Size"
-#endif
-
 static inline void dpaa_flush(void *p)
 {
 #ifdef CONFIG_PPC
 	flush_dcache_range((unsigned long)p, (unsigned long)p+64);
-#elif defined(CONFIG_ARM32)
+#elif defined(CONFIG_ARM)
 	__cpuc_flush_dcache_area(p, 64);
 #elif defined(CONFIG_ARM64)
 	__flush_dcache_area(p, 64);
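
dpaa_flush() is what makes the cacheable, non-coherent portal mappings workable: after software builds a 64-byte ring entry through the cache-enabled mapping, the line must be flushed explicitly so the non-coherent QBMan hardware observes it. A paraphrased sketch of the publish step (the entry type and helper are stand-ins; the real driver does this in its ring commit paths, writing the verb byte last):

#include "dpaa_sys.h"

/* Stand-in for one 64-byte, cacheline-sized ring entry. */
struct example_ring_entry {
	u8 verb;		/* ownership/command byte, written last */
	u8 body[63];		/* remainder of the cacheline */
};

static inline void example_publish(struct example_ring_entry *e, u8 verb)
{
	dma_wmb();		/* order the body stores before the verb */
	e->verb = verb;		/* hand the entry over to hardware */
	dpaa_flush(e);		/* per-arch 64-byte dcache flush from above */
}
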
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -41,6 +41,43 @@

 /* Portal register assists */

+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+/* Cache-inhibited register offsets */
+#define QM_REG_EQCR_PI_CINH	0x3000
+#define QM_REG_EQCR_CI_CINH	0x3040
+#define QM_REG_EQCR_ITR		0x3080
+#define QM_REG_DQRR_PI_CINH	0x3100
+#define QM_REG_DQRR_CI_CINH	0x3140
+#define QM_REG_DQRR_ITR		0x3180
+#define QM_REG_DQRR_DCAP	0x31C0
+#define QM_REG_DQRR_SDQCR	0x3200
+#define QM_REG_DQRR_VDQCR	0x3240
+#define QM_REG_DQRR_PDQCR	0x3280
+#define QM_REG_MR_PI_CINH	0x3300
+#define QM_REG_MR_CI_CINH	0x3340
+#define QM_REG_MR_ITR		0x3380
+#define QM_REG_CFG		0x3500
+#define QM_REG_ISR		0x3600
+#define QM_REG_IER		0x3640
+#define QM_REG_ISDR		0x3680
+#define QM_REG_IIR		0x36C0
+#define QM_REG_ITPR		0x3740
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR		0x0000
+#define QM_CL_DQRR		0x1000
+#define QM_CL_MR		0x2000
+#define QM_CL_EQCR_PI_CENA	0x3000
+#define QM_CL_EQCR_CI_CENA	0x3040
+#define QM_CL_DQRR_PI_CENA	0x3100
+#define QM_CL_DQRR_CI_CENA	0x3140
+#define QM_CL_MR_PI_CENA	0x3300
+#define QM_CL_MR_CI_CENA	0x3340
+#define QM_CL_CR		0x3800
+#define QM_CL_RR0		0x3900
+#define QM_CL_RR1		0x3940
+
+#else
 /* Cache-inhibited register offsets */
 #define QM_REG_EQCR_PI_CINH	0x0000
 #define QM_REG_EQCR_CI_CINH	0x0004
@@ -75,6 +112,7 @@
 #define QM_CL_CR		0x3800
 #define QM_CL_RR0		0x3900
 #define QM_CL_RR1		0x3940
+#endif

 /*
  * BTW, the drivers (and h/w programming model) already obtain the required
@@ -909,12 +947,12 @@ static inline int qm_mc_result_timeout(s

 static inline void fq_set(struct qman_fq *fq, u32 mask)
 {
-	set_bits(mask, &fq->flags);
+	fq->flags |= mask;
 }

 static inline void fq_clear(struct qman_fq *fq, u32 mask)
 {
-	clear_bits(mask, &fq->flags);
+	fq->flags &= ~mask;
 }

 static inline int fq_isset(struct qman_fq *fq, u32 mask)
@@ -1567,7 +1605,7 @@ void qman_p_irqsource_add(struct qman_po
 	unsigned long irqflags;

 	local_irq_save(irqflags);
-	set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
+	p->irq_sources |= bits & QM_PIRQ_VISIBLE;
 	qm_out(&p->p, QM_REG_IER, p->irq_sources);
 	local_irq_restore(irqflags);
 }
@@ -1590,7 +1628,7 @@ void qman_p_irqsource_remove(struct qman
 	 */
 	local_irq_save(irqflags);
 	bits &= QM_PIRQ_VISIBLE;
-	clear_bits(bits, &p->irq_sources);
+	p->irq_sources &= ~bits;
 	qm_out(&p->p, QM_REG_IER, p->irq_sources);
 	ier = qm_in(&p->p, QM_REG_IER);
 	/*
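
The set_bits()/clear_bits() conversions above are not just cosmetic: those helpers are PPC-specific bitop atomics that do not exist on ARM, and they are unnecessary here because irq_sources is only ever modified with local interrupts disabled. A condensed illustration of the pattern (types and names are hypothetical):

#include <linux/irqflags.h>
#include <linux/types.h>

struct example_portal {
	u32 irq_sources;
};

/* With local interrupts off, nothing else on this CPU can touch
 * irq_sources, so a plain read-modify-write replaces the PPC-only
 * set_bits()/clear_bits() atomics and is portable to ARM/ARM64. */
static void example_irqsource_add(struct example_portal *p, u32 bits,
				  u32 visible_mask)
{
	unsigned long flags;

	local_irq_save(flags);
	p->irq_sources |= bits & visible_mask;
	local_irq_restore(flags);
}
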
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -401,21 +401,42 @@ static int qm_init_pfdr(struct device *d
 }

 /*
- * Ideally we would use the DMA API to turn rmem->base into a DMA address
- * (especially if iommu translations ever get involved). Unfortunately, the
- * DMA API currently does not allow mapping anything that is not backed with
- * a struct page.
+ * QMan needs two global memory areas initialized at boot time:
+ * 1) FQD: Frame Queue Descriptors used to manage frame queues
+ * 2) PFDR: Packed Frame Queue Descriptor Records used to store frames
+ * Both areas are reserved using the device tree reserved memory framework
+ * and the addresses and sizes are initialized when the QMan device is probed
  */
 static dma_addr_t fqd_a, pfdr_a;
 static size_t fqd_sz, pfdr_sz;

+#ifdef CONFIG_PPC
+/*
+ * Support for PPC Device Tree backward compatibility when the compatible
+ * string is set to "fsl,qman-fqd" and "fsl,qman-pfdr"
+ */
+static int zero_priv_mem(phys_addr_t addr, size_t sz)
+{
+	/* map as cacheable, non-guarded */
+	void __iomem *tmpp = ioremap_prot(addr, sz, 0);
+
+	if (!tmpp)
+		return -ENOMEM;
+
+	memset_io(tmpp, 0, sz);
+	flush_dcache_range((unsigned long)tmpp,
+			   (unsigned long)tmpp + sz);
+	iounmap(tmpp);
+
+	return 0;
+}
+
 static int qman_fqd(struct reserved_mem *rmem)
 {
 	fqd_a = rmem->base;
 	fqd_sz = rmem->size;

 	WARN_ON(!(fqd_a && fqd_sz));
-
 	return 0;
 }
 RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
@@ -431,32 +452,13 @@ static int qman_pfdr(struct reserved_mem
 }
 RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);

+#endif
+
 static unsigned int qm_get_fqid_maxcnt(void)
 {
 	return fqd_sz / 64;
 }

-/*
- * Flush this memory range from data cache so that QMAN originated
- * transactions for this memory region could be marked non-coherent.
- */
-static int zero_priv_mem(struct device *dev, struct device_node *node,
-			 phys_addr_t addr, size_t sz)
-{
-	/* map as cacheable, non-guarded */
-	void __iomem *tmpp = ioremap_prot(addr, sz, 0);
-
-	if (!tmpp)
-		return -ENOMEM;
-
-	memset_io(tmpp, 0, sz);
-	flush_dcache_range((unsigned long)tmpp,
-			   (unsigned long)tmpp + sz);
-	iounmap(tmpp);
-
-	return 0;
-}
-
 static void log_edata_bits(struct device *dev, u32 bit_count)
 {
 	u32 i, j, mask = 0xffffffff;
@@ -687,11 +689,12 @@ static int qman_resource_init(struct dev
 static int fsl_qman_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct device_node *node = dev->of_node;
+	struct device_node *mem_node, *node = dev->of_node;
 	struct resource *res;
 	int ret, err_irq;
 	u16 id;
 	u8 major, minor;
+	u64 size;

 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
@@ -717,6 +720,8 @@ static int fsl_qman_probe(struct platfor
 		qman_ip_rev = QMAN_REV30;
 	else if (major == 3 && minor == 1)
 		qman_ip_rev = QMAN_REV31;
+	else if (major == 3 && minor == 2)
+		qman_ip_rev = QMAN_REV32;
 	else {
 		dev_err(dev, "Unknown QMan version\n");
 		return -ENODEV;
@@ -727,10 +732,83 @@ static int fsl_qman_probe(struct platfor
 		qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
 	}

-	ret = zero_priv_mem(dev, node, fqd_a, fqd_sz);
-	WARN_ON(ret);
-	if (ret)
-		return -ENODEV;
+	if (fqd_a) {
+#ifdef CONFIG_PPC
+		/*
+		 * For PPC backward DT compatibility, FQD memory
+		 * MUST be zeroed by software
+		 */
+		zero_priv_mem(fqd_a, fqd_sz);
+#else
+		WARN(1, "Unexpected architecture using non shared-dma-mem reservations");
+#endif
+	} else {
+		/*
+		 * Order of memory regions is assumed to be FQD followed by
+		 * PFDR; to ensure allocations come from the correct regions,
+		 * the driver initializes and then allocates each area in order
+		 */
+		ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 0);
+		if (ret) {
+			dev_err(dev, "of_reserved_mem_device_init_by_idx(0) failed 0x%x\n",
+				ret);
+			return -ENODEV;
+		}
+		mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
+		if (mem_node) {
+			ret = of_property_read_u64(mem_node, "size", &size);
+			if (ret) {
+				dev_err(dev, "FQD: of_property_read_u64 fails 0x%x\n",
+					ret);
+				return -ENODEV;
+			}
+			fqd_sz = size;
+		} else {
+			dev_err(dev, "No memory-region found for FQD\n");
+			return -ENODEV;
+		}
+		if (!dma_zalloc_coherent(dev, fqd_sz, &fqd_a, 0)) {
+			dev_err(dev, "Alloc FQD memory failed\n");
+			return -ENODEV;
+		}
+
+		/*
+		 * Disassociate the FQD reserved memory area from the device
+		 * because a device can only have one DMA memory area. This
+		 * should be fine since the memory is allocated and initialized
+		 * and only ever accessed by the QMan device from now on
+		 */
+		of_reserved_mem_device_release(dev);
+	}
+	dev_dbg(dev, "Allocated FQD 0x%llx 0x%zx\n", fqd_a, fqd_sz);
+
+	if (!pfdr_a) {
+		/* Setup PFDR memory */
+		ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 1);
+		if (ret) {
+			dev_err(dev, "of_reserved_mem_device_init_by_idx(1) failed 0x%x\n",
+				ret);
+			return -ENODEV;
+		}
+		mem_node = of_parse_phandle(dev->of_node, "memory-region", 1);
+		if (mem_node) {
+			ret = of_property_read_u64(mem_node, "size", &size);
+			if (ret) {
+				dev_err(dev, "PFDR: of_property_read_u64 fails 0x%x\n",
+					ret);
+				return -ENODEV;
+			}
+			pfdr_sz = size;
+		} else {
+			dev_err(dev, "No memory-region found for PFDR\n");
+			return -ENODEV;
+		}
+		if (!dma_zalloc_coherent(dev, pfdr_sz, &pfdr_a, 0)) {
+			dev_err(dev, "Alloc PFDR Failed size 0x%zx\n", pfdr_sz);
+			return -ENODEV;
+		}
+	}
+	dev_info(dev, "Allocated PFDR 0x%llx 0x%zx\n", pfdr_a, pfdr_sz);

 	ret = qman_init_ccsr(dev);
 	if (ret) {
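
The probe path above has one wrinkle the BMan FBPR case does not: QMan needs two reserved regions (index 0 for FQD, index 1 for PFDR), but a device can only be bound to one reserved-memory area at a time, hence the of_reserved_mem_device_release() between them. A sketch of the per-index step (hypothetical helper; error paths trimmed):

#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/dma-mapping.h>

/* Hypothetical helper: bind reserved region idx, size the allocation from
 * the device tree, allocate, then unbind so the next index can be used. */
static int example_alloc_region(struct device *dev, int idx,
				dma_addr_t *addr, size_t *sz)
{
	struct device_node *mem_node;
	u64 size;

	if (of_reserved_mem_device_init_by_idx(dev, dev->of_node, idx))
		return -ENODEV;
	mem_node = of_parse_phandle(dev->of_node, "memory-region", idx);
	if (!mem_node || of_property_read_u64(mem_node, "size", &size))
		return -ENODEV;
	*sz = size;
	if (!dma_zalloc_coherent(dev, *sz, addr, 0))
		return -ENOMEM;
	/* release the association so region idx+1 can be bound next */
	of_reserved_mem_device_release(dev);
	return 0;
}

Under those assumptions the probe body reduces to example_alloc_region(dev, 0, &fqd_a, &fqd_sz) followed by example_alloc_region(dev, 1, &pfdr_a, &pfdr_sz), preserving the FQD-before-PFDR ordering the comment in the hunk insists on.
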
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -262,7 +262,14 @@ static int qman_portal_probe(struct plat
 	}
 	pcfg->irq = irq;

-	va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
+#ifdef CONFIG_PPC
+	/* PPC requires a cacheable/non-coherent mapping of the portal */
+	va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]),
+			  (pgprot_val(PAGE_KERNEL) & ~_PAGE_COHERENT));
+#else
+	/* For ARM we can use write combine mapping. */
+	va = ioremap_wc(addr_phys[0]->start, resource_size(addr_phys[0]));
+#endif
 	if (!va) {
 		dev_err(dev, "ioremap::CE failed\n");
 		goto err_ioremap1;
@@ -270,8 +277,7 @@

 	pcfg->addr_virt[DPAA_PORTAL_CE] = va;

-	va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
-			  _PAGE_GUARDED | _PAGE_NO_CACHE);
+	va = ioremap(addr_phys[1]->start, resource_size(addr_phys[1]));
 	if (!va) {
 		dev_err(dev, "ioremap::CI failed\n");
 		goto err_ioremap2;
--- a/drivers/soc/fsl/qbman/qman_priv.h
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -28,13 +28,13 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */

-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include "dpaa_sys.h"

 #include <soc/fsl/qman.h>
 #include <linux/dma-mapping.h>
 #include <linux/iommu.h>
+#include <linux/dma-contiguous.h>
+#include <linux/of_address.h>

 #if defined(CONFIG_FSL_PAMU)
 #include <asm/fsl_pamu_stash.h>
@@ -187,6 +187,7 @@ struct qm_portal_config {
 #define QMAN_REV20 0x0200
 #define QMAN_REV30 0x0300
 #define QMAN_REV31 0x0301
+#define QMAN_REV32 0x0302
 extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */

 #define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */
--- a/drivers/soc/fsl/qbman/qman_test.h
+++ b/drivers/soc/fsl/qbman/qman_test.h
@@ -30,7 +30,5 @@

 #include "qman_priv.h"

-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 int qman_test_stash(void);
 int qman_test_api(void);