layerscape: add LS1021AIOT board support
[openwrt/openwrt.git] / target / linux / layerscape / patches-4.9 / 301-arch-support-layerscape.patch
1 From 2f2a0ab9e4b3186be981f7151a4f4f794d4b6caa Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Thu, 5 Jul 2018 16:18:37 +0800
4 Subject: [PATCH 03/32] arch: support layerscape
5 MIME-Version: 1.0
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
8
9 This is an integrated patch for layerscape arch support.
10
11 Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
12 Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
13 Signed-off-by: Zhao Qiang <B45475@freescale.com>
14 Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
15 Signed-off-by: Haiying Wang <Haiying.wang@freescale.com>
16 Signed-off-by: Pan Jiafei <Jiafei.Pan@nxp.com>
17 Signed-off-by: Po Liu <po.liu@nxp.com>
18 Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
19 Signed-off-by: Jianhua Xie <jianhua.xie@nxp.com>
20 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
21 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
22 ---
23 arch/arm/include/asm/delay.h | 16 +++++++
24 arch/arm/include/asm/dma-mapping.h | 6 ---
25 arch/arm/include/asm/io.h | 31 +++++++++++++
26 arch/arm/include/asm/mach/map.h | 4 +-
27 arch/arm/include/asm/pgtable.h | 7 +++
28 arch/arm/kernel/bios32.c | 43 ++++++++++++++++++
29 arch/arm/mm/dma-mapping.c | 1 +
30 arch/arm/mm/ioremap.c | 7 +++
31 arch/arm/mm/mmu.c | 9 ++++
32 arch/arm64/include/asm/cache.h | 2 +-
33 arch/arm64/include/asm/io.h | 30 +++++++++++++
34 arch/arm64/include/asm/pci.h | 4 ++
35 arch/arm64/include/asm/pgtable-prot.h | 2 +
36 arch/arm64/include/asm/pgtable.h | 5 +++
37 arch/arm64/kernel/pci.c | 62 ++++++++++++++++++++++++++
38 arch/arm64/mm/dma-mapping.c | 6 +++
39 arch/powerpc/include/asm/dma-mapping.h | 5 ---
40 arch/tile/include/asm/dma-mapping.h | 5 ---
41 18 files changed, 226 insertions(+), 19 deletions(-)
42
43 --- a/arch/arm/include/asm/delay.h
44 +++ b/arch/arm/include/asm/delay.h
45 @@ -57,6 +57,22 @@ extern void __bad_udelay(void);
46 __const_udelay((n) * UDELAY_MULT)) : \
47 __udelay(n))
48
49 +#define spin_event_timeout(condition, timeout, delay) \
50 +({ \
51 + typeof(condition) __ret; \
52 + int i = 0; \
53 + while (!(__ret = (condition)) && (i++ < timeout)) { \
54 + if (delay) \
55 + udelay(delay); \
56 + else \
57 + cpu_relax(); \
58 + udelay(1); \
59 + } \
60 + if (!__ret) \
61 + __ret = (condition); \
62 + __ret; \
63 +})
64 +
65 /* Loop-based definitions for assembly code. */
66 extern void __loop_delay(unsigned long loops);
67 extern void __loop_udelay(unsigned long usecs);
68 --- a/arch/arm/include/asm/dma-mapping.h
69 +++ b/arch/arm/include/asm/dma-mapping.h
70 @@ -31,12 +31,6 @@ static inline struct dma_map_ops *get_dm
71 return __generic_dma_ops(dev);
72 }
73
74 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
75 -{
76 - BUG_ON(!dev);
77 - dev->archdata.dma_ops = ops;
78 -}
79 -
80 #define HAVE_ARCH_DMA_SUPPORTED 1
81 extern int dma_supported(struct device *dev, u64 mask);
82
83 --- a/arch/arm/include/asm/io.h
84 +++ b/arch/arm/include/asm/io.h
85 @@ -129,6 +129,7 @@ static inline u32 __raw_readl(const vola
86 #define MT_DEVICE_NONSHARED 1
87 #define MT_DEVICE_CACHED 2
88 #define MT_DEVICE_WC 3
89 +#define MT_MEMORY_RW_NS 4
90 /*
91 * types 4 onwards can be found in asm/mach/map.h and are undefined
92 * for ioremap
93 @@ -220,6 +221,34 @@ extern int pci_ioremap_io(unsigned int o
94 #endif
95 #endif
96
97 +/* access ports */
98 +#define setbits32(_addr, _v) iowrite32be(ioread32be(_addr) | (_v), (_addr))
99 +#define clrbits32(_addr, _v) iowrite32be(ioread32be(_addr) & ~(_v), (_addr))
100 +
101 +#define setbits16(_addr, _v) iowrite16be(ioread16be(_addr) | (_v), (_addr))
102 +#define clrbits16(_addr, _v) iowrite16be(ioread16be(_addr) & ~(_v), (_addr))
103 +
104 +#define setbits8(_addr, _v) iowrite8(ioread8(_addr) | (_v), (_addr))
105 +#define clrbits8(_addr, _v) iowrite8(ioread8(_addr) & ~(_v), (_addr))
106 +
107 +/* Clear and set bits in one shot. These macros can be used to clear and
108 + * set multiple bits in a register using a single read-modify-write. These
109 + * macros can also be used to set a multiple-bit bit pattern using a mask,
110 + * by specifying the mask in the 'clear' parameter and the new bit pattern
111 + * in the 'set' parameter.
112 + */
113 +
114 +#define clrsetbits_be32(addr, clear, set) \
115 + iowrite32be((ioread32be(addr) & ~(clear)) | (set), (addr))
116 +#define clrsetbits_le32(addr, clear, set) \
117 + iowrite32le((ioread32le(addr) & ~(clear)) | (set), (addr))
118 +#define clrsetbits_be16(addr, clear, set) \
119 + iowrite16be((ioread16be(addr) & ~(clear)) | (set), (addr))
120 +#define clrsetbits_le16(addr, clear, set) \
121 + iowrite16le((ioread16le(addr) & ~(clear)) | (set), (addr))
122 +#define clrsetbits_8(addr, clear, set) \
123 + iowrite8((ioread8(addr) & ~(clear)) | (set), (addr))
124 +
125 /*
126 * IO port access primitives
127 * -------------------------
128 @@ -408,6 +437,8 @@ void __iomem *ioremap_wc(resource_size_t
129 #define ioremap_wc ioremap_wc
130 #define ioremap_wt ioremap_wc
131
132 +void __iomem *ioremap_cache_ns(resource_size_t res_cookie, size_t size);
133 +
134 void iounmap(volatile void __iomem *iomem_cookie);
135 #define iounmap iounmap
136
137 --- a/arch/arm/include/asm/mach/map.h
138 +++ b/arch/arm/include/asm/mach/map.h
139 @@ -21,9 +21,9 @@ struct map_desc {
140 unsigned int type;
141 };
142
143 -/* types 0-3 are defined in asm/io.h */
144 +/* types 0-4 are defined in asm/io.h */
145 enum {
146 - MT_UNCACHED = 4,
147 + MT_UNCACHED = 5,
148 MT_CACHECLEAN,
149 MT_MINICLEAN,
150 MT_LOW_VECTORS,
151 --- a/arch/arm/include/asm/pgtable.h
152 +++ b/arch/arm/include/asm/pgtable.h
153 @@ -118,6 +118,13 @@ extern pgprot_t pgprot_s2_device;
154 #define pgprot_noncached(prot) \
155 __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
156
157 +#define pgprot_cached(prot) \
158 + __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_CACHED)
159 +
160 +#define pgprot_cached_ns(prot) \
161 + __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_CACHED | \
162 + L_PTE_MT_DEV_NONSHARED)
163 +
164 #define pgprot_writecombine(prot) \
165 __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
166
167 --- a/arch/arm/kernel/bios32.c
168 +++ b/arch/arm/kernel/bios32.c
169 @@ -11,6 +11,8 @@
170 #include <linux/slab.h>
171 #include <linux/init.h>
172 #include <linux/io.h>
173 +#include <linux/of_irq.h>
174 +#include <linux/pcieport_if.h>
175
176 #include <asm/mach-types.h>
177 #include <asm/mach/map.h>
178 @@ -64,6 +66,47 @@ void pcibios_report_status(u_int status_
179 }
180
181 /*
182 + * Check device tree if the service interrupts are there
183 + */
184 +int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
185 +{
186 + int ret, count = 0;
187 + struct device_node *np = NULL;
188 +
189 + if (dev->bus->dev.of_node)
190 + np = dev->bus->dev.of_node;
191 +
192 + if (np == NULL)
193 + return 0;
194 +
195 + if (!IS_ENABLED(CONFIG_OF_IRQ))
196 + return 0;
197 +
198 + /* If root port doesn't support MSI/MSI-X/INTx in RC mode,
199 + * request irq for aer
200 + */
201 + if (mask & PCIE_PORT_SERVICE_AER) {
202 + ret = of_irq_get_byname(np, "aer");
203 + if (ret > 0) {
204 + irqs[PCIE_PORT_SERVICE_AER_SHIFT] = ret;
205 + count++;
206 + }
207 + }
208 +
209 + if (mask & PCIE_PORT_SERVICE_PME) {
210 + ret = of_irq_get_byname(np, "pme");
211 + if (ret > 0) {
212 + irqs[PCIE_PORT_SERVICE_PME_SHIFT] = ret;
213 + count++;
214 + }
215 + }
216 +
217 +	/* TODO: add more service interrupts if they are in the device tree */
218 +
219 + return count;
220 +}
221 +
222 +/*
223 * We don't use this to fix the device, but initialisation of it.
224 * It's not the correct use for this, but it works.
225 * Note that the arbiter/ISA bridge appears to be buggy, specifically in
226 --- a/arch/arm/mm/dma-mapping.c
227 +++ b/arch/arm/mm/dma-mapping.c
228 @@ -2410,6 +2410,7 @@ void arch_setup_dma_ops(struct device *d
229
230 set_dma_ops(dev, dma_ops);
231 }
232 +EXPORT_SYMBOL(arch_setup_dma_ops);
233
234 void arch_teardown_dma_ops(struct device *dev)
235 {
236 --- a/arch/arm/mm/ioremap.c
237 +++ b/arch/arm/mm/ioremap.c
238 @@ -398,6 +398,13 @@ void __iomem *ioremap_wc(resource_size_t
239 }
240 EXPORT_SYMBOL(ioremap_wc);
241
242 +void __iomem *ioremap_cache_ns(resource_size_t res_cookie, size_t size)
243 +{
244 + return arch_ioremap_caller(res_cookie, size, MT_MEMORY_RW_NS,
245 + __builtin_return_address(0));
246 +}
247 +EXPORT_SYMBOL(ioremap_cache_ns);
248 +
249 /*
250 * Remap an arbitrary physical address space into the kernel virtual
251 * address space as memory. Needed when the kernel wants to execute
252 --- a/arch/arm/mm/mmu.c
253 +++ b/arch/arm/mm/mmu.c
254 @@ -313,6 +313,13 @@ static struct mem_type mem_types[] __ro_
255 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
256 .domain = DOMAIN_KERNEL,
257 },
258 + [MT_MEMORY_RW_NS] = {
259 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
260 + L_PTE_XN,
261 + .prot_l1 = PMD_TYPE_TABLE,
262 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_XN,
263 + .domain = DOMAIN_KERNEL,
264 + },
265 [MT_ROM] = {
266 .prot_sect = PMD_TYPE_SECT,
267 .domain = DOMAIN_KERNEL,
268 @@ -644,6 +651,7 @@ static void __init build_mem_type_table(
269 }
270 kern_pgprot |= PTE_EXT_AF;
271 vecs_pgprot |= PTE_EXT_AF;
272 + mem_types[MT_MEMORY_RW_NS].prot_pte |= PTE_EXT_AF | cp->pte;
273
274 /*
275 * Set PXN for user mappings
276 @@ -672,6 +680,7 @@ static void __init build_mem_type_table(
277 mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
278 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
279 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
280 + mem_types[MT_MEMORY_RW_NS].prot_sect |= ecc_mask | cp->pmd;
281 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
282 mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
283 mem_types[MT_ROM].prot_sect |= cp->pmd;
284 --- a/arch/arm64/include/asm/cache.h
285 +++ b/arch/arm64/include/asm/cache.h
286 @@ -18,7 +18,7 @@
287
288 #include <asm/cachetype.h>
289
290 -#define L1_CACHE_SHIFT 7
291 +#define L1_CACHE_SHIFT 6
292 #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
293
294 /*
295 --- a/arch/arm64/include/asm/io.h
296 +++ b/arch/arm64/include/asm/io.h
297 @@ -171,6 +171,8 @@ extern void __iomem *ioremap_cache(phys_
298 #define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
299 #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
300 #define ioremap_wt(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
301 +#define ioremap_cache_ns(addr, size) __ioremap((addr), (size), \
302 + __pgprot(PROT_NORMAL_NS))
303 #define iounmap __iounmap
304
305 /*
306 @@ -184,6 +186,34 @@ extern void __iomem *ioremap_cache(phys_
307 #define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
308 #define iowrite64be(v,p) ({ __iowmb(); __raw_writeq((__force __u64)cpu_to_be64(v), p); })
309
310 +/* access ports */
311 +#define setbits32(_addr, _v) iowrite32be(ioread32be(_addr) | (_v), (_addr))
312 +#define clrbits32(_addr, _v) iowrite32be(ioread32be(_addr) & ~(_v), (_addr))
313 +
314 +#define setbits16(_addr, _v) iowrite16be(ioread16be(_addr) | (_v), (_addr))
315 +#define clrbits16(_addr, _v) iowrite16be(ioread16be(_addr) & ~(_v), (_addr))
316 +
317 +#define setbits8(_addr, _v) iowrite8(ioread8(_addr) | (_v), (_addr))
318 +#define clrbits8(_addr, _v) iowrite8(ioread8(_addr) & ~(_v), (_addr))
319 +
320 +/* Clear and set bits in one shot. These macros can be used to clear and
321 + * set multiple bits in a register using a single read-modify-write. These
322 + * macros can also be used to set a multiple-bit bit pattern using a mask,
323 + * by specifying the mask in the 'clear' parameter and the new bit pattern
324 + * in the 'set' parameter.
325 + */
326 +
327 +#define clrsetbits_be32(addr, clear, set) \
328 + iowrite32be((ioread32be(addr) & ~(clear)) | (set), (addr))
329 +#define clrsetbits_le32(addr, clear, set) \
330 + iowrite32le((ioread32le(addr) & ~(clear)) | (set), (addr))
331 +#define clrsetbits_be16(addr, clear, set) \
332 + iowrite16be((ioread16be(addr) & ~(clear)) | (set), (addr))
333 +#define clrsetbits_le16(addr, clear, set) \
334 + iowrite16le((ioread16le(addr) & ~(clear)) | (set), (addr))
335 +#define clrsetbits_8(addr, clear, set) \
336 + iowrite8((ioread8(addr) & ~(clear)) | (set), (addr))
337 +
338 #include <asm-generic/io.h>
339
340 /*
341 --- a/arch/arm64/include/asm/pci.h
342 +++ b/arch/arm64/include/asm/pci.h
343 @@ -31,6 +31,10 @@ static inline int pci_get_legacy_ide_irq
344 return -ENODEV;
345 }
346
347 +#define HAVE_PCI_MMAP
348 +extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
349 + enum pci_mmap_state mmap_state,
350 + int write_combine);
351 static inline int pci_proc_domain(struct pci_bus *bus)
352 {
353 return 1;
354 --- a/arch/arm64/include/asm/pgtable-prot.h
355 +++ b/arch/arm64/include/asm/pgtable-prot.h
356 @@ -48,6 +48,7 @@
357 #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
358 #define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
359 #define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
360 +#define PROT_NORMAL_NS (PTE_TYPE_PAGE | PTE_AF | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
361
362 #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
363 #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
364 @@ -68,6 +69,7 @@
365 #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
366
367 #define PAGE_S2 __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
368 +#define PAGE_S2_NS __pgprot(PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDWR | PTE_TYPE_PAGE | PTE_AF)
369 #define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
370
371 #define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_NG | PTE_PXN | PTE_UXN)
372 --- a/arch/arm64/include/asm/pgtable.h
373 +++ b/arch/arm64/include/asm/pgtable.h
374 @@ -370,6 +370,11 @@ static inline int pmd_protnone(pmd_t pmd
375 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
376 #define pgprot_writecombine(prot) \
377 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
378 +#define pgprot_cached(prot) \
379 + __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL) | \
380 + PTE_PXN | PTE_UXN)
381 +#define pgprot_cached_ns(prot) \
382 + __pgprot(pgprot_val(pgprot_cached(prot)) ^ PTE_SHARED)
383 #define pgprot_device(prot) \
384 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
385 #define __HAVE_PHYS_MEM_ACCESS_PROT
386 --- a/arch/arm64/kernel/pci.c
387 +++ b/arch/arm64/kernel/pci.c
388 @@ -17,6 +17,8 @@
389 #include <linux/mm.h>
390 #include <linux/of_pci.h>
391 #include <linux/of_platform.h>
392 +#include <linux/of_irq.h>
393 +#include <linux/pcieport_if.h>
394 #include <linux/pci.h>
395 #include <linux/pci-acpi.h>
396 #include <linux/pci-ecam.h>
397 @@ -53,6 +55,66 @@ int pcibios_alloc_irq(struct pci_dev *de
398
399 return 0;
400 }
401 +
402 +/*
403 + * Check device tree if the service interrupts are there
404 + */
405 +int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
406 +{
407 + int ret, count = 0;
408 + struct device_node *np = NULL;
409 +
410 + if (dev->bus->dev.of_node)
411 + np = dev->bus->dev.of_node;
412 +
413 + if (np == NULL)
414 + return 0;
415 +
416 + if (!IS_ENABLED(CONFIG_OF_IRQ))
417 + return 0;
418 +
419 + /* If root port doesn't support MSI/MSI-X/INTx in RC mode,
420 + * request irq for aer
421 + */
422 + if (mask & PCIE_PORT_SERVICE_AER) {
423 + ret = of_irq_get_byname(np, "aer");
424 + if (ret > 0) {
425 + irqs[PCIE_PORT_SERVICE_AER_SHIFT] = ret;
426 + count++;
427 + }
428 + }
429 +
430 + if (mask & PCIE_PORT_SERVICE_PME) {
431 + ret = of_irq_get_byname(np, "pme");
432 + if (ret > 0) {
433 + irqs[PCIE_PORT_SERVICE_PME_SHIFT] = ret;
434 + count++;
435 + }
436 + }
437 +
438 +	/* TODO: add more service interrupts if they are in the device tree */
439 +
440 + return count;
441 +}
442 +
443 +int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
444 + enum pci_mmap_state mmap_state, int write_combine)
445 +{
446 + if (mmap_state == pci_mmap_io)
447 + return -EINVAL;
448 +
449 + /*
450 + * Mark this as IO
451 + */
452 + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
453 +
454 + if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
455 + vma->vm_end - vma->vm_start,
456 + vma->vm_page_prot))
457 + return -EAGAIN;
458 +
459 + return 0;
460 +}
461
462 /*
463 * raw_pci_read/write - Platform-specific PCI config space access.
464 --- a/arch/arm64/mm/dma-mapping.c
465 +++ b/arch/arm64/mm/dma-mapping.c
466 @@ -30,6 +30,7 @@
467 #include <linux/swiotlb.h>
468
469 #include <asm/cacheflush.h>
470 +#include <linux/fsl/mc.h>
471
472 static int swiotlb __ro_after_init;
473
474 @@ -925,6 +926,10 @@ static int __init __iommu_dma_init(void)
475 if (!ret)
476 ret = register_iommu_dma_ops_notifier(&pci_bus_type);
477 #endif
478 +#ifdef CONFIG_FSL_MC_BUS
479 + if (!ret)
480 + ret = register_iommu_dma_ops_notifier(&fsl_mc_bus_type);
481 +#endif
482 return ret;
483 }
484 arch_initcall(__iommu_dma_init);
485 @@ -978,3 +983,4 @@ void arch_setup_dma_ops(struct device *d
486 dev->archdata.dma_coherent = coherent;
487 __iommu_setup_dma_ops(dev, dma_base, size, iommu);
488 }
489 +EXPORT_SYMBOL(arch_setup_dma_ops);
490 --- a/arch/powerpc/include/asm/dma-mapping.h
491 +++ b/arch/powerpc/include/asm/dma-mapping.h
492 @@ -91,11 +91,6 @@ static inline struct dma_map_ops *get_dm
493 return dev->archdata.dma_ops;
494 }
495
496 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
497 -{
498 - dev->archdata.dma_ops = ops;
499 -}
500 -
501 /*
502 * get_dma_offset()
503 *
504 --- a/arch/tile/include/asm/dma-mapping.h
505 +++ b/arch/tile/include/asm/dma-mapping.h
506 @@ -59,11 +59,6 @@ static inline phys_addr_t dma_to_phys(st
507
508 static inline void dma_mark_clean(void *addr, size_t size) {}
509
510 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
511 -{
512 - dev->archdata.dma_ops = ops;
513 -}
514 -
515 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
516 {
517 if (!dev->dma_mask)