From 2c58080407554e1bac8fd50d23cb02420524caed Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@openwrt.org>
Date: Mon, 12 Aug 2013 12:50:22 +0200
Subject: [PATCH] MIPS: partially inline dma ops

Several DMA ops are no-ops on many platforms, and the indirection through
the mips_dma_map_ops function table is causing the compiler to emit
unnecessary code.

Inlining visibly improves network performance in my tests (on a 24Kc
based system), and also slightly reduces the code size of a few drivers.

Signed-off-by: Felix Fietkau <nbd@openwrt.org>
---
 arch/mips/Kconfig                   |   4 +
 arch/mips/include/asm/dma-mapping.h | 360 +++++++++++++++++++++++++++++++++++-
 arch/mips/mm/dma-default.c          | 163 ++--------------
 3 files changed, 373 insertions(+), 154 deletions(-)
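
Note (illustrative, not part of the patch): the win comes from letting the
compiler constant-fold the ops lookup. When CONFIG_SYS_HAS_DMA_OPS is not
set, get_dma_ops() returns NULL as a compile-time constant, so the
"if (ops)" test in each inlined helper is dead code and only the platform
fast path remains. A toy model of the pattern, with made-up names
(toy_ops, toy_map_fast):

	struct toy_ops {
		int (*map)(int x);
	};

	/* Before: a pointer load plus an indirect call that the
	 * compiler cannot inline, even when the handler is trivial. */
	extern struct toy_ops *toy_ops_table;
	static int toy_map_indirect(int x)
	{
		return toy_ops_table->map(x);
	}

	/* Stands in for the cheap platform DMA op. */
	static inline int toy_map_fast(int x)
	{
		return x + 1;
	}

	/* After: with the table compiled out, the branch folds away
	 * and toy_map_fast() is inlined into every caller. */
	static inline int toy_map(int x)
	{
		struct toy_ops *ops = NULL;	/* get_dma_ops() w/o SYS_HAS_DMA_OPS */

		if (ops)
			return ops->map(x);
		return toy_map_fast(x);
	}
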
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1430,6 +1430,7 @@ config CPU_CAVIUM_OCTEON
 	select LIBFDT
 	select USE_OF
 	select USB_EHCI_BIG_ENDIAN_MMIO
+	select SYS_HAS_DMA_OPS
 	help
 	  The Cavium Octeon processor is a highly integrated chip containing
 	  many ethernet hardware widgets for networking tasks. The processor
@@ -1650,6 +1651,9 @@ config SYS_HAS_CPU_XLR
 config SYS_HAS_CPU_XLP
 	bool
 
+config SYS_HAS_DMA_OPS
+	bool
+
 #
 # CPU may reorder R->R, R->W, W->R, W->W
 # Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -1,8 +1,15 @@
 #ifndef _ASM_DMA_MAPPING_H
 #define _ASM_DMA_MAPPING_H
 
+#include <linux/kmemcheck.h>
+#include <linux/bug.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/dma-attrs.h>
+
 #include <asm/scatterlist.h>
 #include <asm/dma-coherence.h>
 #include <asm/cache.h>
 
 #include <dma-coherence.h>
@@ -9,12 +16,45 @@
 
-extern struct dma_map_ops *mips_dma_map_ops;
+void __dma_sync(struct page *page, unsigned long offset, size_t size,
+		enum dma_data_direction direction);
+void *mips_dma_alloc_coherent(struct device *dev, size_t size,
+			      dma_addr_t *dma_handle, gfp_t gfp,
+			      struct dma_attrs *attrs);
+void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+			    dma_addr_t dma_handle, struct dma_attrs *attrs);
 
 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
+#ifdef CONFIG_SYS_HAS_DMA_OPS
 	if (dev && dev->archdata.dma_ops)
 		return dev->archdata.dma_ops;
 	else
 		return mips_dma_map_ops;
+#else
+	return NULL;
+#endif
 }
 
+/*
+ * Warning on the terminology - Linux calls an uncached area coherent;
+ * MIPS terminology calls memory areas with hardware maintained coherency
+ * coherent.
+ */
+static inline int cpu_is_noncoherent_r10000(struct device *dev)
+{
+#ifndef CONFIG_SYS_HAS_CPU_R10000
+	return 0;
+#endif
+	return !plat_device_is_coherent(dev) &&
+	       (current_cpu_type() == CPU_R10000 ||
+		current_cpu_type() == CPU_R12000);
+}
+
+static inline struct page *dma_addr_to_page(struct device *dev,
+	dma_addr_t dma_addr)
+{
+	return pfn_to_page(
+		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
+}
+
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
@@ -30,12 +69,309 @@ static inline bool dma_capable(struct de
 
 static inline void dma_mark_clean(void *addr, size_t size) {}
 
-#include <asm-generic/dma-mapping-common.h>
+static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
+					      size_t size,
+					      enum dma_data_direction dir,
+					      struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	unsigned long offset = (unsigned long)ptr & ~PAGE_MASK;
+	struct page *page = virt_to_page(ptr);
+	dma_addr_t addr;
+
+	kmemcheck_mark_initialized(ptr, size);
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops) {
+		addr = ops->map_page(dev, page, offset, size, dir, attrs);
+	} else {
+		if (!plat_device_is_coherent(dev))
+			__dma_sync(page, offset, size, dir);
+
+		addr = plat_map_dma_mem_page(dev, page) + offset;
+	}
+	debug_dma_map_page(dev, page, offset, size, dir, addr, true);
+	return addr;
+}
+
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
+					  size_t size,
+					  enum dma_data_direction dir,
+					  struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops) {
+		ops->unmap_page(dev, addr, size, dir, attrs);
+	} else {
+		if (cpu_is_noncoherent_r10000(dev))
+			__dma_sync(dma_addr_to_page(dev, addr),
+				   addr & ~PAGE_MASK, size, dir);
+
+		plat_unmap_dma_mem(dev, addr, size, dir);
+	}
+	debug_dma_unmap_page(dev, addr, size, dir, true);
+}
+
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+				   int nents, enum dma_data_direction dir,
+				   struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	struct scatterlist *s;
+	int i, ents;
+
+	for_each_sg(sg, s, nents, i)
+		kmemcheck_mark_initialized(sg_virt(s), s->length);
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops) {
+		ents = ops->map_sg(dev, sg, nents, dir, attrs);
+	} else {
+		for_each_sg(sg, s, nents, i) {
+			struct page *page = sg_page(s);
+
+			if (!plat_device_is_coherent(dev))
+				__dma_sync(page, s->offset, s->length, dir);
+			s->dma_address =
+				plat_map_dma_mem_page(dev, page) + s->offset;
+		}
+		ents = nents;
+	}
+	debug_dma_map_sg(dev, sg, nents, ents, dir);
+
+	return ents;
+}
+
+static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
+				      int nents, enum dma_data_direction dir,
+				      struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	struct scatterlist *s;
+	int i;
+
+	BUG_ON(!valid_dma_direction(dir));
+	debug_dma_unmap_sg(dev, sg, nents, dir);
+	if (ops) {
+		ops->unmap_sg(dev, sg, nents, dir, attrs);
+		return;
+	}
+
+	for_each_sg(sg, s, nents, i) {
+		if (!plat_device_is_coherent(dev) && dir != DMA_TO_DEVICE)
+			__dma_sync(sg_page(s), s->offset, s->length, dir);
+		plat_unmap_dma_mem(dev, s->dma_address, s->length, dir);
+	}
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+				      size_t offset, size_t size,
+				      enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	dma_addr_t addr;
+
+	kmemcheck_mark_initialized(page_address(page) + offset, size);
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops) {
+		addr = ops->map_page(dev, page, offset, size, dir, NULL);
+	} else {
+		if (!plat_device_is_coherent(dev))
+			__dma_sync(page, offset, size, dir);
+
+		addr = plat_map_dma_mem_page(dev, page) + offset;
+	}
+	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
+
+	return addr;
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+				  size_t size, enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops) {
+		ops->unmap_page(dev, addr, size, dir, NULL);
+	} else {
+		if (cpu_is_noncoherent_r10000(dev))
+			__dma_sync(dma_addr_to_page(dev, addr),
+				   addr & ~PAGE_MASK, size, dir);
+
+		plat_unmap_dma_mem(dev, addr, size, dir);
+	}
+	debug_dma_unmap_page(dev, addr, size, dir, false);
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+					   size_t size,
+					   enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops)
+		ops->sync_single_for_cpu(dev, addr, size, dir);
+	else if (cpu_is_noncoherent_r10000(dev))
+		__dma_sync(dma_addr_to_page(dev, addr),
+			   addr & ~PAGE_MASK, size, dir);
+	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+					      dma_addr_t addr, size_t size,
+					      enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops)
+		ops->sync_single_for_device(dev, addr, size, dir);
+	else if (!plat_device_is_coherent(dev))
+		__dma_sync(dma_addr_to_page(dev, addr),
+			   addr & ~PAGE_MASK, size, dir);
+	debug_dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+						 dma_addr_t addr,
+						 unsigned long offset,
+						 size_t size,
+						 enum dma_data_direction dir)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops)
+		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
+	else if (cpu_is_noncoherent_r10000(dev))
+		__dma_sync(dma_addr_to_page(dev, addr + offset),
+			   (addr + offset) & ~PAGE_MASK, size, dir);
+	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+						    dma_addr_t addr,
+						    unsigned long offset,
+						    size_t size,
+						    enum dma_data_direction dir)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops)
+		ops->sync_single_for_device(dev, addr + offset, size, dir);
+	else if (!plat_device_is_coherent(dev))
+		__dma_sync(dma_addr_to_page(dev, addr + offset),
+			   (addr + offset) & ~PAGE_MASK, size, dir);
+	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+		    int nelems, enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	struct scatterlist *s;
+	int i;
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops)
+		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
+	else if (cpu_is_noncoherent_r10000(dev)) {
+		for_each_sg(sg, s, nelems, i)
+			__dma_sync(sg_page(s), s->offset, s->length, dir);
+	}
+	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+		       int nelems, enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	struct scatterlist *s;
+	int i;
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops)
+		ops->sync_sg_for_device(dev, sg, nelems, dir);
+	else if (!plat_device_is_coherent(dev)) {
+		for_each_sg(sg, s, nelems, i)
+			__dma_sync(sg_page(s), s->offset, s->length, dir);
+	}
+	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
+}
+
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+/**
+ * dma_mmap_attrs - map a coherent DMA allocation into user space
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @vma: vm_area_struct describing requested user mapping
+ * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
+ * @handle: device-view address returned from dma_alloc_attrs
+ * @size: size of memory originally requested in dma_alloc_attrs
+ * @attrs: attributes of mapping properties requested in dma_alloc_attrs
+ *
+ * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
+ * into user space. The coherent DMA buffer must not be freed by the
+ * driver until the user space mapping has been released.
+ */
+static inline int
+dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
+	       dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (ops && ops->mmap)
+		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
+
+static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+					void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
+
+int
+dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+		       void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+static inline int
+dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
+		      dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (ops && ops->get_sgtable)
+		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
+					attrs);
+	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
+}
+
+#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
 
 static inline int dma_supported(struct device *dev, u64 mask)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
-	return ops->dma_supported(dev, mask);
+
+	if (ops)
+		return ops->dma_supported(dev, mask);
+	return plat_dma_supported(dev, mask);
 }
 
 static inline int dma_mapping_error(struct device *dev, u64 mask)
@@ -43,7 +379,9 @@ static inline int dma_mapping_error(stru
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
 	debug_dma_mapping_error(dev, mask);
-	return ops->mapping_error(dev, mask);
+	if (ops)
+		return ops->mapping_error(dev, mask);
+	return 0;
 }
@@ -69,7 +407,11 @@ static inline void *dma_alloc_attrs(stru
 	void *ret;
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
-	ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
+	if (ops)
+		ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
+	else
+		ret = mips_dma_alloc_coherent(dev, size, dma_handle, gfp,
+					      attrs);
 
 	debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
@@ -84,7 +426,10 @@ static inline void dma_free_attrs(struct
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
-	ops->free(dev, size, vaddr, dma_handle, attrs);
+	if (ops)
+		ops->free(dev, size, vaddr, dma_handle, attrs);
+	else
+		mips_dma_free_coherent(dev, size, vaddr, dma_handle, attrs);
 
 	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -23,7 +23,7 @@
 
 #ifdef CONFIG_DMA_MAYBE_COHERENT
 int coherentio = 0;	/* User defined DMA coherency from command line. */
-EXPORT_SYMBOL_GPL(coherentio);
+EXPORT_SYMBOL(coherentio);
 int hw_coherentio = 0;	/* Actual hardware supported DMA coherency setting. */
 
 static int __init setcoherentio(char *str)
@@ -44,26 +44,6 @@ static int __init setnocoherentio(char *
 early_param("nocoherentio", setnocoherentio);
 #endif
 
-static inline struct page *dma_addr_to_page(struct device *dev,
-	dma_addr_t dma_addr)
-{
-	return pfn_to_page(
-		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
-}
-
-/*
- * Warning on the terminology - Linux calls an uncached area coherent;
- * MIPS terminology calls memory areas with hardware maintained coherency
- * coherent.
- */
-
-static inline int cpu_is_noncoherent_r10000(struct device *dev)
-{
-	return !plat_device_is_coherent(dev) &&
-	       (current_cpu_type() == CPU_R10000 ||
-		current_cpu_type() == CPU_R12000);
-}
-
 static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
 {
 	gfp_t dma_flag;
@@ -119,8 +99,9 @@ void *dma_alloc_noncoherent(struct devic
 	return ret;
 }
 EXPORT_SYMBOL(dma_alloc_noncoherent);
 
-static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
-	dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+void *mips_dma_alloc_coherent(struct device *dev, size_t size,
+			      dma_addr_t *dma_handle, gfp_t gfp,
+			      struct dma_attrs *attrs)
 {
 	void *ret;
@@ -144,6 +125,7 @@ static void *mips_dma_alloc_coherent(str
 
 	return ret;
 }
+EXPORT_SYMBOL(mips_dma_alloc_coherent);
 
 void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
 	dma_addr_t dma_handle)
@@ -154,8 +136,8 @@ void dma_free_noncoherent(struct device
 }
 EXPORT_SYMBOL(dma_free_noncoherent);
 
-static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-	dma_addr_t dma_handle, struct dma_attrs *attrs)
+void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+	dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	unsigned long addr = (unsigned long) vaddr;
 	int order = get_order(size);
@@ -170,6 +152,7 @@ static void mips_dma_free_coherent(struc
 	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
 	free_pages(addr, get_order(size));
 }
+EXPORT_SYMBOL(mips_dma_free_coherent);
 
 static inline void __dma_sync_virtual(void *addr, size_t size,
 	enum dma_data_direction direction)
@@ -198,8 +181,8 @@ static inline void __dma_sync_virtual(vo
  * If highmem is not configured then the bulk of this loop gets
  * optimized out.
  */
-static inline void __dma_sync(struct page *page,
-	unsigned long offset, size_t size, enum dma_data_direction direction)
+void __dma_sync(struct page *page, unsigned long offset, size_t size,
+	enum dma_data_direction direction)
 {
 	size_t left = size;
@@ -228,109 +211,7 @@ static inline void __dma_sync(struct pag
 		left -= len;
 	} while (left);
 }
 
-static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
-	size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
-{
-	if (cpu_is_noncoherent_r10000(dev))
-		__dma_sync(dma_addr_to_page(dev, dma_addr),
-			   dma_addr & ~PAGE_MASK, size, direction);
-
-	plat_unmap_dma_mem(dev, dma_addr, size, direction);
-}
-
-static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
-	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		if (!plat_device_is_coherent(dev))
-			__dma_sync(sg_page(sg), sg->offset, sg->length,
-				   direction);
-		sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
-				  sg->offset;
-	}
-
-	return nents;
-}
-
-static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
-	unsigned long offset, size_t size, enum dma_data_direction direction,
-	struct dma_attrs *attrs)
-{
-	if (!plat_device_is_coherent(dev))
-		__dma_sync(page, offset, size, direction);
-
-	return plat_map_dma_mem_page(dev, page) + offset;
-}
-
-static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-	int nhwentries, enum dma_data_direction direction,
-	struct dma_attrs *attrs)
-{
-	int i;
-
-	for (i = 0; i < nhwentries; i++, sg++) {
-		if (!plat_device_is_coherent(dev) &&
-		    direction != DMA_TO_DEVICE)
-			__dma_sync(sg_page(sg), sg->offset, sg->length,
-				   direction);
-		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
-	}
-}
-
-static void mips_dma_sync_single_for_cpu(struct device *dev,
-	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
-	if (cpu_is_noncoherent_r10000(dev))
-		__dma_sync(dma_addr_to_page(dev, dma_handle),
-			   dma_handle & ~PAGE_MASK, size, direction);
-}
-
-static void mips_dma_sync_single_for_device(struct device *dev,
-	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
-	if (!plat_device_is_coherent(dev))
-		__dma_sync(dma_addr_to_page(dev, dma_handle),
-			   dma_handle & ~PAGE_MASK, size, direction);
-}
-
-static void mips_dma_sync_sg_for_cpu(struct device *dev,
-	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
-{
-	int i;
-
-	/* Make sure that gcc doesn't leave the empty loop body.  */
-	for (i = 0; i < nelems; i++, sg++) {
-		if (cpu_is_noncoherent_r10000(dev))
-			__dma_sync(sg_page(sg), sg->offset, sg->length,
-				   direction);
-	}
-}
-
-static void mips_dma_sync_sg_for_device(struct device *dev,
-	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
-{
-	int i;
-
-	/* Make sure that gcc doesn't leave the empty loop body.  */
-	for (i = 0; i < nelems; i++, sg++) {
-		if (!plat_device_is_coherent(dev))
-			__dma_sync(sg_page(sg), sg->offset, sg->length,
-				   direction);
-	}
-}
-
-int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return plat_dma_mapping_error(dev, dma_addr);
-}
-
-int mips_dma_supported(struct device *dev, u64 mask)
-{
-	return plat_dma_supported(dev, mask);
-}
+EXPORT_SYMBOL(__dma_sync);
 
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	enum dma_data_direction direction)
@@ -343,23 +224,10 @@ void dma_cache_sync(struct device *dev,
 
 EXPORT_SYMBOL(dma_cache_sync);
 
-static struct dma_map_ops mips_default_dma_map_ops = {
-	.alloc = mips_dma_alloc_coherent,
-	.free = mips_dma_free_coherent,
-	.map_page = mips_dma_map_page,
-	.unmap_page = mips_dma_unmap_page,
-	.map_sg = mips_dma_map_sg,
-	.unmap_sg = mips_dma_unmap_sg,
-	.sync_single_for_cpu = mips_dma_sync_single_for_cpu,
-	.sync_single_for_device = mips_dma_sync_single_for_device,
-	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
-	.sync_sg_for_device = mips_dma_sync_sg_for_device,
-	.mapping_error = mips_dma_mapping_error,
-	.dma_supported = mips_dma_supported
-};
-
-struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
+#ifdef CONFIG_SYS_HAS_DMA_OPS
+struct dma_map_ops *mips_dma_map_ops = NULL;
 EXPORT_SYMBOL(mips_dma_map_ops);
+#endif
 
 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)