generic: add preliminary 3.19 support
[openwrt/staging/wigyori.git] target/linux/generic/patches-3.19/132-mips_inline_dma_ops.patch
From 2c58080407554e1bac8fd50d23cb02420524caed Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@openwrt.org>
Date: Mon, 12 Aug 2013 12:50:22 +0200
Subject: [PATCH] MIPS: partially inline dma ops

Several DMA ops are no-ops on many platforms, and the indirection through
the mips_dma_map_ops function table is causing the compiler to emit
unnecessary code.

Inlining visibly improves network performance in my tests (on a 24Kc
based system), and also slightly reduces code size of a few drivers.
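
Roughly, the change replaces the unconditional indirect call through the
ops table with an inline NULL check. A standalone userspace sketch of the
dispatch pattern (hypothetical names, not the kernel code):

	/* Sketch only: models the NULL-ops fast path of this patch. */
	#include <stdio.h>
	#include <stddef.h>

	struct ops {
		size_t (*map)(void *ptr, size_t size);
	};

	#define SYS_HAS_OPS 0			/* mirrors CONFIG_SYS_HAS_DMA_OPS=n */

	static struct ops *registered_ops;	/* stands in for mips_dma_map_ops */

	static inline struct ops *get_ops(void)
	{
		/* With SYS_HAS_OPS == 0 this is a compile-time NULL, so the
		 * indirect-call branch below is dead code in every caller. */
		return SYS_HAS_OPS ? registered_ops : NULL;
	}

	static inline size_t map(void *ptr, size_t size)
	{
		struct ops *ops = get_ops();

		if (ops)			/* only when a platform table can exist */
			return ops->map(ptr, size);
		return (size_t)ptr + size;	/* stand-in for the inlined fast path */
	}

	int main(void)
	{
		char buf[64];

		printf("%zu\n", map(buf, sizeof(buf)));
		return 0;
	}

When no platform ops can exist (CONFIG_SYS_HAS_DMA_OPS disabled),
get_ops() returns a compile-time NULL, the indirect-call branch folds
away, and the fast path is inlined at every call site. Platforms that
need the table, such as Octeon, keep it by selecting SYS_HAS_DMA_OPS.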

Signed-off-by: Felix Fietkau <nbd@openwrt.org>
---
 arch/mips/Kconfig                   |   4 +
 arch/mips/include/asm/dma-mapping.h | 360 +++++++++++++++++++++++++++++++++++-
 arch/mips/mm/dma-default.c          | 163 ++--------------
 3 files changed, 373 insertions(+), 154 deletions(-)

--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1486,6 +1486,7 @@ config CPU_CAVIUM_OCTEON
 	select CPU_SUPPORTS_HUGEPAGES
 	select USB_EHCI_BIG_ENDIAN_MMIO
 	select MIPS_L1_CACHE_SHIFT_7
+	select SYS_HAS_DMA_OPS
 	help
 	  The Cavium Octeon processor is a highly integrated chip containing
 	  many ethernet hardware widgets for networking tasks. The processor
@@ -1744,6 +1745,9 @@ config MIPS_MALTA_PM
 	bool
 	default y

+config SYS_HAS_DMA_OPS
+	bool
+
 #
 # CPU may reorder R->R, R->W, W->R, W->W
 # Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -1,9 +1,16 @@
 #ifndef _ASM_DMA_MAPPING_H
 #define _ASM_DMA_MAPPING_H

+#include <linux/kmemcheck.h>
+#include <linux/bug.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/dma-attrs.h>
+
 #include <asm/scatterlist.h>
 #include <asm/dma-coherence.h>
 #include <asm/cache.h>
+#include <asm/cpu-type.h>
 #include <asm-generic/dma-coherent.h>

 #ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */
@@ -12,12 +19,48 @@

 extern struct dma_map_ops *mips_dma_map_ops;

+void __dma_sync(struct page *page, unsigned long offset, size_t size,
+	enum dma_data_direction direction);
+void *mips_dma_alloc_coherent(struct device *dev, size_t size,
+	dma_addr_t *dma_handle, gfp_t gfp,
+	struct dma_attrs *attrs);
+void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+	dma_addr_t dma_handle, struct dma_attrs *attrs);
+
 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
+#ifdef CONFIG_SYS_HAS_DMA_OPS
 	if (dev && dev->archdata.dma_ops)
 		return dev->archdata.dma_ops;
 	else
 		return mips_dma_map_ops;
+#else
+	return NULL;
+#endif
+}
+
+/*
+ * Warning on the terminology - Linux calls an uncached area coherent;
+ * MIPS terminology calls memory areas with hardware maintained coherency
+ * coherent.
+ */
+
+static inline int cpu_needs_post_dma_flush(struct device *dev)
+{
+#ifndef CONFIG_SYS_HAS_CPU_R10000
+	return 0;
+#endif
+	return !plat_device_is_coherent(dev) &&
+		(boot_cpu_type() == CPU_R10000 ||
+		boot_cpu_type() == CPU_R12000 ||
+		boot_cpu_type() == CPU_BMIPS5000);
+}
+
+static inline struct page *dma_addr_to_page(struct device *dev,
+	dma_addr_t dma_addr)
+{
+	return pfn_to_page(
+		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
 }

 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
@@ -30,12 +73,304 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)

 static inline void dma_mark_clean(void *addr, size_t size) {}

-#include <asm-generic/dma-mapping-common.h>
+static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
+	size_t size,
+	enum dma_data_direction dir,
+	struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	unsigned long offset = (unsigned long)ptr & ~PAGE_MASK;
+	struct page *page = virt_to_page(ptr);
+	dma_addr_t addr;
+
+	kmemcheck_mark_initialized(ptr, size);
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops) {
+		addr = ops->map_page(dev, page, offset, size, dir, attrs);
+	} else {
+		if (!plat_device_is_coherent(dev))
+			__dma_sync(page, offset, size, dir);
+
+		addr = plat_map_dma_mem_page(dev, page) + offset;
+	}
+	debug_dma_map_page(dev, page, offset, size, dir, addr, true);
+	return addr;
+}
+
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
+	size_t size,
+	enum dma_data_direction dir,
+	struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops) {
+		ops->unmap_page(dev, addr, size, dir, attrs);
+	} else {
+		if (cpu_needs_post_dma_flush(dev))
+			__dma_sync(dma_addr_to_page(dev, addr),
+				addr & ~PAGE_MASK, size, dir);
+
+		plat_unmap_dma_mem(dev, addr, size, dir);
+	}
+	debug_dma_unmap_page(dev, addr, size, dir, true);
+}
+
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+	int nents, enum dma_data_direction dir,
+	struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	int i, ents;
+	struct scatterlist *s;
+
+	for_each_sg(sg, s, nents, i)
+		kmemcheck_mark_initialized(sg_virt(s), s->length);
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops) {
+		ents = ops->map_sg(dev, sg, nents, dir, attrs);
+	} else {
+		for_each_sg(sg, s, nents, i) {
+			struct page *page = sg_page(s);
+
+			if (!plat_device_is_coherent(dev))
+				__dma_sync(page, s->offset, s->length, dir);
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+			s->dma_length = s->length;
+#endif
+			s->dma_address =
+				plat_map_dma_mem_page(dev, page) + s->offset;
+		}
+		ents = nents;
+	}
+	debug_dma_map_sg(dev, sg, nents, ents, dir);
+
+	return ents;
+}
+
+static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
+	int nents, enum dma_data_direction dir,
+	struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	struct scatterlist *s;
+	int i;
+
+	BUG_ON(!valid_dma_direction(dir));
+	debug_dma_unmap_sg(dev, sg, nents, dir);
+	if (ops) {
+		ops->unmap_sg(dev, sg, nents, dir, attrs);
+		return;
+	}
+
+	for_each_sg(sg, s, nents, i) {
+		if (!plat_device_is_coherent(dev) && dir != DMA_TO_DEVICE)
+			__dma_sync(sg_page(s), s->offset, s->length, dir);
+		plat_unmap_dma_mem(dev, s->dma_address, s->length, dir);
+	}
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+	size_t offset, size_t size,
+	enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	dma_addr_t addr;
+
+	kmemcheck_mark_initialized(page_address(page) + offset, size);
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops) {
+		addr = ops->map_page(dev, page, offset, size, dir, NULL);
+	} else {
+		if (!plat_device_is_coherent(dev))
+			__dma_sync(page, offset, size, dir);
+
+		addr = plat_map_dma_mem_page(dev, page) + offset;
+	}
+	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
+
+	return addr;
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+	size_t size, enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops) {
+		ops->unmap_page(dev, addr, size, dir, NULL);
+	} else {
+		if (cpu_needs_post_dma_flush(dev))
+			__dma_sync(dma_addr_to_page(dev, addr),
+				addr & ~PAGE_MASK, size, dir);
+
+		plat_unmap_dma_mem(dev, addr, size, dir);
+	}
+	debug_dma_unmap_page(dev, addr, size, dir, false);
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+	size_t size,
+	enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops)
+		ops->sync_single_for_cpu(dev, addr, size, dir);
+	else if (cpu_needs_post_dma_flush(dev))
+		__dma_sync(dma_addr_to_page(dev, addr),
+			addr & ~PAGE_MASK, size, dir);
+	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+	dma_addr_t addr, size_t size,
+	enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops)
+		ops->sync_single_for_device(dev, addr, size, dir);
+	else if (!plat_device_is_coherent(dev))
+		__dma_sync(dma_addr_to_page(dev, addr),
+			addr & ~PAGE_MASK, size, dir);
+	debug_dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+	dma_addr_t addr,
+	unsigned long offset,
+	size_t size,
+	enum dma_data_direction dir)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops)
+		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
+	else if (cpu_needs_post_dma_flush(dev))
+		__dma_sync(dma_addr_to_page(dev, addr + offset),
+			(addr + offset) & ~PAGE_MASK, size, dir);
+	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+	dma_addr_t addr,
+	unsigned long offset,
+	size_t size,
+	enum dma_data_direction dir)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops)
+		ops->sync_single_for_device(dev, addr + offset, size, dir);
+	else if (!plat_device_is_coherent(dev))
+		__dma_sync(dma_addr_to_page(dev, addr + offset),
+			(addr + offset) & ~PAGE_MASK, size, dir);
+	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+	int nelems, enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	struct scatterlist *s;
+	int i;
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops)
+		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
+	else if (cpu_needs_post_dma_flush(dev)) {
+		for_each_sg(sg, s, nelems, i)
+			__dma_sync(sg_page(s), s->offset, s->length, dir);
+	}
+	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+	int nelems, enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	struct scatterlist *s;
+	int i;
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (ops)
+		ops->sync_sg_for_device(dev, sg, nelems, dir);
+	else if (!plat_device_is_coherent(dev)) {
+		for_each_sg(sg, s, nelems, i)
+			__dma_sync(sg_page(s), s->offset, s->length, dir);
+	}
+	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
+
+}
+
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+	void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+/**
+ * dma_mmap_attrs - map a coherent DMA allocation into user space
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @vma: vm_area_struct describing requested user mapping
+ * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
+ * @handle: device-view address returned from dma_alloc_attrs
+ * @size: size of memory originally requested in dma_alloc_attrs
+ * @attrs: attributes of mapping properties requested in dma_alloc_attrs
+ *
+ * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
+ * into user space. The coherent DMA buffer must not be freed by the
+ * driver until the user space mapping has been released.
+ */
+static inline int
+dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
+	dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	BUG_ON(!ops);
+	if (ops && ops->mmap)
+		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
+
+int
+dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+	void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+static inline int
+dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
+	dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	BUG_ON(!ops);
+	if (ops && ops->get_sgtable)
+		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
+			attrs);
+	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
+}
+
+#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
+

 static inline int dma_supported(struct device *dev, u64 mask)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
-	return ops->dma_supported(dev, mask);
+	if (ops)
+		return ops->dma_supported(dev, mask);
+	return plat_dma_supported(dev, mask);
 }

 static inline int dma_mapping_error(struct device *dev, u64 mask)
@@ -43,7 +378,9 @@ static inline int dma_mapping_error(struct device *dev, u64 mask)
 	struct dma_map_ops *ops = get_dma_ops(dev);

 	debug_dma_mapping_error(dev, mask);
-	return ops->mapping_error(dev, mask);
+	if (ops)
+		return ops->mapping_error(dev, mask);
+	return 0;
 }

 static inline int
@@ -54,7 +391,7 @@ dma_set_mask(struct device *dev, u64 mask)
 	if(!dev->dma_mask || !dma_supported(dev, mask))
 		return -EIO;

-	if (ops->set_dma_mask)
+	if (ops && ops->set_dma_mask)
 		return ops->set_dma_mask(dev, mask);

 	*dev->dma_mask = mask;
@@ -74,7 +411,11 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
 	void *ret;
 	struct dma_map_ops *ops = get_dma_ops(dev);

-	ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
+	if (ops)
+		ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
+	else
+		ret = mips_dma_alloc_coherent(dev, size, dma_handle, gfp,
+			attrs);

 	debug_dma_alloc_coherent(dev, size, *dma_handle, ret);

@@ -89,7 +430,10 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);

-	ops->free(dev, size, vaddr, dma_handle, attrs);
+	if (ops)
+		ops->free(dev, size, vaddr, dma_handle, attrs);
+	else
+		mips_dma_free_coherent(dev, size, vaddr, dma_handle, attrs);

 	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
 }
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -26,7 +26,7 @@

 #ifdef CONFIG_DMA_MAYBE_COHERENT
 int coherentio = 0;	/* User defined DMA coherency from command line. */
-EXPORT_SYMBOL_GPL(coherentio);
+EXPORT_SYMBOL(coherentio);
 int hw_coherentio = 0;	/* Actual hardware supported DMA coherency setting. */

 static int __init setcoherentio(char *str)
@@ -46,35 +46,6 @@ static int __init setnocoherentio(char *str)
 early_param("nocoherentio", setnocoherentio);
 #endif

-static inline struct page *dma_addr_to_page(struct device *dev,
-	dma_addr_t dma_addr)
-{
-	return pfn_to_page(
-		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
-}
-
-/*
- * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
- * speculatively fill random cachelines with stale data at any time,
- * requiring an extra flush post-DMA.
- *
- * Warning on the terminology - Linux calls an uncached area coherent;
- * MIPS terminology calls memory areas with hardware maintained coherency
- * coherent.
- *
- * Note that the R14000 and R16000 should also be checked for in this
- * condition. However this function is only called on non-I/O-coherent
- * systems and only the R10000 and R12000 are used in such systems, the
- * SGI IP28 Indigo² rsp. SGI IP32 aka O2.
- */
-static inline int cpu_needs_post_dma_flush(struct device *dev)
-{
-	return !plat_device_is_coherent(dev) &&
-		(boot_cpu_type() == CPU_R10000 ||
-		boot_cpu_type() == CPU_R12000 ||
-		boot_cpu_type() == CPU_BMIPS5000);
-}
-
 static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
 {
 	gfp_t dma_flag;
@@ -130,8 +101,9 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
 }
 EXPORT_SYMBOL(dma_alloc_noncoherent);

-static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
-	dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+void *mips_dma_alloc_coherent(struct device *dev, size_t size,
+	dma_addr_t *dma_handle, gfp_t gfp,
+	struct dma_attrs *attrs)
 {
 	void *ret;
 	struct page *page = NULL;
@@ -162,6 +134,7 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,

 	return ret;
 }
+EXPORT_SYMBOL(mips_dma_alloc_coherent);


 void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
@@ -172,8 +145,8 @@ void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
 }
 EXPORT_SYMBOL(dma_free_noncoherent);

-static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-	dma_addr_t dma_handle, struct dma_attrs *attrs)
+void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+	dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	unsigned long addr = (unsigned long) vaddr;
 	int order = get_order(size);
@@ -193,6 +166,7 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 	if (!dma_release_from_contiguous(dev, page, count))
 		__free_pages(page, get_order(size));
 }
+EXPORT_SYMBOL(mips_dma_free_coherent);

 static inline void __dma_sync_virtual(void *addr, size_t size,
 	enum dma_data_direction direction)
@@ -221,8 +195,8 @@ static inline void __dma_sync_virtual(void *addr, size_t size,
  * If highmem is not configured then the bulk of this loop gets
  * optimized out.
  */
-static inline void __dma_sync(struct page *page,
-	unsigned long offset, size_t size, enum dma_data_direction direction)
+void __dma_sync(struct page *page, unsigned long offset, size_t size,
+	enum dma_data_direction direction)
 {
 	size_t left = size;

@@ -251,108 +225,7 @@ static inline void __dma_sync(struct page *page,
 		left -= len;
 	} while (left);
 }
-
-static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
-	size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
-{
-	if (cpu_needs_post_dma_flush(dev))
-		__dma_sync(dma_addr_to_page(dev, dma_addr),
-			dma_addr & ~PAGE_MASK, size, direction);
-
-	plat_unmap_dma_mem(dev, dma_addr, size, direction);
-}
-
-static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
-	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		if (!plat_device_is_coherent(dev))
-			__dma_sync(sg_page(sg), sg->offset, sg->length,
-				direction);
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
-		sg->dma_length = sg->length;
-#endif
-		sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
-			sg->offset;
-	}
-
-	return nents;
-}
-
-static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
-	unsigned long offset, size_t size, enum dma_data_direction direction,
-	struct dma_attrs *attrs)
-{
-	if (!plat_device_is_coherent(dev))
-		__dma_sync(page, offset, size, direction);
-
-	return plat_map_dma_mem_page(dev, page) + offset;
-}
-
-static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-	int nhwentries, enum dma_data_direction direction,
-	struct dma_attrs *attrs)
-{
-	int i;
-
-	for (i = 0; i < nhwentries; i++, sg++) {
-		if (!plat_device_is_coherent(dev) &&
-			direction != DMA_TO_DEVICE)
-			__dma_sync(sg_page(sg), sg->offset, sg->length,
-				direction);
-		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
-	}
-}
-
-static void mips_dma_sync_single_for_cpu(struct device *dev,
-	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
-	if (cpu_needs_post_dma_flush(dev))
-		__dma_sync(dma_addr_to_page(dev, dma_handle),
-			dma_handle & ~PAGE_MASK, size, direction);
-}
-
-static void mips_dma_sync_single_for_device(struct device *dev,
-	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
-	if (!plat_device_is_coherent(dev))
-		__dma_sync(dma_addr_to_page(dev, dma_handle),
-			dma_handle & ~PAGE_MASK, size, direction);
-}
-
-static void mips_dma_sync_sg_for_cpu(struct device *dev,
-	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
-{
-	int i;
-
-	if (cpu_needs_post_dma_flush(dev))
-		for (i = 0; i < nelems; i++, sg++)
-			__dma_sync(sg_page(sg), sg->offset, sg->length,
-				direction);
-}
-
-static void mips_dma_sync_sg_for_device(struct device *dev,
-	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
-{
-	int i;
-
-	if (!plat_device_is_coherent(dev))
-		for (i = 0; i < nelems; i++, sg++)
-			__dma_sync(sg_page(sg), sg->offset, sg->length,
-				direction);
-}
-
-int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return 0;
-}
-
-int mips_dma_supported(struct device *dev, u64 mask)
-{
-	return plat_dma_supported(dev, mask);
-}
+EXPORT_SYMBOL(__dma_sync);

 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	enum dma_data_direction direction)
@@ -365,23 +238,10 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,

 EXPORT_SYMBOL(dma_cache_sync);

-static struct dma_map_ops mips_default_dma_map_ops = {
-	.alloc = mips_dma_alloc_coherent,
-	.free = mips_dma_free_coherent,
-	.map_page = mips_dma_map_page,
-	.unmap_page = mips_dma_unmap_page,
-	.map_sg = mips_dma_map_sg,
-	.unmap_sg = mips_dma_unmap_sg,
-	.sync_single_for_cpu = mips_dma_sync_single_for_cpu,
-	.sync_single_for_device = mips_dma_sync_single_for_device,
-	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
-	.sync_sg_for_device = mips_dma_sync_sg_for_device,
-	.mapping_error = mips_dma_mapping_error,
-	.dma_supported = mips_dma_supported
-};
-
-struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
+#ifdef CONFIG_SYS_HAS_DMA_OPS
+struct dma_map_ops *mips_dma_map_ops = NULL;
 EXPORT_SYMBOL(mips_dma_map_ops);
+#endif

 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
