kernel: update kernel 4.4 to version 4.4.6
[openwrt/openwrt.git] target/linux/generic/patches-4.4/132-mips_inline_dma_ops.patch
From 2c58080407554e1bac8fd50d23cb02420524caed Mon Sep 17 00:00:00 2001
From: Felix Fietkau <nbd@openwrt.org>
Date: Mon, 12 Aug 2013 12:50:22 +0200
Subject: [PATCH] MIPS: partially inline dma ops

Several DMA ops are no-op on many platforms, and the indirection through
the mips_dma_map_ops function table is causing the compiler to emit
unnecessary code.

Inlining visibly improves network performance in my tests (on a 24Kc
based system), and also slightly reduces code size of a few drivers.

Signed-off-by: Felix Fietkau <nbd@openwrt.org>
---
 arch/mips/Kconfig | 4 +
 arch/mips/include/asm/dma-mapping.h | 360 +++++++++++++++++++++++++++++++++++-
 arch/mips/mm/dma-default.c | 163 ++--------------
 3 files changed, 373 insertions(+), 154 deletions(-)

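To see why dropping the indirection helps, here is a minimal standalone sketch of the dispatch pattern the patch introduces. It is illustrative only and not part of the patch: the my_*-prefixed names and the MY_SYS_HAS_DMA_OPS macro are hypothetical stand-ins for get_dma_ops(), plat_map_dma_mem_page(), __dma_sync() and CONFIG_SYS_HAS_DMA_OPS. When the config symbol is not selected, the ops pointer is a compile-time constant NULL, the "if (ops)" branch folds away, and the mapping fast path is inlined directly into the calling driver.

#include <stddef.h>
#include <stdint.h>

typedef uintptr_t dma_addr_t;

struct my_dma_map_ops {
        dma_addr_t (*map_page)(void *ptr, size_t size);
};

#ifdef MY_SYS_HAS_DMA_OPS
/* e.g. Octeon: a real ops table is installed at boot */
static struct my_dma_map_ops *my_dma_map_ops;
#endif

static inline struct my_dma_map_ops *my_get_dma_ops(void)
{
#ifdef MY_SYS_HAS_DMA_OPS
        return my_dma_map_ops;          /* runtime indirection is kept */
#else
        return NULL;                    /* compile-time constant */
#endif
}

static int my_device_is_coherent(void)
{
        return 0;                       /* model a non-I/O-coherent platform */
}

static void my_cache_writeback(void *ptr, size_t size)
{
        (void)ptr; (void)size;          /* stand-in for __dma_sync() */
}

static inline dma_addr_t my_dma_map_single(void *ptr, size_t size)
{
        struct my_dma_map_ops *ops = my_get_dma_ops();

        if (ops)
                return ops->map_page(ptr, size);        /* indirect call */

        /*
         * Without MY_SYS_HAS_DMA_OPS the branch above folds away and
         * this path is inlined straight into the caller.
         */
        if (!my_device_is_coherent())
                my_cache_writeback(ptr, size);
        return (dma_addr_t)ptr;         /* stand-in for plat_map_dma_mem_page() */
}

int main(void)
{
        char buf[64];

        return my_dma_map_single(buf, sizeof(buf)) ? 0 : 1;
}

In the diff below only CPU_CAVIUM_OCTEON selects SYS_HAS_DMA_OPS, so every other MIPS platform takes the NULL branch and dma_map_single(), dma_unmap_single() and friends compile down to the direct plat_* and __dma_sync() calls.
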
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1618,6 +1618,7 @@ config CPU_CAVIUM_OCTEON
 select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
 select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
 select MIPS_L1_CACHE_SHIFT_7
+ select SYS_HAS_DMA_OPS
 help
 The Cavium Octeon processor is a highly integrated chip containing
 many ethernet hardware widgets for networking tasks. The processor
@@ -1913,6 +1914,9 @@ config MIPS_MALTA_PM
 bool
 default y

+config SYS_HAS_DMA_OPS
+ bool
+
 #
 # CPU may reorder R->R, R->W, W->R, W->W
 # Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -1,9 +1,16 @@
 #ifndef _ASM_DMA_MAPPING_H
 #define _ASM_DMA_MAPPING_H

+#include <linux/kmemcheck.h>
+#include <linux/bug.h>
 #include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/dma-attrs.h>
+
 #include <asm/dma-coherence.h>
 #include <asm/cache.h>
+#include <asm/cpu-type.h>
+#include <asm-generic/dma-coherent.h>

 #ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */
 #include <dma-coherence.h>
@@ -11,12 +18,53 @@

 extern struct dma_map_ops *mips_dma_map_ops;

+void __dma_sync(struct page *page, unsigned long offset, size_t size,
+ enum dma_data_direction direction);
+void *mips_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs);
+void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs);
+
 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
+#ifdef CONFIG_SYS_HAS_DMA_OPS
 if (dev && dev->archdata.dma_ops)
 return dev->archdata.dma_ops;
 else
 return mips_dma_map_ops;
+#else
+ return NULL;
+#endif
+}
+
+/*
+ * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
+ * speculatively fill random cachelines with stale data at any time,
+ * requiring an extra flush post-DMA.
+ *
+ * Warning on the terminology - Linux calls an uncached area coherent;
+ * MIPS terminology calls memory areas with hardware maintained coherency
+ * coherent.
+ *
+ * Note that the R14000 and R16000 should also be checked for in this
+ * condition. However this function is only called on non-I/O-coherent
+ * systems and only the R10000 and R12000 are used in such systems, the
+ * SGI IP28 Indigo² rsp. SGI IP32 aka O2.
+ */
+static inline int cpu_needs_post_dma_flush(struct device *dev)
+{
+ return !plat_device_is_coherent(dev) &&
+ (boot_cpu_type() == CPU_R10000 ||
+ boot_cpu_type() == CPU_R12000 ||
+ boot_cpu_type() == CPU_BMIPS5000);
+}
+
+static inline struct page *dma_addr_to_page(struct device *dev,
+ dma_addr_t dma_addr)
+{
+ return pfn_to_page(
+ plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
 }

 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
@@ -29,9 +77,399 @@ static inline bool dma_capable(struct de

 static inline void dma_mark_clean(void *addr, size_t size) {}

-#include <asm-generic/dma-mapping-common.h>
+static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
+ size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ unsigned long offset = (unsigned long)ptr & ~PAGE_MASK;
+ struct page *page = virt_to_page(ptr);
+ dma_addr_t addr;
+
+ kmemcheck_mark_initialized(ptr, size);
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ addr = ops->map_page(dev, page, offset, size, dir, attrs);
+ } else {
+ if (!plat_device_is_coherent(dev))
+ __dma_sync(page, offset, size, dir);
+
+ addr = plat_map_dma_mem_page(dev, page) + offset;
+ }
+ debug_dma_map_page(dev, page, offset, size, dir, addr, true);
+ return addr;
+}
+
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
+ size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ ops->unmap_page(dev, addr, size, dir, attrs);
+ } else {
+ if (cpu_needs_post_dma_flush(dev))
+ __dma_sync(dma_addr_to_page(dev, addr),
+ addr & ~PAGE_MASK, size, dir);
+ plat_post_dma_flush(dev);
+ plat_unmap_dma_mem(dev, addr, size, dir);
+ }
+ debug_dma_unmap_page(dev, addr, size, dir, true);
+}
+
+/*
+ * dma_maps_sg_attrs returns 0 on error and > 0 on success.
+ * It should never return a value < 0.
+ */
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ int i, ents;
+ struct scatterlist *s;
+
+ for_each_sg(sg, s, nents, i)
+ kmemcheck_mark_initialized(sg_virt(s), s->length);
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ ents = ops->map_sg(dev, sg, nents, dir, attrs);
+ } else {
+ for_each_sg(sg, s, nents, i) {
+ struct page *page = sg_page(s);
+
+ if (!plat_device_is_coherent(dev))
+ __dma_sync(page, s->offset, s->length, dir);
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ s->dma_length = s->length;
+#endif
+ s->dma_address =
+ plat_map_dma_mem_page(dev, page) + s->offset;
+ }
+ ents = nents;
+ }
+ BUG_ON(ents < 0);
+ debug_dma_map_sg(dev, sg, nents, ents, dir);
+
+ return ents;
+}
+
+static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ struct scatterlist *s;
+ int i;
+
+ BUG_ON(!valid_dma_direction(dir));
+ debug_dma_unmap_sg(dev, sg, nents, dir);
+ if (ops) {
+ ops->unmap_sg(dev, sg, nents, dir, attrs);
+ return;
+ }
+ for_each_sg(sg, s, nents, i) {
+ if (!plat_device_is_coherent(dev) && dir != DMA_TO_DEVICE)
+ __dma_sync(sg_page(s), s->offset, s->length, dir);
+ plat_unmap_dma_mem(dev, s->dma_address, s->length, dir);
+ }
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+ size_t offset, size_t size,
+ enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ dma_addr_t addr;
+
+ kmemcheck_mark_initialized(page_address(page) + offset, size);
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ addr = ops->map_page(dev, page, offset, size, dir, NULL);
+ } else {
+ if (!plat_device_is_coherent(dev))
+ __dma_sync(page, offset, size, dir);
+
+ addr = plat_map_dma_mem_page(dev, page) + offset;
+ }
+ debug_dma_map_page(dev, page, offset, size, dir, addr, false);
+
+ return addr;
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ ops->unmap_page(dev, addr, size, dir, NULL);
+ } else {
+ if (cpu_needs_post_dma_flush(dev))
+ __dma_sync(dma_addr_to_page(dev, addr),
+ addr & ~PAGE_MASK, size, dir);
+ plat_post_dma_flush(dev);
+ plat_unmap_dma_mem(dev, addr, size, dir);
+ }
+ debug_dma_unmap_page(dev, addr, size, dir, false);
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ ops->sync_single_for_cpu(dev, addr, size, dir);
+ } else {
+ if (cpu_needs_post_dma_flush(dev))
+ __dma_sync(dma_addr_to_page(dev, addr),
+ addr & ~PAGE_MASK, size, dir);
+ plat_post_dma_flush(dev);
+ }
+ debug_dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops)
+ ops->sync_single_for_device(dev, addr, size, dir);
+ else if (!plat_device_is_coherent(dev))
+ __dma_sync(dma_addr_to_page(dev, addr),
+ addr & ~PAGE_MASK, size, dir);
+ debug_dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t addr,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ ops->sync_single_for_cpu(dev, addr + offset, size, dir);
+ } else {
+ if (cpu_needs_post_dma_flush(dev))
+ __dma_sync(dma_addr_to_page(dev, addr + offset),
+ (addr + offset) & ~PAGE_MASK, size, dir);
+ plat_post_dma_flush(dev);
+ }
+
+ debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t addr,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops)
+ ops->sync_single_for_device(dev, addr + offset, size, dir);
+ else if (!plat_device_is_coherent(dev))
+ __dma_sync(dma_addr_to_page(dev, addr + offset),
+ (addr + offset) & ~PAGE_MASK, size, dir);
+ debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ struct scatterlist *s;
+ int i;
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ ops->sync_sg_for_cpu(dev, sg, nelems, dir);
+ } else if (cpu_needs_post_dma_flush(dev)) {
+ for_each_sg(sg, s, nelems, i)
+ __dma_sync(sg_page(s), s->offset, s->length, dir);
+ }
+ plat_post_dma_flush(dev);
+ debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ struct scatterlist *s;
+ int i;
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops) {
+ ops->sync_sg_for_device(dev, sg, nelems, dir);
+ } else if (!plat_device_is_coherent(dev)) {
+ for_each_sg(sg, s, nelems, i)
+ __dma_sync(sg_page(s), s->offset, s->length, dir);
+ }
+ debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
+
+}
+
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+/**
+ * dma_mmap_attrs - map a coherent DMA allocation into user space
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @vma: vm_area_struct describing requested user mapping
+ * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
+ * @handle: device-view address returned from dma_alloc_attrs
+ * @size: size of memory originally requested in dma_alloc_attrs
+ * @attrs: attributes of mapping properties requested in dma_alloc_attrs
+ *
+ * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
+ * into user space. The coherent DMA buffer must not be freed by the
+ * driver until the user space mapping has been released.
+ */
+static inline int
+dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ BUG_ON(!ops);
+ if (ops && ops->mmap)
+ return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+ return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
+
+int
+dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+static inline int
+dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ BUG_ON(!ops);
+ if (ops && ops->get_sgtable)
+ return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
+ attrs);
+ return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
+}
+
+#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
+
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ if (ops)
+ return ops->dma_supported(dev, mask);
+ return plat_dma_supported(dev, mask);
+}
+
+static inline int dma_mapping_error(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ debug_dma_mapping_error(dev, mask);
+ if (ops)
+ return ops->mapping_error(dev, mask);
+ return 0;
+}
+
+static inline int
+dma_set_mask(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if(!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+
+ if (ops && ops->set_dma_mask)
+ return ops->set_dma_mask(dev, mask);
+
+ *dev->dma_mask = mask;
+
+ return 0;
+}

 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 enum dma_data_direction direction);

+#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs)
+{
+ void *ret;
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (ops)
+ ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
+ else
+ ret = mips_dma_alloc_coherent(dev, size, dma_handle, gfp,
+ attrs);
+
+ debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
+
+ return ret;
+}
+
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (ops)
+ ops->free(dev, size, vaddr, dma_handle, attrs);
+ else
+ mips_dma_free_coherent(dev, size, vaddr, dma_handle, attrs);
+
+ debug_dma_free_coherent(dev, size, vaddr, dma_handle);
+}
+
+static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+ return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
+}
+
+static inline void dma_free_noncoherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+ dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+}
+
+
 #endif /* _ASM_DMA_MAPPING_H */
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -46,35 +46,6 @@ static int __init setnocoherentio(char *
 early_param("nocoherentio", setnocoherentio);
 #endif

-static inline struct page *dma_addr_to_page(struct device *dev,
- dma_addr_t dma_addr)
-{
- return pfn_to_page(
- plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
-}
-
-/*
- * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
- * speculatively fill random cachelines with stale data at any time,
- * requiring an extra flush post-DMA.
- *
- * Warning on the terminology - Linux calls an uncached area coherent;
- * MIPS terminology calls memory areas with hardware maintained coherency
- * coherent.
- *
- * Note that the R14000 and R16000 should also be checked for in this
- * condition. However this function is only called on non-I/O-coherent
- * systems and only the R10000 and R12000 are used in such systems, the
- * SGI IP28 Indigo² rsp. SGI IP32 aka O2.
- */
-static inline int cpu_needs_post_dma_flush(struct device *dev)
-{
- return !plat_device_is_coherent(dev) &&
- (boot_cpu_type() == CPU_R10000 ||
- boot_cpu_type() == CPU_R12000 ||
- boot_cpu_type() == CPU_BMIPS5000);
-}
-
 static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
 {
 gfp_t dma_flag;
@@ -129,7 +100,7 @@ static void *mips_dma_alloc_noncoherent(
 return ret;
 }

-static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
+void *mips_dma_alloc_coherent(struct device *dev, size_t size,
 dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
 {
 void *ret;
@@ -165,6 +136,7 @@ static void *mips_dma_alloc_coherent(str

 return ret;
 }
+EXPORT_SYMBOL(mips_dma_alloc_coherent);


 static void mips_dma_free_noncoherent(struct device *dev, size_t size,
@@ -174,7 +146,7 @@ static void mips_dma_free_noncoherent(st
 free_pages((unsigned long) vaddr, get_order(size));
 }

-static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 unsigned long addr = (unsigned long) vaddr;
@@ -196,40 +168,7 @@ static void mips_dma_free_coherent(struc
 if (!dma_release_from_contiguous(dev, page, count))
 __free_pages(page, get_order(size));
 }
-
-static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
-{
- unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
- unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- unsigned long addr = (unsigned long)cpu_addr;
- unsigned long off = vma->vm_pgoff;
- unsigned long pfn;
- int ret = -ENXIO;
-
- if (!plat_device_is_coherent(dev) && !hw_coherentio)
- addr = CAC_ADDR(addr);
-
- pfn = page_to_pfn(virt_to_page((void *)addr));
-
- if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- else
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
- return ret;
-
- if (off < count && user_count <= (count - off)) {
- ret = remap_pfn_range(vma, vma->vm_start,
- pfn + off,
- user_count << PAGE_SHIFT,
- vma->vm_page_prot);
- }
-
- return ret;
-}
+EXPORT_SYMBOL(mips_dma_free_coherent);

 static inline void __dma_sync_virtual(void *addr, size_t size,
 enum dma_data_direction direction)
@@ -258,7 +197,7 @@ static inline void __dma_sync_virtual(vo
 * If highmem is not configured then the bulk of this loop gets
 * optimized out.
 */
-static inline void __dma_sync(struct page *page,
+void __dma_sync(struct page *page,
 unsigned long offset, size_t size, enum dma_data_direction direction)
 {
 size_t left = size;
@@ -288,120 +227,7 @@ static inline void __dma_sync(struct pag
 left -= len;
 } while (left);
 }
-
-static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
-{
- if (cpu_needs_post_dma_flush(dev))
- __dma_sync(dma_addr_to_page(dev, dma_addr),
- dma_addr & ~PAGE_MASK, size, direction);
- plat_post_dma_flush(dev);
- plat_unmap_dma_mem(dev, dma_addr, size, direction);
-}
-
-static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nents, i) {
- if (!plat_device_is_coherent(dev))
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
- sg->dma_length = sg->length;
-#endif
- sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
- sg->offset;
- }
-
- return nents;
-}
-
-static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
-{
- if (!plat_device_is_coherent(dev))
- __dma_sync(page, offset, size, direction);
-
- return plat_map_dma_mem_page(dev, page) + offset;
-}
-
-static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nhwentries, enum dma_data_direction direction,
- struct dma_attrs *attrs)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nhwentries, i) {
- if (!plat_device_is_coherent(dev) &&
- direction != DMA_TO_DEVICE)
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
- plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
- }
-}
-
-static void mips_dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
- if (cpu_needs_post_dma_flush(dev))
- __dma_sync(dma_addr_to_page(dev, dma_handle),
- dma_handle & ~PAGE_MASK, size, direction);
- plat_post_dma_flush(dev);
-}
-
-static void mips_dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
-{
- if (!plat_device_is_coherent(dev))
- __dma_sync(dma_addr_to_page(dev, dma_handle),
- dma_handle & ~PAGE_MASK, size, direction);
-}
-
-static void mips_dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sglist, int nelems,
- enum dma_data_direction direction)
-{
- int i;
- struct scatterlist *sg;
-
- if (cpu_needs_post_dma_flush(dev)) {
- for_each_sg(sglist, sg, nelems, i) {
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
- }
- }
- plat_post_dma_flush(dev);
-}
-
-static void mips_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sglist, int nelems,
- enum dma_data_direction direction)
-{
- int i;
- struct scatterlist *sg;
-
- if (!plat_device_is_coherent(dev)) {
- for_each_sg(sglist, sg, nelems, i) {
- __dma_sync(sg_page(sg), sg->offset, sg->length,
- direction);
- }
- }
-}
-
-int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
-
-int mips_dma_supported(struct device *dev, u64 mask)
-{
- return plat_dma_supported(dev, mask);
-}
+EXPORT_SYMBOL(__dma_sync);

 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 enum dma_data_direction direction)
@@ -414,24 +240,10 @@ void dma_cache_sync(struct device *dev,

 EXPORT_SYMBOL(dma_cache_sync);

-static struct dma_map_ops mips_default_dma_map_ops = {
- .alloc = mips_dma_alloc_coherent,
- .free = mips_dma_free_coherent,
- .mmap = mips_dma_mmap,
- .map_page = mips_dma_map_page,
- .unmap_page = mips_dma_unmap_page,
- .map_sg = mips_dma_map_sg,
- .unmap_sg = mips_dma_unmap_sg,
- .sync_single_for_cpu = mips_dma_sync_single_for_cpu,
- .sync_single_for_device = mips_dma_sync_single_for_device,
- .sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
- .sync_sg_for_device = mips_dma_sync_sg_for_device,
- .mapping_error = mips_dma_mapping_error,
- .dma_supported = mips_dma_supported
-};
-
-struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
+#ifdef CONFIG_SYS_HAS_DMA_OPS
+struct dma_map_ops *mips_dma_map_ops = NULL;
 EXPORT_SYMBOL(mips_dma_map_ops);
+#endif

 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
