/*
 * arch/ubicom32/include/asm/dma-mapping.h
 *   Generic dma-mapping.h for Ubicom32 architecture.
 *
 * (C) Copyright 2009, Ubicom, Inc.
 *
 * This file is part of the Ubicom32 Linux Kernel Port.
 *
 * The Ubicom32 Linux Kernel Port is free software: you can redistribute
 * it and/or modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, either version 2 of the
 * License, or (at your option) any later version.
 *
 * The Ubicom32 Linux Kernel Port is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with the Ubicom32 Linux Kernel Port.  If not,
 * see <http://www.gnu.org/licenses/>.
 *
 * Ubicom32 implementation derived from (with many thanks):
 *   arch/m68knommu
 *   arch/blackfin
 *   arch/parisc
 */
#ifndef _ASM_UBICOM32_DMA_MAPPING_H
#define _ASM_UBICOM32_DMA_MAPPING_H

#include <linux/scatterlist.h>
#include <linux/cache.h>        /* for INTERNODE_CACHE_SHIFT */

#ifdef CONFIG_PCI

/*
 * We implement the API below in terms of the existing PCI one,
 * so include it.
 */
#include <linux/pci.h>
/* We need the struct page definitions. */
#include <linux/mm.h>
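
/*
 * Illustrative sketch (not part of the original header): in this
 * CONFIG_PCI branch each generic call is a thin wrapper over its PCI
 * counterpart, relying on the enum dma_data_direction values matching
 * the PCI_DMA_* constants (hence the (int) casts below).  A hypothetical
 * driver call such as
 *
 *        dma_addr_t h = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *
 * (pdev, buf and len being the driver's own state) therefore ends up as
 *
 *        pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
 */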

static inline int
dma_supported(struct device *dev, u64 mask)
{
        BUG_ON(dev->bus != &pci_bus_type);

        return pci_dma_supported(to_pci_dev(dev), mask);
}

static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
        BUG_ON(dev->bus != &pci_bus_type);

        return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
}

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t flag)
{
        BUG_ON(dev->bus != &pci_bus_type);

        /*
         * The gfp flag is dropped here: the PCI consistent API does not
         * take one.
         */
        return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
                  dma_addr_t dma_handle)
{
        BUG_ON(dev->bus != &pci_bus_type);

        pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(dev->bus != &pci_bus_type);

        return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction)
{
        BUG_ON(dev->bus != &pci_bus_type);

        pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size,
             enum dma_data_direction direction)
{
        BUG_ON(dev->bus != &pci_bus_type);

        return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(dev->bus != &pci_bus_type);

        pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
        BUG_ON(dev->bus != &pci_bus_type);

        return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
             enum dma_data_direction direction)
{
        BUG_ON(dev->bus != &pci_bus_type);

        pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction)
{
        BUG_ON(dev->bus != &pci_bus_type);

        pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
                                    size, (int)direction);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction direction)
{
        BUG_ON(dev->bus != &pci_bus_type);

        pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
                                       size, (int)direction);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                    enum dma_data_direction direction)
{
        BUG_ON(dev->bus != &pci_bus_type);

        pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                       enum dma_data_direction direction)
{
        BUG_ON(dev->bus != &pci_bus_type);

        pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction);
}

static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return pci_dma_mapping_error(to_pci_dev(dev), dma_addr);
}

#else

/*
 * Without PCI there is no DMA provider on this architecture, so every
 * operation except dma_supported() and dma_mapping_error() is a BUG().
 */

static inline int
dma_supported(struct device *dev, u64 mask)
{
        return 0;
}

static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
        BUG();
        return 0;
}

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t flag)
{
        BUG();
        return NULL;
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
                  dma_addr_t dma_handle)
{
        BUG();
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
               enum dma_data_direction direction)
{
        BUG();
        return 0;
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction)
{
        BUG();
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size,
             enum dma_data_direction direction)
{
        BUG();
        return 0;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
{
        BUG();
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
        BUG();
        return 0;
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
             enum dma_data_direction direction)
{
        BUG();
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction)
{
        BUG();
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction direction)
{
        BUG();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                    enum dma_data_direction direction)
{
        BUG();
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                       enum dma_data_direction direction)
{
        BUG();
}

static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}

#endif /* CONFIG_PCI */

/* Now for the API extensions over the pci_* one. */

/*
 * Coherent and non-coherent allocations are the same thing here, and
 * all memory is treated as consistent.
 */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h)  dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)           (1)

static inline int
dma_get_cache_alignment(void)
{
        /*
         * There is no easy way to get the cache size on all processors,
         * so return the maximum possible to be safe.
         */
        return (1 << INTERNODE_CACHE_SHIFT);
}
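
/*
 * Illustrative use (hypothetical driver code, not from the original
 * source): padding an allocation so the buffer does not share a cache
 * line with unrelated data, where payload_len is the driver's own
 * length:
 *
 *        size_t len = ALIGN(payload_len, dma_get_cache_alignment());
 *        void *buf = kmalloc(len, GFP_KERNEL);
 */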

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction direction)
{
        /* Just sync everything: that is all the PCI API can do. */
        dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction direction)
{
        /* Just sync everything: that is all the PCI API can do. */
        dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}
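
/*
 * Note on the two helpers above (explanatory, not from the original
 * source): the PCI sync API has no offset parameter, so syncing
 * offset + size bytes from the start of the mapping over-approximates
 * the requested window; e.g. offset = 64 and size = 128 sync bytes
 * 0..191, which covers the intended 64..191.
 */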

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
{
        /*
         * This could be defined in terms of the dma_cache_... operations,
         * but if you hit this on some platform, you should convert that
         * platform to the generic device DMA API instead.
         */
        BUG();
}
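
/*
 * Typical streaming-DMA usage against this API (illustrative only;
 * dev, buf and len are hypothetical driver state):
 *
 *        dma_addr_t handle;
 *
 *        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *        if (dma_mapping_error(dev, handle))
 *                return -ENOMEM;
 *        ...let the device read from "handle", then...
 *        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */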

#endif /* _ASM_UBICOM32_DMA_MAPPING_H */