target/linux/apm821xx/patches-4.4/011-dmaengine-core-Introduce-new-universal-API-to-reques.patch
From a8135d0d79e9d0ad3a4ff494fceeaae838becf38 Mon Sep 17 00:00:00 2001
From: Peter Ujfalusi <peter.ujfalusi@ti.com>
Date: Mon, 14 Dec 2015 22:47:40 +0200
Subject: [PATCH 2/3] dmaengine: core: Introduce new, universal API to request
 a channel

The two API functions can cover most, if not all, current APIs used to
request a channel. With minimal effort dmaengine drivers, platforms and
dmaengine user drivers can be converted to use the two functions.

struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);

Requests any channel matching the given capabilities; it can be used to
request a channel for memcpy, memset, xor, etc., where no hardware
synchronization is needed.

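As an illustration only (not part of the original patch), a client that
just needs a memcpy capable channel could use it roughly like this; error
handling is reduced to a bare minimum:

dma_cap_mask_t mask;
struct dma_chan *chan;

dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);

chan = dma_request_chan_by_mask(&mask);
if (IS_ERR(chan))
	return PTR_ERR(chan); /* typically -ENODEV, fall back to CPU copy */
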
struct dma_chan *dma_request_chan(struct device *dev, const char *name);

Requests a slave channel. dma_request_chan() will try to find the channel
via DT or ACPI; in case the kernel booted in non-DT/ACPI mode it will use
a filter lookup table and retrieve the needed information from the
dma_slave_map provided by the DMA drivers.
This legacy mode needs changes in platform code and in dmaengine drivers
before the dmaengine user drivers can finally be converted:

For each dmaengine driver an array of DMA device name, slave channel name
and filter function parameter entries needs to be added:

static const struct dma_slave_map da830_edma_map[] = {
	{ "davinci-mcasp.0", "rx", EDMA_FILTER_PARAM(0, 0) },
	{ "davinci-mcasp.0", "tx", EDMA_FILTER_PARAM(0, 1) },
	{ "davinci-mcasp.1", "rx", EDMA_FILTER_PARAM(0, 2) },
	{ "davinci-mcasp.1", "tx", EDMA_FILTER_PARAM(0, 3) },
	{ "davinci-mcasp.2", "rx", EDMA_FILTER_PARAM(0, 4) },
	{ "davinci-mcasp.2", "tx", EDMA_FILTER_PARAM(0, 5) },
	{ "spi_davinci.0", "rx", EDMA_FILTER_PARAM(0, 14) },
	{ "spi_davinci.0", "tx", EDMA_FILTER_PARAM(0, 15) },
	{ "da830-mmc.0", "rx", EDMA_FILTER_PARAM(0, 16) },
	{ "da830-mmc.0", "tx", EDMA_FILTER_PARAM(0, 17) },
	{ "spi_davinci.1", "rx", EDMA_FILTER_PARAM(0, 18) },
	{ "spi_davinci.1", "tx", EDMA_FILTER_PARAM(0, 19) },
};

This information is going to be needed by the dmaengine driver, so the
platform_data needs to be modified and the driver map should be added to
the pdata of the DMA driver:

da8xx_edma0_pdata.slave_map = da830_edma_map;
da8xx_edma0_pdata.slavecnt = ARRAY_SIZE(da830_edma_map);

The DMA driver then needs to configure the needed device -> filter_fn
mapping before it registers with dma_async_device_register():

ecc->dma_slave.filter.map = info->slave_map;
ecc->dma_slave.filter.mapcnt = info->slavecnt;
ecc->dma_slave.filter.fn = edma_filter_fn;

When neither DT nor ACPI lookup is available, dma_request_chan() will try
to match the requester's device name against the filter map's list of
device names; when a match is found, it will use the information from the
dma_slave_map to get the channel with the dma_get_channel() internal
function.

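For illustration only (again not part of the original patch), a slave
driver probe could switch to the new API along these lines; the "tx"
channel name and the pdev pointer are made-up placeholders:

chan = dma_request_chan(&pdev->dev, "tx");
if (IS_ERR(chan))
	return PTR_ERR(chan); /* may be -EPROBE_DEFER, so just pass it up */

/* ... configure and use the channel ... */

dma_release_channel(chan);
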
Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
---
 Documentation/dmaengine/client.txt | 23 +++-------
 drivers/dma/dmaengine.c | 89 +++++++++++++++++++++++++++++++++-----
 include/linux/dmaengine.h | 51 +++++++++++++++++++---
 3 files changed, 127 insertions(+), 36 deletions(-)

--- a/Documentation/dmaengine/client.txt
+++ b/Documentation/dmaengine/client.txt
@@ -22,25 +22,14 @@ The slave DMA usage consists of followin
 Channel allocation is slightly different in the slave DMA context,
 client drivers typically need a channel from a particular DMA
 controller only and even in some cases a specific channel is desired.
- To request a channel dma_request_channel() API is used.
+ To request a channel dma_request_chan() API is used.

 Interface:
- struct dma_chan *dma_request_channel(dma_cap_mask_t mask,
- dma_filter_fn filter_fn,
- void *filter_param);
- where dma_filter_fn is defined as:
- typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
+ struct dma_chan *dma_request_chan(struct device *dev, const char *name);

- The 'filter_fn' parameter is optional, but highly recommended for
- slave and cyclic channels as they typically need to obtain a specific
- DMA channel.
-
- When the optional 'filter_fn' parameter is NULL, dma_request_channel()
- simply returns the first channel that satisfies the capability mask.
-
- Otherwise, the 'filter_fn' routine will be called once for each free
- channel which has a capability in 'mask'. 'filter_fn' is expected to
- return 'true' when the desired DMA channel is found.
+ Which will find and return the 'name' DMA channel associated with the 'dev'
+ device. The association is done via DT, ACPI or board file based
+ dma_slave_map matching table.

 A channel allocated via this interface is exclusive to the caller,
 until dma_release_channel() is called.
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -43,6 +43,7 @@

 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

+#include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -665,27 +666,73 @@ struct dma_chan *__dma_request_channel(c
 }
 EXPORT_SYMBOL_GPL(__dma_request_channel);

+static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
+ const char *name,
+ struct device *dev)
+{
+ int i;
+
+ if (!device->filter.mapcnt)
+ return NULL;
+
+ for (i = 0; i < device->filter.mapcnt; i++) {
+ const struct dma_slave_map *map = &device->filter.map[i];
+
+ if (!strcmp(map->devname, dev_name(dev)) &&
+ !strcmp(map->slave, name))
+ return map;
+ }
+
+ return NULL;
+}
+
 /**
- * dma_request_slave_channel_reason - try to allocate an exclusive slave channel
+ * dma_request_chan - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
-struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
- const char *name)
+struct dma_chan *dma_request_chan(struct device *dev, const char *name)
 {
+ struct dma_device *d, *_d;
+ struct dma_chan *chan = NULL;
+
 /* If device-tree is present get slave info from here */
 if (dev->of_node)
- return of_dma_request_slave_channel(dev->of_node, name);
+ chan = of_dma_request_slave_channel(dev->of_node, name);

 /* If device was enumerated by ACPI get slave info from here */
- if (ACPI_HANDLE(dev))
- return acpi_dma_request_slave_chan_by_name(dev, name);
+ if (has_acpi_companion(dev) && !chan)
+ chan = acpi_dma_request_slave_chan_by_name(dev, name);
+
+ if (chan) {
+ /* Valid channel found or requester need to be deferred */
+ if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
+ return chan;
+ }
+
+ /* Try to find the channel via the DMA filter map(s) */
+ mutex_lock(&dma_list_mutex);
+ list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
+ dma_cap_mask_t mask;
+ const struct dma_slave_map *map = dma_filter_match(d, name, dev);
+
+ if (!map)
+ continue;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);

- return ERR_PTR(-ENODEV);
+ chan = find_candidate(d, &mask, d->filter.fn, map->param);
+ if (!IS_ERR(chan))
+ break;
+ }
+ mutex_unlock(&dma_list_mutex);
+
+ return chan ? chan : ERR_PTR(-EPROBE_DEFER);
 }
-EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
+EXPORT_SYMBOL_GPL(dma_request_chan);

 /**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
@@ -697,17 +744,35 @@ EXPORT_SYMBOL_GPL(dma_request_slave_chan
 struct dma_chan *dma_request_slave_channel(struct device *dev,
 const char *name)
 {
- struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
+ struct dma_chan *ch = dma_request_chan(dev, name);
 if (IS_ERR(ch))
 return NULL;

- dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
- ch->device->privatecnt++;
-
 return ch;
 }
 EXPORT_SYMBOL_GPL(dma_request_slave_channel);

+/**
+ * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
+ * @mask: capabilities that the channel must satisfy
+ *
+ * Returns pointer to appropriate DMA channel on success or an error pointer.
+ */
+struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
+{
+ struct dma_chan *chan;
+
+ if (!mask)
+ return ERR_PTR(-ENODEV);
+
+ chan = __dma_request_channel(mask, NULL, NULL);
+ if (!chan)
+ chan = ERR_PTR(-ENODEV);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
+
 void dma_release_channel(struct dma_chan *chan)
 {
 mutex_lock(&dma_list_mutex);
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -607,11 +607,38 @@ enum dmaengine_alignment {
 };

 /**
+ * struct dma_slave_map - associates slave device and it's slave channel with
+ * parameter to be used by a filter function
+ * @devname: name of the device
+ * @slave: slave channel name
+ * @param: opaque parameter to pass to struct dma_filter.fn
+ */
+struct dma_slave_map {
+ const char *devname;
+ const char *slave;
+ void *param;
+};
+
+/**
+ * struct dma_filter - information for slave device/channel to filter_fn/param
+ * mapping
+ * @fn: filter function callback
+ * @mapcnt: number of slave device/channel in the map
+ * @map: array of channel to filter mapping data
+ */
+struct dma_filter {
+ dma_filter_fn fn;
+ int mapcnt;
+ const struct dma_slave_map *map;
+};
+
+/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @privatecnt: how many DMA channels are requested by dma_request_channel
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
+ * @filter: information for device/slave to filter function/param mapping
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @max_pq: maximum number of PQ sources and PQ-continue capability
@@ -666,6 +693,7 @@ struct dma_device {
 unsigned int privatecnt;
 struct list_head channels;
 struct list_head global_node;
+ struct dma_filter filter;
 dma_cap_mask_t cap_mask;
 unsigned short max_xor;
 unsigned short max_pq;
@@ -1140,9 +1168,11 @@ enum dma_status dma_wait_for_async_tx(st
 void dma_issue_pending_all(void);
 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 dma_filter_fn fn, void *fn_param);
-struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
- const char *name);
 struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
+
+struct dma_chan *dma_request_chan(struct device *dev, const char *name);
+struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);
+
 void dma_release_channel(struct dma_chan *chan);
 int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
 #else
@@ -1166,16 +1196,21 @@ static inline struct dma_chan *__dma_req
 {
 return NULL;
 }
-static inline struct dma_chan *dma_request_slave_channel_reason(
- struct device *dev, const char *name)
-{
- return ERR_PTR(-ENODEV);
-}
 static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
 const char *name)
 {
 return NULL;
 }
+static inline struct dma_chan *dma_request_chan(struct device *dev,
+ const char *name)
+{
+ return ERR_PTR(-ENODEV);
+}
+static inline struct dma_chan *dma_request_chan_by_mask(
+ const dma_cap_mask_t *mask)
+{
+ return ERR_PTR(-ENODEV);
+}
 static inline void dma_release_channel(struct dma_chan *chan)
 {
 }
@@ -1186,6 +1221,8 @@ static inline int dma_get_slave_caps(str
 }
 #endif

+#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name)
+
 static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
 {
 struct dma_slave_caps caps;