openwrt/staging/chunkeey.git: target/linux/xburst/patches-3.10/011-dma-Add-a-jz4740-dmaengine-driver.patch
1 From 8c53b6491806a37d6999886d22c34bfed310034c Mon Sep 17 00:00:00 2001
2 From: Lars-Peter Clausen <lars@metafoo.de>
3 Date: Thu, 30 May 2013 18:25:02 +0200
4 Subject: [PATCH 11/16] dma: Add a jz4740 dmaengine driver
5
6 This patch adds dmaengine support for the JZ4740 DMA controller. For now the
7 driver will be a wrapper around the custom JZ4740 DMA API. Once all users of the
8 custom JZ4740 DMA API have been converted to the dmaengine API, the custom API
9 will be removed and direct hardware access will be added to the dmaengine
10 driver.
11
12 Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
13 Signed-off-by: Vinod Koul <vinod.koul@intel.com>
14 ---
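
For reference, a client converted to the dmaengine API drives this provider through
the generic slave calls rather than the custom jz4740_dma_* functions. The following
is a minimal, purely illustrative sketch; the filter match, the request line passed
in slave_id and the FIFO address are placeholder values and are not defined by this
patch.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/string.h>

/* Accept any channel exported by the "jz4740-dma" platform device. */
static bool example_filter(struct dma_chan *chan, void *param)
{
	return !strcmp(dev_name(chan->device->dev), "jz4740-dma");
}

/* Queue one already-mapped buffer for a mem-to-device transfer. */
static int example_start_tx(dma_addr_t buf, size_t len,
			    dma_addr_t fifo_addr, unsigned int req_type)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 16,	/* jz4740_dma_maxburst() picks 16-byte bursts */
		.slave_id	= req_type,	/* forwarded as jzcfg.request_type */
	};
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, example_filter, NULL);
	if (!chan)
		return -ENODEV;

	dmaengine_slave_config(chan, &cfg);
	tx = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}

Whether clients end up matching channels with a filter like this or through a
platform-data lookup is left to the follow-up conversion patches.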
15 drivers/dma/Kconfig | 6 +
16 drivers/dma/Makefile | 1 +
17 drivers/dma/dma-jz4740.c | 433 ++++++++++++++++++++++++++++++++++++++++++++++
18 3 files changed, 440 insertions(+)
19 create mode 100644 drivers/dma/dma-jz4740.c
20
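Along the same lines, jz4740_dma_prep_dma_cyclic() below splits a ring buffer into
buf_len / period_len equal segments and, once the last segment completes, restarts
from the first; buffer lengths that are not a whole multiple of the period are
rejected. A rough caller-side sketch, assuming a channel that has already been
configured as in the previous snippet; example_start_cyclic(), the ring size and
the callback names are illustrative placeholders.

static int example_start_cyclic(struct dma_chan *chan, dma_addr_t ring_dma,
				void (*period_elapsed)(void *arg), void *arg)
{
	struct dma_async_tx_descriptor *tx;

	/* 16 KiB ring split into four 4 KiB periods: the driver builds one
	 * sg entry per period and wraps next_sg back to 0 after the last. */
	tx = dmaengine_prep_dma_cyclic(chan, ring_dma, 16384, 4096,
				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;	/* also NULL if buf_len % period_len != 0 */

	tx->callback = period_elapsed;	/* invoked via vchan_cyclic_callback() */
	tx->callback_param = arg;
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}
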
21 diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
22 index e992489..b3e8952 100644
23 --- a/drivers/dma/Kconfig
24 +++ b/drivers/dma/Kconfig
25 @@ -312,6 +312,12 @@ config MMP_PDMA
26 help
27 Support the MMP PDMA engine for PXA and MMP platfrom.
28
29 +config DMA_JZ4740
30 + tristate "JZ4740 DMA support"
31 + depends on MACH_JZ4740
32 + select DMA_ENGINE
33 + select DMA_VIRTUAL_CHANNELS
34 +
35 config DMA_ENGINE
36 bool
37
38 diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
39 index a2b0df5..6127a61 100644
40 --- a/drivers/dma/Makefile
41 +++ b/drivers/dma/Makefile
42 @@ -38,3 +38,4 @@ obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
43 obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
44 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
45 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
46 +obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
47 diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
48 new file mode 100644
49 index 0000000..3d42434
50 --- /dev/null
51 +++ b/drivers/dma/dma-jz4740.c
52 @@ -0,0 +1,433 @@
53 +/*
54 + * Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
55 + * JZ4740 DMAC support
56 + *
57 + * This program is free software; you can redistribute it and/or modify it
58 + * under the terms of the GNU General Public License as published by the
59 + * Free Software Foundation; either version 2 of the License, or (at your
60 + * option) any later version.
61 + *
62 + * You should have received a copy of the GNU General Public License along
63 + * with this program; if not, write to the Free Software Foundation, Inc.,
64 + * 675 Mass Ave, Cambridge, MA 02139, USA.
65 + *
66 + */
67 +
68 +#include <linux/dmaengine.h>
69 +#include <linux/dma-mapping.h>
70 +#include <linux/err.h>
71 +#include <linux/init.h>
72 +#include <linux/list.h>
73 +#include <linux/module.h>
74 +#include <linux/platform_device.h>
75 +#include <linux/slab.h>
76 +#include <linux/spinlock.h>
77 +
78 +#include <asm/mach-jz4740/dma.h>
79 +
80 +#include "virt-dma.h"
81 +
82 +#define JZ_DMA_NR_CHANS 6
83 +
84 +struct jz4740_dma_sg {
85 + dma_addr_t addr;
86 + unsigned int len;
87 +};
88 +
89 +struct jz4740_dma_desc {
90 + struct virt_dma_desc vdesc;
91 +
92 + enum dma_transfer_direction direction;
93 + bool cyclic;
94 +
95 + unsigned int num_sgs;
96 + struct jz4740_dma_sg sg[];
97 +};
98 +
99 +struct jz4740_dmaengine_chan {
100 + struct virt_dma_chan vchan;
101 + struct jz4740_dma_chan *jz_chan;
102 +
103 + dma_addr_t fifo_addr;
104 +
105 + struct jz4740_dma_desc *desc;
106 + unsigned int next_sg;
107 +};
108 +
109 +struct jz4740_dma_dev {
110 + struct dma_device ddev;
111 +
112 + struct jz4740_dmaengine_chan chan[JZ_DMA_NR_CHANS];
113 +};
114 +
115 +static struct jz4740_dmaengine_chan *to_jz4740_dma_chan(struct dma_chan *c)
116 +{
117 + return container_of(c, struct jz4740_dmaengine_chan, vchan.chan);
118 +}
119 +
120 +static struct jz4740_dma_desc *to_jz4740_dma_desc(struct virt_dma_desc *vdesc)
121 +{
122 + return container_of(vdesc, struct jz4740_dma_desc, vdesc);
123 +}
124 +
125 +static struct jz4740_dma_desc *jz4740_dma_alloc_desc(unsigned int num_sgs)
126 +{
127 + return kzalloc(sizeof(struct jz4740_dma_desc) +
128 + sizeof(struct jz4740_dma_sg) * num_sgs, GFP_ATOMIC);
129 +}
130 +
131 +static enum jz4740_dma_width jz4740_dma_width(enum dma_slave_buswidth width)
132 +{
133 + switch (width) {
134 + case DMA_SLAVE_BUSWIDTH_1_BYTE:
135 + return JZ4740_DMA_WIDTH_8BIT;
136 + case DMA_SLAVE_BUSWIDTH_2_BYTES:
137 + return JZ4740_DMA_WIDTH_16BIT;
138 + case DMA_SLAVE_BUSWIDTH_4_BYTES:
139 + return JZ4740_DMA_WIDTH_32BIT;
140 + default:
141 + return JZ4740_DMA_WIDTH_32BIT;
142 + }
143 +}
144 +
145 +static enum jz4740_dma_transfer_size jz4740_dma_maxburst(u32 maxburst)
146 +{
147 + if (maxburst <= 1)
148 + return JZ4740_DMA_TRANSFER_SIZE_1BYTE;
149 + else if (maxburst <= 3)
150 + return JZ4740_DMA_TRANSFER_SIZE_2BYTE;
151 + else if (maxburst <= 15)
152 + return JZ4740_DMA_TRANSFER_SIZE_4BYTE;
153 + else if (maxburst <= 31)
154 + return JZ4740_DMA_TRANSFER_SIZE_16BYTE;
155 +
156 + return JZ4740_DMA_TRANSFER_SIZE_32BYTE;
157 +}
158 +
159 +static int jz4740_dma_slave_config(struct dma_chan *c,
160 + const struct dma_slave_config *config)
161 +{
162 + struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
163 + struct jz4740_dma_config jzcfg;
164 +
165 + switch (config->direction) {
166 + case DMA_MEM_TO_DEV:
167 + jzcfg.flags = JZ4740_DMA_SRC_AUTOINC;
168 + jzcfg.transfer_size = jz4740_dma_maxburst(config->dst_maxburst);
169 + chan->fifo_addr = config->dst_addr;
170 + break;
171 + case DMA_DEV_TO_MEM:
172 + jzcfg.flags = JZ4740_DMA_DST_AUTOINC;
173 + jzcfg.transfer_size = jz4740_dma_maxburst(config->src_maxburst);
174 + chan->fifo_addr = config->src_addr;
175 + break;
176 + default:
177 + return -EINVAL;
178 + }
179 +
180 +
181 + jzcfg.src_width = jz4740_dma_width(config->src_addr_width);
182 + jzcfg.dst_width = jz4740_dma_width(config->dst_addr_width);
183 + jzcfg.mode = JZ4740_DMA_MODE_SINGLE;
184 + jzcfg.request_type = config->slave_id;
185 +
186 + jz4740_dma_configure(chan->jz_chan, &jzcfg);
187 +
188 + return 0;
189 +}
190 +
191 +static int jz4740_dma_terminate_all(struct dma_chan *c)
192 +{
193 + struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
194 + unsigned long flags;
195 + LIST_HEAD(head);
196 +
197 + spin_lock_irqsave(&chan->vchan.lock, flags);
198 + jz4740_dma_disable(chan->jz_chan);
199 + chan->desc = NULL;
200 + vchan_get_all_descriptors(&chan->vchan, &head);
201 + spin_unlock_irqrestore(&chan->vchan.lock, flags);
202 +
203 + vchan_dma_desc_free_list(&chan->vchan, &head);
204 +
205 + return 0;
206 +}
207 +
208 +static int jz4740_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
209 + unsigned long arg)
210 +{
211 + struct dma_slave_config *config = (struct dma_slave_config *)arg;
212 +
213 + switch (cmd) {
214 + case DMA_SLAVE_CONFIG:
215 + return jz4740_dma_slave_config(chan, config);
216 + case DMA_TERMINATE_ALL:
217 + return jz4740_dma_terminate_all(chan);
218 + default:
219 + return -ENOSYS;
220 + }
221 +}
222 +
223 +static int jz4740_dma_start_transfer(struct jz4740_dmaengine_chan *chan)
224 +{
225 + dma_addr_t src_addr, dst_addr;
226 + struct virt_dma_desc *vdesc;
227 + struct jz4740_dma_sg *sg;
228 +
229 + jz4740_dma_disable(chan->jz_chan);
230 +
231 + if (!chan->desc) {
232 + vdesc = vchan_next_desc(&chan->vchan);
233 + if (!vdesc)
234 + return 0;
235 + chan->desc = to_jz4740_dma_desc(vdesc);
236 + chan->next_sg = 0;
237 + }
238 +
239 + if (chan->next_sg == chan->desc->num_sgs)
240 + chan->next_sg = 0;
241 +
242 + sg = &chan->desc->sg[chan->next_sg];
243 +
244 + if (chan->desc->direction == DMA_MEM_TO_DEV) {
245 + src_addr = sg->addr;
246 + dst_addr = chan->fifo_addr;
247 + } else {
248 + src_addr = chan->fifo_addr;
249 + dst_addr = sg->addr;
250 + }
251 + jz4740_dma_set_src_addr(chan->jz_chan, src_addr);
252 + jz4740_dma_set_dst_addr(chan->jz_chan, dst_addr);
253 + jz4740_dma_set_transfer_count(chan->jz_chan, sg->len);
254 +
255 + chan->next_sg++;
256 +
257 + jz4740_dma_enable(chan->jz_chan);
258 +
259 + return 0;
260 +}
261 +
262 +static void jz4740_dma_complete_cb(struct jz4740_dma_chan *jz_chan, int error,
263 + void *devid)
264 +{
265 + struct jz4740_dmaengine_chan *chan = devid;
266 +
267 + spin_lock(&chan->vchan.lock);
268 + if (chan->desc) {
269 +		if (chan->desc->cyclic) {
270 + vchan_cyclic_callback(&chan->desc->vdesc);
271 + } else {
272 + if (chan->next_sg == chan->desc->num_sgs) {
273 +				vchan_cookie_complete(&chan->desc->vdesc);
274 +				chan->desc = NULL;
275 + }
276 + }
277 + }
278 + jz4740_dma_start_transfer(chan);
279 + spin_unlock(&chan->vchan.lock);
280 +}
281 +
282 +static void jz4740_dma_issue_pending(struct dma_chan *c)
283 +{
284 + struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
285 + unsigned long flags;
286 +
287 + spin_lock_irqsave(&chan->vchan.lock, flags);
288 + if (vchan_issue_pending(&chan->vchan) && !chan->desc)
289 + jz4740_dma_start_transfer(chan);
290 + spin_unlock_irqrestore(&chan->vchan.lock, flags);
291 +}
292 +
293 +static struct dma_async_tx_descriptor *jz4740_dma_prep_slave_sg(
294 + struct dma_chan *c, struct scatterlist *sgl,
295 + unsigned int sg_len, enum dma_transfer_direction direction,
296 + unsigned long flags, void *context)
297 +{
298 + struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
299 + struct jz4740_dma_desc *desc;
300 + struct scatterlist *sg;
301 + unsigned int i;
302 +
303 + desc = jz4740_dma_alloc_desc(sg_len);
304 + if (!desc)
305 + return NULL;
306 +
307 + for_each_sg(sgl, sg, sg_len, i) {
308 + desc->sg[i].addr = sg_dma_address(sg);
309 + desc->sg[i].len = sg_dma_len(sg);
310 + }
311 +
312 + desc->num_sgs = sg_len;
313 + desc->direction = direction;
314 + desc->cyclic = false;
315 +
316 + return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
317 +}
318 +
319 +static struct dma_async_tx_descriptor *jz4740_dma_prep_dma_cyclic(
320 + struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
321 + size_t period_len, enum dma_transfer_direction direction,
322 + unsigned long flags, void *context)
323 +{
324 + struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
325 + struct jz4740_dma_desc *desc;
326 + unsigned int num_periods, i;
327 +
328 + if (buf_len % period_len)
329 + return NULL;
330 +
331 + num_periods = buf_len / period_len;
332 +
333 + desc = jz4740_dma_alloc_desc(num_periods);
334 + if (!desc)
335 + return NULL;
336 +
337 + for (i = 0; i < num_periods; i++) {
338 + desc->sg[i].addr = buf_addr;
339 + desc->sg[i].len = period_len;
340 + buf_addr += period_len;
341 + }
342 +
343 + desc->num_sgs = num_periods;
344 + desc->direction = direction;
345 + desc->cyclic = true;
346 +
347 + return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
348 +}
349 +
350 +static size_t jz4740_dma_desc_residue(struct jz4740_dmaengine_chan *chan,
351 + struct jz4740_dma_desc *desc, unsigned int next_sg)
352 +{
353 + size_t residue = 0;
354 + unsigned int i;
355 +
356 + residue = 0;
357 +
358 + for (i = next_sg; i < desc->num_sgs; i++)
359 + residue += desc->sg[i].len;
360 +
361 + if (next_sg != 0)
362 + residue += jz4740_dma_get_residue(chan->jz_chan);
363 +
364 + return residue;
365 +}
366 +
367 +static enum dma_status jz4740_dma_tx_status(struct dma_chan *c,
368 + dma_cookie_t cookie, struct dma_tx_state *state)
369 +{
370 + struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
371 + struct virt_dma_desc *vdesc;
372 + enum dma_status status;
373 + unsigned long flags;
374 +
375 + status = dma_cookie_status(c, cookie, state);
376 + if (status == DMA_SUCCESS || !state)
377 + return status;
378 +
379 + spin_lock_irqsave(&chan->vchan.lock, flags);
380 + vdesc = vchan_find_desc(&chan->vchan, cookie);
381 +	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie) {
382 + state->residue = jz4740_dma_desc_residue(chan, chan->desc,
383 + chan->next_sg);
384 + } else if (vdesc) {
385 + state->residue = jz4740_dma_desc_residue(chan,
386 + to_jz4740_dma_desc(vdesc), 0);
387 + } else {
388 + state->residue = 0;
389 + }
390 + spin_unlock_irqrestore(&chan->vchan.lock, flags);
391 +
392 + return status;
393 +}
394 +
395 +static int jz4740_dma_alloc_chan_resources(struct dma_chan *c)
396 +{
397 + struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
398 +
399 + chan->jz_chan = jz4740_dma_request(chan, NULL);
400 + if (!chan->jz_chan)
401 + return -EBUSY;
402 +
403 + jz4740_dma_set_complete_cb(chan->jz_chan, jz4740_dma_complete_cb);
404 +
405 + return 0;
406 +}
407 +
408 +static void jz4740_dma_free_chan_resources(struct dma_chan *c)
409 +{
410 + struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
411 +
412 + vchan_free_chan_resources(&chan->vchan);
413 + jz4740_dma_free(chan->jz_chan);
414 + chan->jz_chan = NULL;
415 +}
416 +
417 +static void jz4740_dma_desc_free(struct virt_dma_desc *vdesc)
418 +{
419 + kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc));
420 +}
421 +
422 +static int jz4740_dma_probe(struct platform_device *pdev)
423 +{
424 + struct jz4740_dmaengine_chan *chan;
425 + struct jz4740_dma_dev *dmadev;
426 + struct dma_device *dd;
427 + unsigned int i;
428 + int ret;
429 +
430 + dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
431 + if (!dmadev)
432 +		return -ENOMEM;
433 +
434 + dd = &dmadev->ddev;
435 +
436 + dma_cap_set(DMA_SLAVE, dd->cap_mask);
437 + dma_cap_set(DMA_CYCLIC, dd->cap_mask);
438 + dd->device_alloc_chan_resources = jz4740_dma_alloc_chan_resources;
439 + dd->device_free_chan_resources = jz4740_dma_free_chan_resources;
440 + dd->device_tx_status = jz4740_dma_tx_status;
441 + dd->device_issue_pending = jz4740_dma_issue_pending;
442 + dd->device_prep_slave_sg = jz4740_dma_prep_slave_sg;
443 + dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
444 + dd->device_control = jz4740_dma_control;
445 + dd->dev = &pdev->dev;
446 + dd->chancnt = JZ_DMA_NR_CHANS;
447 + INIT_LIST_HEAD(&dd->channels);
448 +
449 + for (i = 0; i < dd->chancnt; i++) {
450 + chan = &dmadev->chan[i];
451 + chan->vchan.desc_free = jz4740_dma_desc_free;
452 + vchan_init(&chan->vchan, dd);
453 + }
454 +
455 + ret = dma_async_device_register(dd);
456 + if (ret)
457 + return ret;
458 +
459 + platform_set_drvdata(pdev, dmadev);
460 +
461 + return 0;
462 +}
463 +
464 +static int jz4740_dma_remove(struct platform_device *pdev)
465 +{
466 + struct jz4740_dma_dev *dmadev = platform_get_drvdata(pdev);
467 +
468 + dma_async_device_unregister(&dmadev->ddev);
469 +
470 + return 0;
471 +}
472 +
473 +static struct platform_driver jz4740_dma_driver = {
474 + .probe = jz4740_dma_probe,
475 + .remove = jz4740_dma_remove,
476 + .driver = {
477 + .name = "jz4740-dma",
478 + .owner = THIS_MODULE,
479 + },
480 +};
481 +module_platform_driver(jz4740_dma_driver);
482 +
483 +MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
484 +MODULE_DESCRIPTION("JZ4740 DMA driver");
485 +MODULE_LICENSE("GPL v2");
486 --
487 1.7.10.4
488