kernel: update 4.1 to 4.1.10
[openwrt/openwrt.git] / target / linux / brcm2708 / patches-4.1 / 0156-spi-bcm2835-enable-dma-modes-for-transfers-meeting-c.patch
1 From abff2f91fd0f8163b065b92786be93562c7e67af Mon Sep 17 00:00:00 2001
2 From: Martin Sperl <kernel@martin.sperl.org>
3 Date: Sun, 10 May 2015 20:47:28 +0000
4 Subject: [PATCH 156/171] spi: bcm2835: enable dma modes for transfers meeting
5 certain conditions
6 MIME-Version: 1.0
7 Content-Type: text/plain; charset=UTF-8
8 Content-Transfer-Encoding: 8bit
9
10 Conditions per spi_transfer are:
11 * transfer.len >= 96 bytes (to avoid mapping overhead costs)
12 * transfer.len < 65536 bytes (limitation by spi-hw block - could get extended)
13 * an individual scatter/gather transfer length must be a multiple of 4
14 for anything but the last transfer - spi-hw block limit.
15 (some shortcut has been taken in can_dma to avoid unnecessary mapping of
16 pages which, for which there is a chance that there is a split with a
17 transfer length not a multiple of 4)
18
19 If it becomes a necessity these restrictions can get removed by additional
20 code.
21
22 Note that this patch requires a patch to dma-bcm2835.c by Noralf to
23 enable scatter-gather mode inside the dmaengine, which has not been
24 merged yet.
25
26 That is why no patch to arch/arm/boot/dts/bcm2835.dtsi is included - the
27 code works as before without dma when tx/rx are not set, but it writes
28 a message warning about dma not used:
29 spi-bcm2835 20204000.spi: no tx-dma configuration found - not using dma mode
30
31 To enable dma-mode add the following lines to the device-tree:
32 dmas = <&dma 6>, <&dma 7>;
33 dma-names = "tx", "rx";
34
35 Tested-by: Noralf Trønnes <noralf@tronnes.org> (private communication)
36 Signed-off-by: Martin Sperl <kernel@martin.sperl.org>
37 Signed-off-by: Mark Brown <broonie@kernel.org>
38 (cherry picked from commit 3ecd37edaa2a6ba3246e2c35714be9316b1087fe)
39 ---
40 drivers/spi/spi-bcm2835.c | 303 +++++++++++++++++++++++++++++++++++++++++++++-
41 1 file changed, 301 insertions(+), 2 deletions(-)
42
43 --- a/drivers/spi/spi-bcm2835.c
44 +++ b/drivers/spi/spi-bcm2835.c
45 @@ -23,15 +23,18 @@
46 #include <linux/clk.h>
47 #include <linux/completion.h>
48 #include <linux/delay.h>
49 +#include <linux/dma-mapping.h>
50 +#include <linux/dmaengine.h>
51 #include <linux/err.h>
52 #include <linux/interrupt.h>
53 #include <linux/io.h>
54 #include <linux/kernel.h>
55 #include <linux/module.h>
56 #include <linux/of.h>
57 -#include <linux/of_irq.h>
58 -#include <linux/of_gpio.h>
59 +#include <linux/of_address.h>
60 #include <linux/of_device.h>
61 +#include <linux/of_gpio.h>
62 +#include <linux/of_irq.h>
63 #include <linux/spi/spi.h>
64
65 /* SPI register offsets */
66 @@ -70,6 +73,7 @@
67
68 #define BCM2835_SPI_POLLING_LIMIT_US 30
69 #define BCM2835_SPI_POLLING_JIFFIES 2
70 +#define BCM2835_SPI_DMA_MIN_LENGTH 96
71 #define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
72 | SPI_NO_CS | SPI_3WIRE)
73
74 @@ -83,6 +87,7 @@ struct bcm2835_spi {
75 u8 *rx_buf;
76 int tx_len;
77 int rx_len;
78 + bool dma_pending;
79 };
80
81 static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg)
82 @@ -128,12 +133,15 @@ static void bcm2835_spi_reset_hw(struct
83 /* Disable SPI interrupts and transfer */
84 cs &= ~(BCM2835_SPI_CS_INTR |
85 BCM2835_SPI_CS_INTD |
86 + BCM2835_SPI_CS_DMAEN |
87 BCM2835_SPI_CS_TA);
88 /* and reset RX/TX FIFOS */
89 cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX;
90
91 /* and reset the SPI_HW */
92 bcm2835_wr(bs, BCM2835_SPI_CS, cs);
93 + /* as well as DLEN */
94 + bcm2835_wr(bs, BCM2835_SPI_DLEN, 0);
95 }
96
97 static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
98 @@ -193,6 +201,279 @@ static int bcm2835_spi_transfer_one_irq(
99 return 1;
100 }
101
102 +/*
103 + * DMA support
104 + *
105 + * this implementation has currently a few issues in so far as it does
106 + * not work around limitations of the HW.
107 + *
108 + * the main one being that DMA transfers are limited to 16 bit
109 + * (so 0 to 65535 bytes) by the SPI HW due to BCM2835_SPI_DLEN
110 + *
111 + * also we currently assume that the scatter-gather fragments are
112 + * all multiple of 4 (except the last) - otherwise we would need
113 + * to reset the FIFO before subsequent transfers...
114 + * this also means that tx/rx transfers sg's need to be of equal size!
115 + *
116 + * there may be a few more border-cases we may need to address as well
117 + * but unfortunately this would mean splitting up the scatter-gather
118 + * list making it slightly impractical...
119 + */
120 +static void bcm2835_spi_dma_done(void *data)
121 +{
122 + struct spi_master *master = data;
123 + struct bcm2835_spi *bs = spi_master_get_devdata(master);
124 +
125 + /* reset fifo and HW */
126 + bcm2835_spi_reset_hw(master);
127 +
128 + /* and terminate tx-dma as we do not have an irq for it
129 + * because when the rx dma will terminate and this callback
130 + * is called the tx-dma must have finished - can't get to this
131 + * situation otherwise...
132 + */
133 + dmaengine_terminate_all(master->dma_tx);
134 +
135 + /* mark as no longer pending */
136 + bs->dma_pending = 0;
137 +
138 +	/* and mark as completed */
139 + complete(&master->xfer_completion);
140 +}
141 +
142 +static int bcm2835_spi_prepare_sg(struct spi_master *master,
143 + struct spi_transfer *tfr,
144 + bool is_tx)
145 +{
146 + struct dma_chan *chan;
147 + struct scatterlist *sgl;
148 + unsigned int nents;
149 + enum dma_transfer_direction dir;
150 + unsigned long flags;
151 +
152 + struct dma_async_tx_descriptor *desc;
153 + dma_cookie_t cookie;
154 +
155 + if (is_tx) {
156 + dir = DMA_MEM_TO_DEV;
157 + chan = master->dma_tx;
158 + nents = tfr->tx_sg.nents;
159 + sgl = tfr->tx_sg.sgl;
160 + flags = 0 /* no tx interrupt */;
161 +
162 + } else {
163 + dir = DMA_DEV_TO_MEM;
164 + chan = master->dma_rx;
165 + nents = tfr->rx_sg.nents;
166 + sgl = tfr->rx_sg.sgl;
167 + flags = DMA_PREP_INTERRUPT;
168 + }
169 + /* prepare the channel */
170 + desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
171 + if (!desc)
172 + return -EINVAL;
173 +
174 + /* set callback for rx */
175 + if (!is_tx) {
176 + desc->callback = bcm2835_spi_dma_done;
177 + desc->callback_param = master;
178 + }
179 +
180 + /* submit it to DMA-engine */
181 + cookie = dmaengine_submit(desc);
182 +
183 + return dma_submit_error(cookie);
184 +}
185 +
186 +static inline int bcm2835_check_sg_length(struct sg_table *sgt)
187 +{
188 + int i;
189 + struct scatterlist *sgl;
190 +
191 + /* check that the sg entries are word-sized (except for last) */
192 + for_each_sg(sgt->sgl, sgl, (int)sgt->nents - 1, i) {
193 + if (sg_dma_len(sgl) % 4)
194 + return -EFAULT;
195 + }
196 +
197 + return 0;
198 +}
199 +
200 +static int bcm2835_spi_transfer_one_dma(struct spi_master *master,
201 + struct spi_device *spi,
202 + struct spi_transfer *tfr,
203 + u32 cs)
204 +{
205 + struct bcm2835_spi *bs = spi_master_get_devdata(master);
206 + int ret;
207 +
208 + /* check that the scatter gather segments are all a multiple of 4 */
209 + if (bcm2835_check_sg_length(&tfr->tx_sg) ||
210 + bcm2835_check_sg_length(&tfr->rx_sg)) {
211 + dev_warn_once(&spi->dev,
212 + "scatter gather segment length is not a multiple of 4 - falling back to interrupt mode\n");
213 + return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs);
214 + }
215 +
216 + /* setup tx-DMA */
217 + ret = bcm2835_spi_prepare_sg(master, tfr, true);
218 + if (ret)
219 + return ret;
220 +
221 + /* start TX early */
222 + dma_async_issue_pending(master->dma_tx);
223 +
224 + /* mark as dma pending */
225 + bs->dma_pending = 1;
226 +
227 + /* set the DMA length */
228 + bcm2835_wr(bs, BCM2835_SPI_DLEN, tfr->len);
229 +
230 + /* start the HW */
231 + bcm2835_wr(bs, BCM2835_SPI_CS,
232 + cs | BCM2835_SPI_CS_TA | BCM2835_SPI_CS_DMAEN);
233 +
234 + /* setup rx-DMA late - to run transfers while
235 + * mapping of the rx buffers still takes place
236 + * this saves 10us or more.
237 + */
238 + ret = bcm2835_spi_prepare_sg(master, tfr, false);
239 + if (ret) {
240 + /* need to reset on errors */
241 + dmaengine_terminate_all(master->dma_tx);
242 + bcm2835_spi_reset_hw(master);
243 + return ret;
244 + }
245 +
246 + /* start rx dma late */
247 + dma_async_issue_pending(master->dma_rx);
248 +
249 + /* wait for wakeup in framework */
250 + return 1;
251 +}
252 +
253 +static bool bcm2835_spi_can_dma(struct spi_master *master,
254 + struct spi_device *spi,
255 + struct spi_transfer *tfr)
256 +{
257 + /* only run for gpio_cs */
258 + if (!gpio_is_valid(spi->cs_gpio))
259 + return false;
260 +
261 + /* we start DMA efforts only on bigger transfers */
262 + if (tfr->len < BCM2835_SPI_DMA_MIN_LENGTH)
263 + return false;
264 +
265 + /* BCM2835_SPI_DLEN has defined a max transfer size as
266 + * 16 bit, so max is 65535
267 + * we can revisit this by using an alternative transfer
268 + * method - ideally this would get done without any more
269 + * interaction...
270 + */
271 + if (tfr->len > 65535) {
272 + dev_warn_once(&spi->dev,
273 + "transfer size of %d too big for dma-transfer\n",
274 + tfr->len);
275 + return false;
276 + }
277 +
278 + /* if we run rx/tx_buf with word aligned addresses then we are OK */
279 +	if (((u32)tfr->tx_buf % 4 == 0) && ((u32)tfr->rx_buf % 4 == 0))
280 + return true;
281 +
282 + /* otherwise we only allow transfers within the same page
283 + * to avoid wasting time on dma_mapping when it is not practical
284 + */
285 + if (((u32)tfr->tx_buf % SZ_4K) + tfr->len > SZ_4K) {
286 + dev_warn_once(&spi->dev,
287 + "Unaligned spi tx-transfer bridging page\n");
288 + return false;
289 + }
290 + if (((u32)tfr->rx_buf % SZ_4K) + tfr->len > SZ_4K) {
291 + dev_warn_once(&spi->dev,
292 +			      "Unaligned spi rx-transfer bridging page\n");
293 + return false;
294 + }
295 +
296 + /* return OK */
297 + return true;
298 +}
299 +
300 +void bcm2835_dma_release(struct spi_master *master)
301 +{
302 + if (master->dma_tx) {
303 + dmaengine_terminate_all(master->dma_tx);
304 + dma_release_channel(master->dma_tx);
305 + master->dma_tx = NULL;
306 + }
307 + if (master->dma_rx) {
308 + dmaengine_terminate_all(master->dma_rx);
309 + dma_release_channel(master->dma_rx);
310 + master->dma_rx = NULL;
311 + }
312 +}
313 +
314 +void bcm2835_dma_init(struct spi_master *master, struct device *dev)
315 +{
316 + struct dma_slave_config slave_config;
317 + const __be32 *addr;
318 + dma_addr_t dma_reg_base;
319 + int ret;
320 +
321 + /* base address in dma-space */
322 + addr = of_get_address(master->dev.of_node, 0, NULL, NULL);
323 + if (!addr) {
324 + dev_err(dev, "could not get DMA-register address - not using dma mode\n");
325 + goto err;
326 + }
327 + dma_reg_base = be32_to_cpup(addr);
328 +
329 + /* get tx/rx dma */
330 + master->dma_tx = dma_request_slave_channel(dev, "tx");
331 + if (!master->dma_tx) {
332 + dev_err(dev, "no tx-dma configuration found - not using dma mode\n");
333 + goto err;
334 + }
335 + master->dma_rx = dma_request_slave_channel(dev, "rx");
336 + if (!master->dma_rx) {
337 + dev_err(dev, "no rx-dma configuration found - not using dma mode\n");
338 + goto err_release;
339 + }
340 +
341 + /* configure DMAs */
342 + slave_config.direction = DMA_MEM_TO_DEV;
343 + slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
344 + slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
345 +
346 + ret = dmaengine_slave_config(master->dma_tx, &slave_config);
347 + if (ret)
348 + goto err_config;
349 +
350 + slave_config.direction = DMA_DEV_TO_MEM;
351 + slave_config.src_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
352 + slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
353 +
354 + ret = dmaengine_slave_config(master->dma_rx, &slave_config);
355 + if (ret)
356 + goto err_config;
357 +
358 + /* all went well, so set can_dma */
359 + master->can_dma = bcm2835_spi_can_dma;
360 + master->max_dma_len = 65535; /* limitation by BCM2835_SPI_DLEN */
361 + /* need to do TX AND RX DMA, so we need dummy buffers */
362 + master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
363 +
364 + return;
365 +
366 +err_config:
367 + dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
368 + ret);
369 +err_release:
370 + bcm2835_dma_release(master);
371 +err:
372 + return;
373 +}
374 +
375 static int bcm2835_spi_transfer_one_poll(struct spi_master *master,
376 struct spi_device *spi,
377 struct spi_transfer *tfr,
378 @@ -299,6 +580,11 @@ static int bcm2835_spi_transfer_one(stru
379 return bcm2835_spi_transfer_one_poll(master, spi, tfr,
380 cs, xfer_time_us);
381
382 + /* run in dma mode if conditions are right */
383 + if (master->can_dma && bcm2835_spi_can_dma(master, spi, tfr))
384 + return bcm2835_spi_transfer_one_dma(master, spi, tfr, cs);
385 +
386 + /* run in interrupt-mode */
387 return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs);
388 }
389
390 @@ -324,6 +610,15 @@ static int bcm2835_spi_prepare_message(s
391 static void bcm2835_spi_handle_err(struct spi_master *master,
392 struct spi_message *msg)
393 {
394 + struct bcm2835_spi *bs = spi_master_get_devdata(master);
395 +
396 + /* if an error occurred and we have an active dma, then terminate */
397 + if (bs->dma_pending) {
398 + dmaengine_terminate_all(master->dma_tx);
399 + dmaengine_terminate_all(master->dma_rx);
400 + bs->dma_pending = 0;
401 + }
402 + /* and reset */
403 bcm2835_spi_reset_hw(master);
404 }
405
406 @@ -523,6 +818,8 @@ static int bcm2835_spi_probe(struct plat
407 goto out_clk_disable;
408 }
409
410 + bcm2835_dma_init(master, &pdev->dev);
411 +
412 /* initialise the hardware with the default polarities */
413 bcm2835_wr(bs, BCM2835_SPI_CS,
414 BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
415 @@ -553,6 +850,8 @@ static int bcm2835_spi_remove(struct pla
416
417 clk_disable_unprepare(bs->clk);
418
419 + bcm2835_dma_release(master);
420 +
421 return 0;
422 }
423