layerscape: add linux 4.9 support
[openwrt/staging/wigyori.git] / target / linux / layerscape / patches-4.9 / 815-spi-support-layerscape.patch
1 From a12f522b48a8cb637c1c026b46a76b2ef7983f8d Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Mon, 25 Sep 2017 12:12:41 +0800
4 Subject: [PATCH] spi: support layerscape
5
6 This is an integrated patch for layerscape dspi support.
7
8 Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
9 Signed-off-by: Sanchayan Maity <maitysanchayan@gmail.com>
10 Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
11 Signed-off-by: Sanchayan Maity <maitysanchayan@gmail.com>
12 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
13 ---
14 drivers/spi/Kconfig | 1 +
15 drivers/spi/spi-fsl-dspi.c | 309 ++++++++++++++++++++++++++++++++++++++++++++-
16 2 files changed, 305 insertions(+), 5 deletions(-)
17
18 diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
19 index b7995474..8e281e47 100644
20 --- a/drivers/spi/Kconfig
21 +++ b/drivers/spi/Kconfig
22 @@ -365,6 +365,7 @@ config SPI_FSL_SPI
23 config SPI_FSL_DSPI
24 tristate "Freescale DSPI controller"
25 select REGMAP_MMIO
26 + depends on HAS_DMA
27 depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
28 help
29 This enables support for the Freescale DSPI controller in master
30 diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
31 index a67b0ff6..15201645 100644
32 --- a/drivers/spi/spi-fsl-dspi.c
33 +++ b/drivers/spi/spi-fsl-dspi.c
34 @@ -15,6 +15,8 @@
35
36 #include <linux/clk.h>
37 #include <linux/delay.h>
38 +#include <linux/dmaengine.h>
39 +#include <linux/dma-mapping.h>
40 #include <linux/err.h>
41 #include <linux/errno.h>
42 #include <linux/interrupt.h>
43 @@ -40,6 +42,7 @@
44 #define TRAN_STATE_WORD_ODD_NUM 0x04
45
46 #define DSPI_FIFO_SIZE 4
47 +#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)
48
49 #define SPI_MCR 0x00
50 #define SPI_MCR_MASTER (1 << 31)
51 @@ -72,6 +75,11 @@
52 #define SPI_SR_TCFQF 0x80000000
53 #define SPI_SR_CLEAR 0xdaad0000
54
55 +#define SPI_RSER_TFFFE BIT(25)
56 +#define SPI_RSER_TFFFD BIT(24)
57 +#define SPI_RSER_RFDFE BIT(17)
58 +#define SPI_RSER_RFDFD BIT(16)
59 +
60 #define SPI_RSER 0x30
61 #define SPI_RSER_EOQFE 0x10000000
62 #define SPI_RSER_TCFQE 0x80000000
63 @@ -109,6 +117,8 @@
64
65 #define SPI_TCR_TCNT_MAX 0x10000
66
67 +#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)
68 +
69 struct chip_data {
70 u32 mcr_val;
71 u32 ctar_val;
72 @@ -118,6 +128,7 @@ struct chip_data {
73 enum dspi_trans_mode {
74 DSPI_EOQ_MODE = 0,
75 DSPI_TCFQ_MODE,
76 + DSPI_DMA_MODE,
77 };
78
79 struct fsl_dspi_devtype_data {
80 @@ -126,7 +137,7 @@ struct fsl_dspi_devtype_data {
81 };
82
83 static const struct fsl_dspi_devtype_data vf610_data = {
84 - .trans_mode = DSPI_EOQ_MODE,
85 + .trans_mode = DSPI_DMA_MODE,
86 .max_clock_factor = 2,
87 };
88
89 @@ -140,6 +151,23 @@ static const struct fsl_dspi_devtype_data ls2085a_data = {
90 .max_clock_factor = 8,
91 };
92
93 +struct fsl_dspi_dma {
94 + /* Length of transfer in words of DSPI_FIFO_SIZE */
95 + u32 curr_xfer_len;
96 +
97 + u32 *tx_dma_buf;
98 + struct dma_chan *chan_tx;
99 + dma_addr_t tx_dma_phys;
100 + struct completion cmd_tx_complete;
101 + struct dma_async_tx_descriptor *tx_desc;
102 +
103 + u32 *rx_dma_buf;
104 + struct dma_chan *chan_rx;
105 + dma_addr_t rx_dma_phys;
106 + struct completion cmd_rx_complete;
107 + struct dma_async_tx_descriptor *rx_desc;
108 +};
109 +
110 struct fsl_dspi {
111 struct spi_master *master;
112 struct platform_device *pdev;
113 @@ -166,8 +194,11 @@ struct fsl_dspi {
114 u32 waitflags;
115
116 u32 spi_tcnt;
117 + struct fsl_dspi_dma *dma;
118 };
119
120 +static u32 dspi_data_to_pushr(struct fsl_dspi *dspi, int tx_word);
121 +
122 static inline int is_double_byte_mode(struct fsl_dspi *dspi)
123 {
124 unsigned int val;
125 @@ -177,6 +208,255 @@ static inline int is_double_byte_mode(struct fsl_dspi *dspi)
126 return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1;
127 }
128
129 +static void dspi_tx_dma_callback(void *arg)
130 +{
131 + struct fsl_dspi *dspi = arg;
132 + struct fsl_dspi_dma *dma = dspi->dma;
133 +
134 + complete(&dma->cmd_tx_complete);
135 +}
136 +
137 +static void dspi_rx_dma_callback(void *arg)
138 +{
139 + struct fsl_dspi *dspi = arg;
140 + struct fsl_dspi_dma *dma = dspi->dma;
141 + int rx_word;
142 + int i;
143 + u16 d;
144 +
145 + rx_word = is_double_byte_mode(dspi);
146 +
147 + if (!(dspi->dataflags & TRAN_STATE_RX_VOID)) {
148 + for (i = 0; i < dma->curr_xfer_len; i++) {
149 + d = dspi->dma->rx_dma_buf[i];
150 + rx_word ? (*(u16 *)dspi->rx = d) :
151 + (*(u8 *)dspi->rx = d);
152 + dspi->rx += rx_word + 1;
153 + }
154 + }
155 +
156 + complete(&dma->cmd_rx_complete);
157 +}
158 +
159 +static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
160 +{
161 + struct fsl_dspi_dma *dma = dspi->dma;
162 + struct device *dev = &dspi->pdev->dev;
163 + int time_left;
164 + int tx_word;
165 + int i;
166 +
167 + tx_word = is_double_byte_mode(dspi);
168 +
169 + for (i = 0; i < dma->curr_xfer_len; i++) {
170 + dspi->dma->tx_dma_buf[i] = dspi_data_to_pushr(dspi, tx_word);
171 + if ((dspi->cs_change) && (!dspi->len))
172 + dspi->dma->tx_dma_buf[i] &= ~SPI_PUSHR_CONT;
173 + }
174 +
175 + dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
176 + dma->tx_dma_phys,
177 + dma->curr_xfer_len *
178 + DMA_SLAVE_BUSWIDTH_4_BYTES,
179 + DMA_MEM_TO_DEV,
180 + DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
181 + if (!dma->tx_desc) {
182 + dev_err(dev, "Not able to get desc for DMA xfer\n");
183 + return -EIO;
184 + }
185 +
186 + dma->tx_desc->callback = dspi_tx_dma_callback;
187 + dma->tx_desc->callback_param = dspi;
188 + if (dma_submit_error(dmaengine_submit(dma->tx_desc))) {
189 + dev_err(dev, "DMA submit failed\n");
190 + return -EINVAL;
191 + }
192 +
193 + dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
194 + dma->rx_dma_phys,
195 + dma->curr_xfer_len *
196 + DMA_SLAVE_BUSWIDTH_4_BYTES,
197 + DMA_DEV_TO_MEM,
198 + DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
199 + if (!dma->rx_desc) {
200 + dev_err(dev, "Not able to get desc for DMA xfer\n");
201 + return -EIO;
202 + }
203 +
204 + dma->rx_desc->callback = dspi_rx_dma_callback;
205 + dma->rx_desc->callback_param = dspi;
206 + if (dma_submit_error(dmaengine_submit(dma->rx_desc))) {
207 + dev_err(dev, "DMA submit failed\n");
208 + return -EINVAL;
209 + }
210 +
211 + reinit_completion(&dspi->dma->cmd_rx_complete);
212 + reinit_completion(&dspi->dma->cmd_tx_complete);
213 +
214 + dma_async_issue_pending(dma->chan_rx);
215 + dma_async_issue_pending(dma->chan_tx);
216 +
217 + time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
218 + DMA_COMPLETION_TIMEOUT);
219 + if (time_left == 0) {
220 + dev_err(dev, "DMA tx timeout\n");
221 + dmaengine_terminate_all(dma->chan_tx);
222 + dmaengine_terminate_all(dma->chan_rx);
223 + return -ETIMEDOUT;
224 + }
225 +
226 + time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
227 + DMA_COMPLETION_TIMEOUT);
228 + if (time_left == 0) {
229 + dev_err(dev, "DMA rx timeout\n");
230 + dmaengine_terminate_all(dma->chan_tx);
231 + dmaengine_terminate_all(dma->chan_rx);
232 + return -ETIMEDOUT;
233 + }
234 +
235 + return 0;
236 +}
237 +
238 +static int dspi_dma_xfer(struct fsl_dspi *dspi)
239 +{
240 + struct fsl_dspi_dma *dma = dspi->dma;
241 + struct device *dev = &dspi->pdev->dev;
242 + int curr_remaining_bytes;
243 + int bytes_per_buffer;
244 + int word = 1;
245 + int ret = 0;
246 +
247 + if (is_double_byte_mode(dspi))
248 + word = 2;
249 + curr_remaining_bytes = dspi->len;
250 + bytes_per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE;
251 + while (curr_remaining_bytes) {
252 + /* Check if current transfer fits the DMA buffer */
253 + dma->curr_xfer_len = curr_remaining_bytes / word;
254 + if (dma->curr_xfer_len > bytes_per_buffer)
255 + dma->curr_xfer_len = bytes_per_buffer;
256 +
257 + ret = dspi_next_xfer_dma_submit(dspi);
258 + if (ret) {
259 + dev_err(dev, "DMA transfer failed\n");
260 + goto exit;
261 +
262 + } else {
263 + curr_remaining_bytes -= dma->curr_xfer_len * word;
264 + if (curr_remaining_bytes < 0)
265 + curr_remaining_bytes = 0;
266 + }
267 + }
268 +
269 +exit:
270 + return ret;
271 +}
272 +
273 +static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
274 +{
275 + struct fsl_dspi_dma *dma;
276 + struct dma_slave_config cfg;
277 + struct device *dev = &dspi->pdev->dev;
278 + int ret;
279 +
280 + dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
281 + if (!dma)
282 + return -ENOMEM;
283 +
284 + dma->chan_rx = dma_request_slave_channel(dev, "rx");
285 + if (!dma->chan_rx) {
286 + dev_err(dev, "rx dma channel not available\n");
287 + ret = -ENODEV;
288 + return ret;
289 + }
290 +
291 + dma->chan_tx = dma_request_slave_channel(dev, "tx");
292 + if (!dma->chan_tx) {
293 + dev_err(dev, "tx dma channel not available\n");
294 + ret = -ENODEV;
295 + goto err_tx_channel;
296 + }
297 +
298 + dma->tx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
299 + &dma->tx_dma_phys, GFP_KERNEL);
300 + if (!dma->tx_dma_buf) {
301 + ret = -ENOMEM;
302 + goto err_tx_dma_buf;
303 + }
304 +
305 + dma->rx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
306 + &dma->rx_dma_phys, GFP_KERNEL);
307 + if (!dma->rx_dma_buf) {
308 + ret = -ENOMEM;
309 + goto err_rx_dma_buf;
310 + }
311 +
312 + cfg.src_addr = phy_addr + SPI_POPR;
313 + cfg.dst_addr = phy_addr + SPI_PUSHR;
314 + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
315 + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
316 + cfg.src_maxburst = 1;
317 + cfg.dst_maxburst = 1;
318 +
319 + cfg.direction = DMA_DEV_TO_MEM;
320 + ret = dmaengine_slave_config(dma->chan_rx, &cfg);
321 + if (ret) {
322 + dev_err(dev, "can't configure rx dma channel\n");
323 + ret = -EINVAL;
324 + goto err_slave_config;
325 + }
326 +
327 + cfg.direction = DMA_MEM_TO_DEV;
328 + ret = dmaengine_slave_config(dma->chan_tx, &cfg);
329 + if (ret) {
330 + dev_err(dev, "can't configure tx dma channel\n");
331 + ret = -EINVAL;
332 + goto err_slave_config;
333 + }
334 +
335 + dspi->dma = dma;
336 + init_completion(&dma->cmd_tx_complete);
337 + init_completion(&dma->cmd_rx_complete);
338 +
339 + return 0;
340 +
341 +err_slave_config:
342 + dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
343 + dma->rx_dma_buf, dma->rx_dma_phys);
344 +err_rx_dma_buf:
345 + dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
346 + dma->tx_dma_buf, dma->tx_dma_phys);
347 +err_tx_dma_buf:
348 + dma_release_channel(dma->chan_tx);
349 +err_tx_channel:
350 + dma_release_channel(dma->chan_rx);
351 +
352 + devm_kfree(dev, dma);
353 + dspi->dma = NULL;
354 +
355 + return ret;
356 +}
357 +
358 +static void dspi_release_dma(struct fsl_dspi *dspi)
359 +{
360 +	struct fsl_dspi_dma *dma = dspi->dma;
361 +	struct device *dev = &dspi->pdev->dev;
362 +
363 +	if (dma) {
364 +		if (dma->chan_tx) {
365 +			dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
366 +					dma->tx_dma_buf, dma->tx_dma_phys);
367 +			dma_release_channel(dma->chan_tx);
368 +		}
369 +
370 +		if (dma->chan_rx) {
371 +			dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
372 +					dma->rx_dma_buf, dma->rx_dma_phys);
373 +			dma_release_channel(dma->chan_rx);
374 +		}
375 +	}
376 +}
377 +
378 static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
379 unsigned long clkrate)
380 {
381 @@ -425,6 +705,12 @@ static int dspi_transfer_one_message(struct spi_master *master,
382 regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_TCFQE);
383 dspi_tcfq_write(dspi);
384 break;
385 + case DSPI_DMA_MODE:
386 + regmap_write(dspi->regmap, SPI_RSER,
387 + SPI_RSER_TFFFE | SPI_RSER_TFFFD |
388 + SPI_RSER_RFDFE | SPI_RSER_RFDFD);
389 + status = dspi_dma_xfer(dspi);
390 + break;
391 default:
392 dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
393 trans_mode);
394 @@ -432,9 +718,13 @@ static int dspi_transfer_one_message(struct spi_master *master,
395 goto out;
396 }
397
398 - if (wait_event_interruptible(dspi->waitq, dspi->waitflags))
399 - dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n");
400 - dspi->waitflags = 0;
401 + if (trans_mode != DSPI_DMA_MODE) {
402 + if (wait_event_interruptible(dspi->waitq,
403 + dspi->waitflags))
404 + dev_err(&dspi->pdev->dev,
405 + "wait transfer complete fail!\n");
406 + dspi->waitflags = 0;
407 + }
408
409 if (transfer->delay_usecs)
410 udelay(transfer->delay_usecs);
411 @@ -712,7 +1002,8 @@ static int dspi_probe(struct platform_device *pdev)
412 if (IS_ERR(dspi->regmap)) {
413 dev_err(&pdev->dev, "failed to init regmap: %ld\n",
414 PTR_ERR(dspi->regmap));
415 - return PTR_ERR(dspi->regmap);
416 + ret = PTR_ERR(dspi->regmap);
417 + goto out_master_put;
418 }
419
420 dspi_init(dspi);
421 @@ -740,6 +1031,13 @@ static int dspi_probe(struct platform_device *pdev)
422 if (ret)
423 goto out_master_put;
424
425 +	if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
426 +		ret = dspi_request_dma(dspi, res->start);
427 +		if (ret) {
428 +			dev_err(&pdev->dev, "can't get dma channels\n");
429 +			goto out_clk_put;
430 +		}
431 +	}
432 master->max_speed_hz =
433 clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;
434
435 @@ -768,6 +1066,7 @@ static int dspi_remove(struct platform_device *pdev)
436 struct fsl_dspi *dspi = spi_master_get_devdata(master);
437
438 /* Disconnect from the SPI framework */
439 + dspi_release_dma(dspi);
440 clk_disable_unprepare(dspi->clk);
441 spi_unregister_master(dspi->master);
442
443 --
444 2.14.1
445