target/linux/layerscape/patches-4.9/815-spi-support-layerscape.patch
From 027b679f248f15dea36c6cd6782d6643e2151057 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
Date: Wed, 17 Jan 2018 15:39:43 +0800
Subject: [PATCH 27/30] spi: support layerscape

This is an integrated patch for Layerscape DSPI support.

Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Sanchayan Maity <maitysanchayan@gmail.com>
Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Sanchayan Maity <maitysanchayan@gmail.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
 drivers/spi/spi-fsl-dspi.c | 309 ++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 304 insertions(+), 5 deletions(-)

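Annotation (not part of the patch): the switch from EOQ interrupts to DMA is
invisible to SPI client drivers, which keep using the ordinary message API;
the host driver bounces data through its own coherent buffers, so clients see
no DMA details. A minimal consumer sketch — demo_xfer() is hypothetical and
assumes an already-probed spi_device:

    #include <linux/slab.h>
    #include <linux/spi/spi.h>

    static int demo_xfer(struct spi_device *spi)    /* hypothetical client */
    {
        struct spi_transfer t = { .len = 32 };
        struct spi_message m;
        int ret;

        t.tx_buf = kzalloc(t.len, GFP_KERNEL);  /* DMA-safe buffers */
        t.rx_buf = kzalloc(t.len, GFP_KERNEL);
        if (!t.tx_buf || !t.rx_buf) {
            ret = -ENOMEM;
            goto out;
        }

        spi_message_init(&m);
        spi_message_add_tail(&t, &m);
        ret = spi_sync(spi, &m);    /* blocks; the controller driver decides
                                     * whether this moves via EOQ, TCFQ or DMA */
    out:
        kfree(t.rx_buf);
        kfree(t.tx_buf);
        return ret;
    }
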
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -15,6 +15,8 @@
 
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/interrupt.h>
@@ -40,6 +42,7 @@
 #define TRAN_STATE_WORD_ODD_NUM 0x04
 
 #define DSPI_FIFO_SIZE 4
+#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)
 
 #define SPI_MCR 0x00
 #define SPI_MCR_MASTER (1 << 31)
@@ -72,6 +75,11 @@
 #define SPI_SR_TCFQF 0x80000000
 #define SPI_SR_CLEAR 0xdaad0000
 
+#define SPI_RSER_TFFFE BIT(25)
+#define SPI_RSER_TFFFD BIT(24)
+#define SPI_RSER_RFDFE BIT(17)
+#define SPI_RSER_RFDFD BIT(16)
+
 #define SPI_RSER 0x30
 #define SPI_RSER_EOQFE 0x10000000
 #define SPI_RSER_TCFQE 0x80000000
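
Annotation (not part of the patch): the four new RSER bits route the DSPI's
two FIFO flags to the DMA mux. On the usual DSPI register naming (hedged
reading of the reference manual), the ...E bit enables the TX-FIFO-fill /
RX-FIFO-drain request and the ...D bit steers it to a DMA request instead of
an interrupt. The DSPI_DMA_MODE hunk further down writes exactly this
combination; as a self-contained fragment:

    /* TFFF* = TX FIFO fill, RFDF* = RX FIFO drain */
    static void demo_enable_dma_requests(struct fsl_dspi *dspi)
    {
        regmap_write(dspi->regmap, SPI_RSER,
                     SPI_RSER_TFFFE | SPI_RSER_TFFFD |
                     SPI_RSER_RFDFE | SPI_RSER_RFDFD);
    }
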
@@ -109,6 +117,8 @@
 
 #define SPI_TCR_TCNT_MAX 0x10000
 
+#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)
+
 struct chip_data {
 	u32 mcr_val;
 	u32 ctar_val;
@@ -118,6 +128,7 @@ struct chip_data {
 enum dspi_trans_mode {
 	DSPI_EOQ_MODE = 0,
 	DSPI_TCFQ_MODE,
+	DSPI_DMA_MODE,
 };
 
 struct fsl_dspi_devtype_data {
@@ -126,7 +137,7 @@ struct fsl_dspi_devtype_data {
 };
 
 static const struct fsl_dspi_devtype_data vf610_data = {
-	.trans_mode = DSPI_EOQ_MODE,
+	.trans_mode = DSPI_DMA_MODE,
 	.max_clock_factor = 2,
 };
 
@@ -140,6 +151,23 @@ static const struct fsl_dspi_devtype_dat
 	.max_clock_factor = 8,
 };
 
+struct fsl_dspi_dma {
+	/* Length of transfer in words of DSPI_FIFO_SIZE */
+	u32 curr_xfer_len;
+
+	u32 *tx_dma_buf;
+	struct dma_chan *chan_tx;
+	dma_addr_t tx_dma_phys;
+	struct completion cmd_tx_complete;
+	struct dma_async_tx_descriptor *tx_desc;
+
+	u32 *rx_dma_buf;
+	struct dma_chan *chan_rx;
+	dma_addr_t rx_dma_phys;
+	struct completion cmd_rx_complete;
+	struct dma_async_tx_descriptor *rx_desc;
+};
+
 struct fsl_dspi {
 	struct spi_master *master;
 	struct platform_device *pdev;
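
Annotation (not part of the patch): struct fsl_dspi_dma keeps one channel,
one coherent bounce buffer, one descriptor and one completion per direction.
The per-direction lifecycle the driver follows is the standard dmaengine
prep/submit/issue/wait pattern; a minimal single-direction sketch, assuming
chan and phys were set up the way dspi_request_dma() below does it:

    #include <linux/completion.h>
    #include <linux/dmaengine.h>
    #include <linux/jiffies.h>

    static void demo_dma_done(void *arg)
    {
        complete(arg);  /* called by the DMA driver when the transfer ends */
    }

    /* Move len bytes already staged at bus address phys out through chan. */
    static int demo_dma_once(struct dma_chan *chan, dma_addr_t phys, size_t len)
    {
        struct dma_async_tx_descriptor *desc;
        DECLARE_COMPLETION_ONSTACK(done);

        desc = dmaengine_prep_slave_single(chan, phys, len, DMA_MEM_TO_DEV,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
            return -EIO;

        desc->callback = demo_dma_done;
        desc->callback_param = &done;
        if (dma_submit_error(dmaengine_submit(desc)))
            return -EINVAL;

        dma_async_issue_pending(chan);  /* nothing starts before this */

        if (!wait_for_completion_timeout(&done, msecs_to_jiffies(3000))) {
            dmaengine_terminate_all(chan);  /* as the patch does on timeout */
            return -ETIMEDOUT;
        }
        return 0;
    }
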
@@ -166,8 +194,11 @@ struct fsl_dspi {
 	u32 waitflags;
 
 	u32 spi_tcnt;
+	struct fsl_dspi_dma *dma;
 };
 
+static u32 dspi_data_to_pushr(struct fsl_dspi *dspi, int tx_word);
+
 static inline int is_double_byte_mode(struct fsl_dspi *dspi)
 {
 	unsigned int val;
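
Annotation (not part of the patch): the forward declaration above exists
because the DMA path fills tx_dma_buf[] with ready-made 32-bit PUSHR words.
From memory of this driver (hedged, illustrative only — cs and d stand in
for dspi->cs and the next payload), dspi_data_to_pushr() packs command bits
into the upper half and the data into the lower 16 bits, roughly:

    u32 pushr = SPI_PUSHR_TXDATA(d) |   /* 8/16-bit payload, bits 15:0 */
                SPI_PUSHR_PCS(cs) |     /* chip-select mask */
                SPI_PUSHR_CTAS(cs) |    /* CTAR providing the timing */
                SPI_PUSHR_CONT;         /* keep PCS asserted between words */
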
@@ -177,6 +208,255 @@ static inline int is_double_byte_mode(st
 	return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1;
 }
 
+static void dspi_tx_dma_callback(void *arg)
+{
+	struct fsl_dspi *dspi = arg;
+	struct fsl_dspi_dma *dma = dspi->dma;
+
+	complete(&dma->cmd_tx_complete);
+}
+
+static void dspi_rx_dma_callback(void *arg)
+{
+	struct fsl_dspi *dspi = arg;
+	struct fsl_dspi_dma *dma = dspi->dma;
+	int rx_word;
+	int i;
+	u16 d;
+
+	rx_word = is_double_byte_mode(dspi);
+
+	if (!(dspi->dataflags & TRAN_STATE_RX_VOID)) {
+		for (i = 0; i < dma->curr_xfer_len; i++) {
+			d = dspi->dma->rx_dma_buf[i];
+			rx_word ? (*(u16 *)dspi->rx = d) :
+				(*(u8 *)dspi->rx = d);
+			dspi->rx += rx_word + 1;
+		}
+	}
+
+	complete(&dma->cmd_rx_complete);
+}
+
+static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
+{
+	struct fsl_dspi_dma *dma = dspi->dma;
+	struct device *dev = &dspi->pdev->dev;
+	int time_left;
+	int tx_word;
+	int i;
+
+	tx_word = is_double_byte_mode(dspi);
+
+	for (i = 0; i < dma->curr_xfer_len; i++) {
+		dspi->dma->tx_dma_buf[i] = dspi_data_to_pushr(dspi, tx_word);
+		if ((dspi->cs_change) && (!dspi->len))
+			dspi->dma->tx_dma_buf[i] &= ~SPI_PUSHR_CONT;
+	}
+
+	dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
+					dma->tx_dma_phys,
+					dma->curr_xfer_len *
+					DMA_SLAVE_BUSWIDTH_4_BYTES,
+					DMA_MEM_TO_DEV,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!dma->tx_desc) {
+		dev_err(dev, "Not able to get desc for DMA xfer\n");
+		return -EIO;
+	}
+
+	dma->tx_desc->callback = dspi_tx_dma_callback;
+	dma->tx_desc->callback_param = dspi;
+	if (dma_submit_error(dmaengine_submit(dma->tx_desc))) {
+		dev_err(dev, "DMA submit failed\n");
+		return -EINVAL;
+	}
+
+	dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
+					dma->rx_dma_phys,
+					dma->curr_xfer_len *
+					DMA_SLAVE_BUSWIDTH_4_BYTES,
+					DMA_DEV_TO_MEM,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!dma->rx_desc) {
+		dev_err(dev, "Not able to get desc for DMA xfer\n");
+		return -EIO;
+	}
+
+	dma->rx_desc->callback = dspi_rx_dma_callback;
+	dma->rx_desc->callback_param = dspi;
+	if (dma_submit_error(dmaengine_submit(dma->rx_desc))) {
+		dev_err(dev, "DMA submit failed\n");
+		return -EINVAL;
+	}
+
+	reinit_completion(&dspi->dma->cmd_rx_complete);
+	reinit_completion(&dspi->dma->cmd_tx_complete);
+
+	dma_async_issue_pending(dma->chan_rx);
+	dma_async_issue_pending(dma->chan_tx);
+
+	time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
+					DMA_COMPLETION_TIMEOUT);
+	if (time_left == 0) {
+		dev_err(dev, "DMA tx timeout\n");
+		dmaengine_terminate_all(dma->chan_tx);
+		dmaengine_terminate_all(dma->chan_rx);
+		return -ETIMEDOUT;
+	}
+
+	time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
+					DMA_COMPLETION_TIMEOUT);
+	if (time_left == 0) {
+		dev_err(dev, "DMA rx timeout\n");
+		dmaengine_terminate_all(dma->chan_tx);
+		dmaengine_terminate_all(dma->chan_rx);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int dspi_dma_xfer(struct fsl_dspi *dspi)
+{
+	struct fsl_dspi_dma *dma = dspi->dma;
+	struct device *dev = &dspi->pdev->dev;
+	int curr_remaining_bytes;
+	int bytes_per_buffer;
+	int word = 1;
+	int ret = 0;
+
+	if (is_double_byte_mode(dspi))
+		word = 2;
+	curr_remaining_bytes = dspi->len;
+	bytes_per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE;
+	while (curr_remaining_bytes) {
+		/* Check if current transfer fits the DMA buffer */
+		dma->curr_xfer_len = curr_remaining_bytes / word;
+		if (dma->curr_xfer_len > bytes_per_buffer)
+			dma->curr_xfer_len = bytes_per_buffer;
+
+		ret = dspi_next_xfer_dma_submit(dspi);
+		if (ret) {
+			dev_err(dev, "DMA transfer failed\n");
+			goto exit;
+
+		} else {
+			curr_remaining_bytes -= dma->curr_xfer_len * word;
+			if (curr_remaining_bytes < 0)
+				curr_remaining_bytes = 0;
+		}
+	}
+
+exit:
+	return ret;
+}
+
+static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
+{
+	struct fsl_dspi_dma *dma;
+	struct dma_slave_config cfg;
+	struct device *dev = &dspi->pdev->dev;
+	int ret;
+
+	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
+	if (!dma)
+		return -ENOMEM;
+
+	dma->chan_rx = dma_request_slave_channel(dev, "rx");
+	if (!dma->chan_rx) {
+		dev_err(dev, "rx dma channel not available\n");
+		ret = -ENODEV;
+		return ret;
+	}
+
+	dma->chan_tx = dma_request_slave_channel(dev, "tx");
+	if (!dma->chan_tx) {
+		dev_err(dev, "tx dma channel not available\n");
+		ret = -ENODEV;
+		goto err_tx_channel;
+	}
+
+	dma->tx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
+					&dma->tx_dma_phys, GFP_KERNEL);
+	if (!dma->tx_dma_buf) {
+		ret = -ENOMEM;
+		goto err_tx_dma_buf;
+	}
+
+	dma->rx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
+					&dma->rx_dma_phys, GFP_KERNEL);
+	if (!dma->rx_dma_buf) {
+		ret = -ENOMEM;
+		goto err_rx_dma_buf;
+	}
+
+	cfg.src_addr = phy_addr + SPI_POPR;
+	cfg.dst_addr = phy_addr + SPI_PUSHR;
+	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	cfg.src_maxburst = 1;
+	cfg.dst_maxburst = 1;
+
+	cfg.direction = DMA_DEV_TO_MEM;
+	ret = dmaengine_slave_config(dma->chan_rx, &cfg);
+	if (ret) {
+		dev_err(dev, "can't configure rx dma channel\n");
+		ret = -EINVAL;
+		goto err_slave_config;
+	}
+
+	cfg.direction = DMA_MEM_TO_DEV;
+	ret = dmaengine_slave_config(dma->chan_tx, &cfg);
+	if (ret) {
+		dev_err(dev, "can't configure tx dma channel\n");
+		ret = -EINVAL;
+		goto err_slave_config;
+	}
+
+	dspi->dma = dma;
+	init_completion(&dma->cmd_tx_complete);
+	init_completion(&dma->cmd_rx_complete);
+
+	return 0;
+
+err_slave_config:
+	dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
+			dma->rx_dma_buf, dma->rx_dma_phys);
+err_rx_dma_buf:
+	dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
+			dma->tx_dma_buf, dma->tx_dma_phys);
+err_tx_dma_buf:
+	dma_release_channel(dma->chan_tx);
+err_tx_channel:
+	dma_release_channel(dma->chan_rx);
+
+	devm_kfree(dev, dma);
+	dspi->dma = NULL;
+
+	return ret;
+}
+
+static void dspi_release_dma(struct fsl_dspi *dspi)
+{
+	struct fsl_dspi_dma *dma = dspi->dma;
+	struct device *dev = &dspi->pdev->dev;
+
+	if (dma) {
+		if (dma->chan_tx) {
+			dma_unmap_single(dev, dma->tx_dma_phys,
+					DSPI_DMA_BUFSIZE, DMA_TO_DEVICE);
+			dma_release_channel(dma->chan_tx);
+		}
+
+		if (dma->chan_rx) {
+			dma_unmap_single(dev, dma->rx_dma_phys,
+					DSPI_DMA_BUFSIZE, DMA_FROM_DEVICE);
+			dma_release_channel(dma->chan_rx);
+		}
+	}
+}
+
 static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
 			unsigned long clkrate)
 {
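
Annotation (not part of the patch): one DMA pass moves at most
DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE = 4096 / 4 = 1024 FIFO words (the cap is
held in bytes_per_buffer even though it counts words — a naming quirk kept
verbatim from the source). Worked example of the dspi_dma_xfer() loop above,
assuming 16-bit frames and a hypothetical 5000-byte transfer:

    static void demo_chunking(void)
    {
        int word = 2;               /* 16-bit frames, per is_double_byte_mode() */
        int cap = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE;    /* 4096 / 4 = 1024 words */
        int remaining = 5000;       /* hypothetical dspi->len in bytes */

        while (remaining) {
            int words = remaining / word;   /* whole frames still to send */

            if (words > cap)
                words = cap;
            /* passes move 1024, 1024 and 452 words:
             * 2048 + 2048 + 904 = 5000 bytes
             */
            remaining -= words * word;
        }
    }
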
@@ -425,6 +705,12 @@ static int dspi_transfer_one_message(str
 		regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_TCFQE);
 		dspi_tcfq_write(dspi);
 		break;
+	case DSPI_DMA_MODE:
+		regmap_write(dspi->regmap, SPI_RSER,
+				SPI_RSER_TFFFE | SPI_RSER_TFFFD |
+				SPI_RSER_RFDFE | SPI_RSER_RFDFD);
+		status = dspi_dma_xfer(dspi);
+		break;
 	default:
 		dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
 			trans_mode);
@@ -432,9 +718,13 @@ static int dspi_transfer_one_message(str
 		goto out;
 	}
 
-	if (wait_event_interruptible(dspi->waitq, dspi->waitflags))
-		dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n");
-	dspi->waitflags = 0;
+	if (trans_mode != DSPI_DMA_MODE) {
+		if (wait_event_interruptible(dspi->waitq,
+				dspi->waitflags))
+			dev_err(&dspi->pdev->dev,
+				"wait transfer complete fail!\n");
+		dspi->waitflags = 0;
+	}
 
 	if (transfer->delay_usecs)
 		udelay(transfer->delay_usecs);
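
Annotation (not part of the patch): EOQ and TCFQ transfers finish in the DSPI
interrupt handler, which sets dspi->waitflags and wakes dspi->waitq, so only
those modes still block on the waitqueue here; the DMA path already waited on
its two completions inside dspi_dma_xfer(). Abridged sketch of the interrupt
side (a hedged paraphrase of the existing handler, not part of this diff):

    static irqreturn_t dspi_interrupt_sketch(int irq, void *dev_id)
    {
        struct fsl_dspi *dspi = dev_id;

        /* ... acknowledge SPI_SR, push/pop remaining FIFO words ... */
        dspi->waitflags = 1;                    /* transfer done */
        wake_up_interruptible(&dspi->waitq);    /* release the wait above */
        return IRQ_HANDLED;
    }
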
@@ -712,7 +1002,8 @@ static int dspi_probe(struct platform_de
 	if (IS_ERR(dspi->regmap)) {
 		dev_err(&pdev->dev, "failed to init regmap: %ld\n",
 			PTR_ERR(dspi->regmap));
-		return PTR_ERR(dspi->regmap);
+		ret = PTR_ERR(dspi->regmap);
+		goto out_master_put;
 	}
 
 	dspi_init(dspi);
@@ -740,6 +1031,13 @@ static int dspi_probe(struct platform_de
 	if (ret)
 		goto out_master_put;
 
+	if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
+		if (dspi_request_dma(dspi, res->start)) {
+			dev_err(&pdev->dev, "can't get dma channels\n");
+			goto out_clk_put;
+		}
+	}
+
 	master->max_speed_hz =
 		clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;
 
@@ -768,6 +1066,7 @@ static int dspi_remove(struct platform_d
 	struct fsl_dspi *dspi = spi_master_get_devdata(master);
 
 	/* Disconnect from the SPI framework */
+	dspi_release_dma(dspi);
 	clk_disable_unprepare(dspi->clk);
 	spi_unregister_master(dspi->master);
 