1 From a12f522b48a8cb637c1c026b46a76b2ef7983f8d Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Mon, 25 Sep 2017 12:12:41 +0800
4 Subject: [PATCH] spi: support layerscape
6 This is an integrated patch for layerscape dspi support.
8 Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
9 Signed-off-by: Sanchayan Maity <maitysanchayan@gmail.com>
10 Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
11 Signed-off-by: Sanchayan Maity <maitysanchayan@gmail.com>
12 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
14 drivers/spi/Kconfig | 1 +
15 drivers/spi/spi-fsl-dspi.c | 309 ++++++++++++++++++++++++++++++++++++++++++++-
16 2 files changed, 305 insertions(+), 5 deletions(-)
18 --- a/drivers/spi/spi-fsl-dspi.c
19 +++ b/drivers/spi/spi-fsl-dspi.c
22 #include <linux/clk.h>
23 #include <linux/delay.h>
24 +#include <linux/dmaengine.h>
25 +#include <linux/dma-mapping.h>
26 #include <linux/err.h>
27 #include <linux/errno.h>
28 #include <linux/interrupt.h>
30 #define TRAN_STATE_WORD_ODD_NUM 0x04
32 #define DSPI_FIFO_SIZE 4
33 +#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)
36 #define SPI_MCR_MASTER (1 << 31)
38 #define SPI_SR_TCFQF 0x80000000
39 #define SPI_SR_CLEAR 0xdaad0000
41 +#define SPI_RSER_TFFFE BIT(25)
42 +#define SPI_RSER_TFFFD BIT(24)
43 +#define SPI_RSER_RFDFE BIT(17)
44 +#define SPI_RSER_RFDFD BIT(16)
47 #define SPI_RSER_EOQFE 0x10000000
48 #define SPI_RSER_TCFQE 0x80000000
51 #define SPI_TCR_TCNT_MAX 0x10000
53 +#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)
58 @@ -118,6 +128,7 @@ struct chip_data {
59 enum dspi_trans_mode {
65 struct fsl_dspi_devtype_data {
66 @@ -126,7 +137,7 @@ struct fsl_dspi_devtype_data {
69 static const struct fsl_dspi_devtype_data vf610_data = {
70 - .trans_mode = DSPI_EOQ_MODE,
71 + .trans_mode = DSPI_DMA_MODE,
72 .max_clock_factor = 2,
75 @@ -140,6 +151,23 @@ static const struct fsl_dspi_devtype_dat
76 .max_clock_factor = 8,
79 +struct fsl_dspi_dma {
80 + /* Length of transfer in words of DSPI_FIFO_SIZE */
84 + struct dma_chan *chan_tx;
85 + dma_addr_t tx_dma_phys;
86 + struct completion cmd_tx_complete;
87 + struct dma_async_tx_descriptor *tx_desc;
90 + struct dma_chan *chan_rx;
91 + dma_addr_t rx_dma_phys;
92 + struct completion cmd_rx_complete;
93 + struct dma_async_tx_descriptor *rx_desc;
97 struct spi_master *master;
98 struct platform_device *pdev;
99 @@ -166,8 +194,11 @@ struct fsl_dspi {
103 + struct fsl_dspi_dma *dma;
106 +static u32 dspi_data_to_pushr(struct fsl_dspi *dspi, int tx_word);
108 static inline int is_double_byte_mode(struct fsl_dspi *dspi)
111 @@ -177,6 +208,255 @@ static inline int is_double_byte_mode(st
112 return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1;
115 +static void dspi_tx_dma_callback(void *arg)
117 + struct fsl_dspi *dspi = arg;
118 + struct fsl_dspi_dma *dma = dspi->dma;
120 + complete(&dma->cmd_tx_complete);
123 +static void dspi_rx_dma_callback(void *arg)
125 + struct fsl_dspi *dspi = arg;
126 + struct fsl_dspi_dma *dma = dspi->dma;
131 + rx_word = is_double_byte_mode(dspi);
133 + if (!(dspi->dataflags & TRAN_STATE_RX_VOID)) {
134 + for (i = 0; i < dma->curr_xfer_len; i++) {
135 + d = dspi->dma->rx_dma_buf[i];
136 + rx_word ? (*(u16 *)dspi->rx = d) :
137 + (*(u8 *)dspi->rx = d);
138 + dspi->rx += rx_word + 1;
142 + complete(&dma->cmd_rx_complete);
145 +static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
147 + struct fsl_dspi_dma *dma = dspi->dma;
148 + struct device *dev = &dspi->pdev->dev;
153 + tx_word = is_double_byte_mode(dspi);
155 + for (i = 0; i < dma->curr_xfer_len; i++) {
156 + dspi->dma->tx_dma_buf[i] = dspi_data_to_pushr(dspi, tx_word);
157 + if ((dspi->cs_change) && (!dspi->len))
158 + dspi->dma->tx_dma_buf[i] &= ~SPI_PUSHR_CONT;
161 + dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
163 + dma->curr_xfer_len *
164 + DMA_SLAVE_BUSWIDTH_4_BYTES,
166 + DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
167 + if (!dma->tx_desc) {
168 + dev_err(dev, "Not able to get desc for DMA xfer\n");
172 + dma->tx_desc->callback = dspi_tx_dma_callback;
173 + dma->tx_desc->callback_param = dspi;
174 + if (dma_submit_error(dmaengine_submit(dma->tx_desc))) {
175 + dev_err(dev, "DMA submit failed\n");
179 + dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
181 + dma->curr_xfer_len *
182 + DMA_SLAVE_BUSWIDTH_4_BYTES,
184 + DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
185 + if (!dma->rx_desc) {
186 + dev_err(dev, "Not able to get desc for DMA xfer\n");
190 + dma->rx_desc->callback = dspi_rx_dma_callback;
191 + dma->rx_desc->callback_param = dspi;
192 + if (dma_submit_error(dmaengine_submit(dma->rx_desc))) {
193 + dev_err(dev, "DMA submit failed\n");
197 + reinit_completion(&dspi->dma->cmd_rx_complete);
198 + reinit_completion(&dspi->dma->cmd_tx_complete);
200 + dma_async_issue_pending(dma->chan_rx);
201 + dma_async_issue_pending(dma->chan_tx);
203 + time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
204 + DMA_COMPLETION_TIMEOUT);
205 + if (time_left == 0) {
206 + dev_err(dev, "DMA tx timeout\n");
207 + dmaengine_terminate_all(dma->chan_tx);
208 + dmaengine_terminate_all(dma->chan_rx);
212 + time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
213 + DMA_COMPLETION_TIMEOUT);
214 + if (time_left == 0) {
215 + dev_err(dev, "DMA rx timeout\n");
216 + dmaengine_terminate_all(dma->chan_tx);
217 + dmaengine_terminate_all(dma->chan_rx);
224 +static int dspi_dma_xfer(struct fsl_dspi *dspi)
226 + struct fsl_dspi_dma *dma = dspi->dma;
227 + struct device *dev = &dspi->pdev->dev;
228 + int curr_remaining_bytes;
229 + int bytes_per_buffer;
233 + if (is_double_byte_mode(dspi))
235 + curr_remaining_bytes = dspi->len;
236 + bytes_per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE;
237 + while (curr_remaining_bytes) {
238 + /* Check if current transfer fits the DMA buffer */
239 + dma->curr_xfer_len = curr_remaining_bytes / word;
240 + if (dma->curr_xfer_len > bytes_per_buffer)
241 + dma->curr_xfer_len = bytes_per_buffer;
243 + ret = dspi_next_xfer_dma_submit(dspi);
245 + dev_err(dev, "DMA transfer failed\n");
249 + curr_remaining_bytes -= dma->curr_xfer_len * word;
250 + if (curr_remaining_bytes < 0)
251 + curr_remaining_bytes = 0;
259 +static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
261 + struct fsl_dspi_dma *dma;
262 + struct dma_slave_config cfg;
263 + struct device *dev = &dspi->pdev->dev;
266 + dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
270 + dma->chan_rx = dma_request_slave_channel(dev, "rx");
271 + if (!dma->chan_rx) {
272 + dev_err(dev, "rx dma channel not available\n");
277 + dma->chan_tx = dma_request_slave_channel(dev, "tx");
278 + if (!dma->chan_tx) {
279 + dev_err(dev, "tx dma channel not available\n");
281 + goto err_tx_channel;
284 + dma->tx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
285 + &dma->tx_dma_phys, GFP_KERNEL);
286 + if (!dma->tx_dma_buf) {
288 + goto err_tx_dma_buf;
291 + dma->rx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
292 + &dma->rx_dma_phys, GFP_KERNEL);
293 + if (!dma->rx_dma_buf) {
295 + goto err_rx_dma_buf;
298 + cfg.src_addr = phy_addr + SPI_POPR;
299 + cfg.dst_addr = phy_addr + SPI_PUSHR;
300 + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
301 + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
302 + cfg.src_maxburst = 1;
303 + cfg.dst_maxburst = 1;
305 + cfg.direction = DMA_DEV_TO_MEM;
306 + ret = dmaengine_slave_config(dma->chan_rx, &cfg);
308 + dev_err(dev, "can't configure rx dma channel\n");
310 + goto err_slave_config;
313 + cfg.direction = DMA_MEM_TO_DEV;
314 + ret = dmaengine_slave_config(dma->chan_tx, &cfg);
316 + dev_err(dev, "can't configure tx dma channel\n");
318 + goto err_slave_config;
322 + init_completion(&dma->cmd_tx_complete);
323 + init_completion(&dma->cmd_rx_complete);
328 + dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
329 + dma->rx_dma_buf, dma->rx_dma_phys);
331 + dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
332 + dma->tx_dma_buf, dma->tx_dma_phys);
334 + dma_release_channel(dma->chan_tx);
336 + dma_release_channel(dma->chan_rx);
338 + devm_kfree(dev, dma);
344 +static void dspi_release_dma(struct fsl_dspi *dspi)
346 + struct fsl_dspi_dma *dma = dspi->dma;
347 + struct device *dev = &dspi->pdev->dev;
350 + if (dma->chan_tx) {
351 + dma_unmap_single(dev, dma->tx_dma_phys,
352 + DSPI_DMA_BUFSIZE, DMA_TO_DEVICE);
353 + dma_release_channel(dma->chan_tx);
356 + if (dma->chan_rx) {
357 + dma_unmap_single(dev, dma->rx_dma_phys,
358 + DSPI_DMA_BUFSIZE, DMA_FROM_DEVICE);
359 + dma_release_channel(dma->chan_rx);
364 static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
365 unsigned long clkrate)
367 @@ -425,6 +705,12 @@ static int dspi_transfer_one_message(str
368 regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_TCFQE);
369 dspi_tcfq_write(dspi);
371 + case DSPI_DMA_MODE:
372 + regmap_write(dspi->regmap, SPI_RSER,
373 + SPI_RSER_TFFFE | SPI_RSER_TFFFD |
374 + SPI_RSER_RFDFE | SPI_RSER_RFDFD);
375 + status = dspi_dma_xfer(dspi);
378 dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
380 @@ -432,9 +718,13 @@ static int dspi_transfer_one_message(str
384 - if (wait_event_interruptible(dspi->waitq, dspi->waitflags))
385 - dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n");
386 - dspi->waitflags = 0;
387 + if (trans_mode != DSPI_DMA_MODE) {
388 + if (wait_event_interruptible(dspi->waitq,
390 + dev_err(&dspi->pdev->dev,
391 + "wait transfer complete fail!\n");
392 + dspi->waitflags = 0;
395 if (transfer->delay_usecs)
396 udelay(transfer->delay_usecs);
397 @@ -712,7 +1002,8 @@ static int dspi_probe(struct platform_de
398 if (IS_ERR(dspi->regmap)) {
399 dev_err(&pdev->dev, "failed to init regmap: %ld\n",
400 PTR_ERR(dspi->regmap));
401 - return PTR_ERR(dspi->regmap);
402 + ret = PTR_ERR(dspi->regmap);
403 + goto out_master_put;
407 @@ -740,6 +1031,13 @@ static int dspi_probe(struct platform_de
411 + if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
412 + if (dspi_request_dma(dspi, res->start)) {
413 + dev_err(&pdev->dev, "can't get dma channels\n");
418 master->max_speed_hz =
419 clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;
421 @@ -768,6 +1066,7 @@ static int dspi_remove(struct platform_d
422 struct fsl_dspi *dspi = spi_master_get_devdata(master);
424 /* Disconnect from the SPI framework */
425 + dspi_release_dma(dspi);
426 clk_disable_unprepare(dspi->clk);
427 spi_unregister_master(dspi->master);