1 From ed56e6322b067a898a25bda1774eb1180a832246 Mon Sep 17 00:00:00 2001
2 From: Andy Gross <andy.gross@linaro.org>
3 Date: Tue, 2 Feb 2016 17:00:53 -0600
4 Subject: [PATCH] spi: qup: Fix DMA mode to work correctly
6 This patch fixes a few issues with the DMA mode. The QUP needs to be
7 placed in RUN mode before the DMA transactions are executed. The
8 conditions for being able to DMA vary between revisions of the QUP.
9 This is due to v1.1.1 using ADM DMA and later revisions using BAM.
11 Change-Id: Ib1f876eaa05d079e0bca4358d2b25b2940986089
12 Signed-off-by: Andy Gross <andy.gross@linaro.org>
14 drivers/spi/spi-qup.c | 95 ++++++++++++++++++++++++++++++++++-----------------
15 1 file changed, 63 insertions(+), 32 deletions(-)
17 diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
18 index fe629f2..089c5e8 100644
19 --- a/drivers/spi/spi-qup.c
20 +++ b/drivers/spi/spi-qup.c
21 @@ -143,6 +143,7 @@ struct spi_qup {
23 struct spi_transfer *xfer;
24 struct completion done;
25 + struct completion dma_tx_done;
27 int w_size; /* bytes per SPI word */
29 @@ -285,16 +286,16 @@ static void spi_qup_fifo_write(struct spi_qup *controller,
31 static void spi_qup_dma_done(void *data)
33 - struct spi_qup *qup = data;
34 + struct completion *done = data;
36 - complete(&qup->done);
40 static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer,
41 enum dma_transfer_direction dir,
42 - dma_async_tx_callback callback)
43 + dma_async_tx_callback callback,
46 - struct spi_qup *qup = spi_master_get_devdata(master);
47 unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
48 struct dma_async_tx_descriptor *desc;
49 struct scatterlist *sgl;
50 @@ -313,11 +314,11 @@ static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer,
53 desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
56 + if (IS_ERR_OR_NULL(desc))
57 + return desc ? PTR_ERR(desc) : -EINVAL;
59 desc->callback = callback;
60 - desc->callback_param = qup;
61 + desc->callback_param = data;
63 cookie = dmaengine_submit(desc);
65 @@ -333,18 +334,29 @@ static void spi_qup_dma_terminate(struct spi_master *master,
66 dmaengine_terminate_all(master->dma_rx);
69 -static int spi_qup_do_dma(struct spi_master *master, struct spi_transfer *xfer)
70 +static int spi_qup_do_dma(struct spi_master *master, struct spi_transfer *xfer,
71 +unsigned long timeout)
73 + struct spi_qup *qup = spi_master_get_devdata(master);
74 dma_async_tx_callback rx_done = NULL, tx_done = NULL;
77 + /* before issuing the descriptors, set the QUP to run */
78 + ret = spi_qup_set_state(qup, QUP_STATE_RUN);
80 + dev_warn(qup->dev, "cannot set RUN state\n");
85 rx_done = spi_qup_dma_done;
86 - else if (xfer->tx_buf)
89 tx_done = spi_qup_dma_done;
92 - ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done);
93 + ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done,
98 @@ -352,17 +364,26 @@ static int spi_qup_do_dma(struct spi_master *master, struct spi_transfer *xfer)
102 - ret = spi_qup_prep_sg(master, xfer, DMA_MEM_TO_DEV, tx_done);
103 + ret = spi_qup_prep_sg(master, xfer, DMA_MEM_TO_DEV, tx_done,
104 + &qup->dma_tx_done);
108 dma_async_issue_pending(master->dma_tx);
112 + if (xfer->rx_buf && !wait_for_completion_timeout(&qup->done, timeout))
115 + if (xfer->tx_buf &&
116 + !wait_for_completion_timeout(&qup->dma_tx_done, timeout))
122 -static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer)
123 +static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer,
124 + unsigned long timeout)
126 struct spi_qup *qup = spi_master_get_devdata(master);
128 @@ -382,6 +403,15 @@ static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer)
129 if (qup->mode == QUP_IO_M_MODE_FIFO)
130 spi_qup_fifo_write(qup, xfer);
132 + ret = spi_qup_set_state(qup, QUP_STATE_RUN);
134 + dev_warn(qup->dev, "cannot set RUN state\n");
138 + if (!wait_for_completion_timeout(&qup->done, timeout))
144 @@ -430,7 +460,6 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
145 dev_warn(controller->dev, "CLK_OVER_RUN\n");
146 if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
147 dev_warn(controller->dev, "CLK_UNDER_RUN\n");
152 @@ -619,6 +648,7 @@ static int spi_qup_transfer_one(struct spi_master *master,
153 timeout = 100 * msecs_to_jiffies(timeout);
155 reinit_completion(&controller->done);
156 + reinit_completion(&controller->dma_tx_done);
158 spin_lock_irqsave(&controller->lock, flags);
159 controller->xfer = xfer;
160 @@ -628,21 +658,13 @@ static int spi_qup_transfer_one(struct spi_master *master,
161 spin_unlock_irqrestore(&controller->lock, flags);
163 if (spi_qup_is_dma_xfer(controller->mode))
164 - ret = spi_qup_do_dma(master, xfer);
165 + ret = spi_qup_do_dma(master, xfer, timeout);
167 - ret = spi_qup_do_pio(master, xfer);
168 + ret = spi_qup_do_pio(master, xfer, timeout);
173 - if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
174 - dev_warn(controller->dev, "cannot set EXECUTE state\n");
178 - if (!wait_for_completion_timeout(&controller->done, timeout))
182 spi_qup_set_state(controller, QUP_STATE_RESET);
183 spin_lock_irqsave(&controller->lock, flags);
184 @@ -664,15 +686,23 @@ static bool spi_qup_can_dma(struct spi_master *master, struct spi_device *spi,
185 size_t dma_align = dma_get_cache_alignment();
188 - if (xfer->rx_buf && (xfer->len % qup->in_blk_sz ||
189 - IS_ERR_OR_NULL(master->dma_rx) ||
190 - !IS_ALIGNED((size_t)xfer->rx_buf, dma_align)))
192 + if (xfer->rx_buf) {
193 + if (!IS_ALIGNED((size_t)xfer->rx_buf, dma_align) ||
194 + IS_ERR_OR_NULL(master->dma_rx))
197 - if (xfer->tx_buf && (xfer->len % qup->out_blk_sz ||
198 - IS_ERR_OR_NULL(master->dma_tx) ||
199 - !IS_ALIGNED((size_t)xfer->tx_buf, dma_align)))
201 + if (qup->qup_v1 && (xfer->len % qup->in_blk_sz))
205 + if (xfer->tx_buf) {
206 + if (!IS_ALIGNED((size_t)xfer->tx_buf, dma_align) ||
207 + IS_ERR_OR_NULL(master->dma_tx))
210 + if (qup->qup_v1 && (xfer->len % qup->out_blk_sz))
214 n_words = xfer->len / DIV_ROUND_UP(xfer->bits_per_word, 8);
215 if (n_words <= (qup->in_fifo_sz / sizeof(u32)))
216 @@ -875,6 +905,7 @@ static int spi_qup_probe(struct platform_device *pdev)
218 spin_lock_init(&controller->lock);
219 init_completion(&controller->done);
220 + init_completion(&controller->dma_tx_done);
222 iomode = readl_relaxed(base + QUP_IO_M_MODES);