ipq806x: sync with latest patches sent by QCA
[openwrt/openwrt.git] target/linux/ipq806x/patches-4.4/712-spi-qup-Fix-DMA-mode-to-work-correctly.patch
From ed56e6322b067a898a25bda1774eb1180a832246 Mon Sep 17 00:00:00 2001
From: Andy Gross <andy.gross@linaro.org>
Date: Tue, 2 Feb 2016 17:00:53 -0600
Subject: [PATCH] spi: qup: Fix DMA mode to work correctly

This patch fixes a few issues with DMA mode. The QUP must be placed in
the RUN state before the DMA transactions are executed, so the RUN-state
transition and the completion timeout handling move out of
spi_qup_transfer_one() and into the DMA and PIO paths, with a separate
completion for the TX descriptor. The conditions for being able to use
DMA also vary between revisions of the QUP: v1.1.1 uses ADM DMA and can
only DMA transfer lengths that are a multiple of the block size, while
later revisions use BAM and carry no such restriction.
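
To make the resulting control flow easier to review, the DMA path added
below reduces to the following condensed sketch (the dev_warn() on a
failed state change and the rx_done/tx_done callback locals are elided;
all names are as in the diff):

	static int spi_qup_do_dma(struct spi_master *master,
				  struct spi_transfer *xfer,
				  unsigned long timeout)
	{
		struct spi_qup *qup = spi_master_get_devdata(master);
		int ret;

		/* the QUP must be in RUN state before the descriptors run */
		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
		if (ret)
			return ret;

		/* each direction signals its own completion object */
		if (xfer->rx_buf) {
			ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM,
					      spi_qup_dma_done, &qup->done);
			if (ret)
				return ret;
			dma_async_issue_pending(master->dma_rx);
		}

		if (xfer->tx_buf) {
			ret = spi_qup_prep_sg(master, xfer, DMA_MEM_TO_DEV,
					      spi_qup_dma_done,
					      &qup->dma_tx_done);
			if (ret)
				return ret;
			dma_async_issue_pending(master->dma_tx);
		}

		/* wait for RX and TX completion independently */
		if (xfer->rx_buf &&
		    !wait_for_completion_timeout(&qup->done, timeout))
			return -ETIMEDOUT;

		if (xfer->tx_buf &&
		    !wait_for_completion_timeout(&qup->dma_tx_done, timeout))
			ret = -ETIMEDOUT;

		return ret;
	}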

Change-Id: Ib1f876eaa05d079e0bca4358d2b25b2940986089
Signed-off-by: Andy Gross <andy.gross@linaro.org>
---
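
The DMA eligibility test changes accordingly; here is a sketch of the RX
side of the new spi_qup_can_dma() check (the TX side in the diff mirrors
it with out_blk_sz and master->dma_tx):

	if (xfer->rx_buf) {
		if (!IS_ALIGNED((size_t)xfer->rx_buf, dma_align) ||
		    IS_ERR_OR_NULL(master->dma_rx))
			return false;

		/* only v1.1.1 (ADM) requires block-sized lengths */
		if (qup->qup_v1 && (xfer->len % qup->in_blk_sz))
			return false;
	}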
 drivers/spi/spi-qup.c | 95 ++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 63 insertions(+), 32 deletions(-)

diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index fe629f2..089c5e8 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -143,6 +143,7 @@ struct spi_qup {
 
 	struct spi_transfer	*xfer;
 	struct completion	done;
+	struct completion	dma_tx_done;
 	int			error;
 	int			w_size;	/* bytes per SPI word */
 	int			n_words;
@@ -285,16 +286,16 @@ static void spi_qup_fifo_write(struct spi_qup *controller,
 
 static void spi_qup_dma_done(void *data)
 {
-	struct spi_qup *qup = data;
+	struct completion *done = data;
 
-	complete(&qup->done);
+	complete(done);
 }
 
 static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer,
			   enum dma_transfer_direction dir,
-			   dma_async_tx_callback callback)
+			   dma_async_tx_callback callback,
+			   void *data)
 {
-	struct spi_qup *qup = spi_master_get_devdata(master);
 	unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
 	struct dma_async_tx_descriptor *desc;
 	struct scatterlist *sgl;
@@ -313,11 +314,11 @@ static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer,
 	}
 
 	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
-	if (!desc)
-		return -EINVAL;
+	if (IS_ERR_OR_NULL(desc))
+		return desc ? PTR_ERR(desc) : -EINVAL;
 
 	desc->callback = callback;
-	desc->callback_param = qup;
+	desc->callback_param = data;
 
 	cookie = dmaengine_submit(desc);
 
@@ -333,18 +334,29 @@ static void spi_qup_dma_terminate(struct spi_master *master,
 		dmaengine_terminate_all(master->dma_rx);
 }
 
-static int spi_qup_do_dma(struct spi_master *master, struct spi_transfer *xfer)
+static int spi_qup_do_dma(struct spi_master *master, struct spi_transfer *xfer,
+			  unsigned long timeout)
 {
+	struct spi_qup *qup = spi_master_get_devdata(master);
 	dma_async_tx_callback rx_done = NULL, tx_done = NULL;
 	int ret;
 
+	/* before issuing the descriptors, set the QUP to run */
+	ret = spi_qup_set_state(qup, QUP_STATE_RUN);
+	if (ret) {
+		dev_warn(qup->dev, "cannot set RUN state\n");
+		return ret;
+	}
+
 	if (xfer->rx_buf)
 		rx_done = spi_qup_dma_done;
-	else if (xfer->tx_buf)
+
+	if (xfer->tx_buf)
 		tx_done = spi_qup_dma_done;
 
 	if (xfer->rx_buf) {
-		ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done);
+		ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done,
+				      &qup->done);
 		if (ret)
 			return ret;
 
@@ -352,17 +364,26 @@ static int spi_qup_do_dma(struct spi_master *master, struct spi_transfer *xfer)
 	}
 
 	if (xfer->tx_buf) {
-		ret = spi_qup_prep_sg(master, xfer, DMA_MEM_TO_DEV, tx_done);
+		ret = spi_qup_prep_sg(master, xfer, DMA_MEM_TO_DEV, tx_done,
+				      &qup->dma_tx_done);
 		if (ret)
 			return ret;
 
 		dma_async_issue_pending(master->dma_tx);
 	}
 
-	return 0;
+	if (xfer->rx_buf && !wait_for_completion_timeout(&qup->done, timeout))
+		return -ETIMEDOUT;
+
+	if (xfer->tx_buf &&
+	    !wait_for_completion_timeout(&qup->dma_tx_done, timeout))
+		ret = -ETIMEDOUT;
+
+	return ret;
 }
 
-static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer)
+static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer,
+			  unsigned long timeout)
 {
 	struct spi_qup *qup = spi_master_get_devdata(master);
 	int ret;
@@ -382,6 +403,15 @@ static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer)
 	if (qup->mode == QUP_IO_M_MODE_FIFO)
 		spi_qup_fifo_write(qup, xfer);
 
+	ret = spi_qup_set_state(qup, QUP_STATE_RUN);
+	if (ret) {
+		dev_warn(qup->dev, "cannot set RUN state\n");
+		return ret;
+	}
+
+	if (!wait_for_completion_timeout(&qup->done, timeout))
+		return -ETIMEDOUT;
+
 	return 0;
 }
 
@@ -430,7 +460,6 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
 			dev_warn(controller->dev, "CLK_OVER_RUN\n");
 		if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
 			dev_warn(controller->dev, "CLK_UNDER_RUN\n");
-
 		error = -EIO;
 	}
 
@@ -619,6 +648,7 @@ static int spi_qup_transfer_one(struct spi_master *master,
 	timeout = 100 * msecs_to_jiffies(timeout);
 
 	reinit_completion(&controller->done);
+	reinit_completion(&controller->dma_tx_done);
 
 	spin_lock_irqsave(&controller->lock, flags);
 	controller->xfer     = xfer;
@@ -628,21 +658,13 @@ static int spi_qup_transfer_one(struct spi_master *master,
 	spin_unlock_irqrestore(&controller->lock, flags);
 
 	if (spi_qup_is_dma_xfer(controller->mode))
-		ret = spi_qup_do_dma(master, xfer);
+		ret = spi_qup_do_dma(master, xfer, timeout);
 	else
-		ret = spi_qup_do_pio(master, xfer);
+		ret = spi_qup_do_pio(master, xfer, timeout);
 
 	if (ret)
 		goto exit;
 
-	if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
-		dev_warn(controller->dev, "cannot set EXECUTE state\n");
-		goto exit;
-	}
-
-	if (!wait_for_completion_timeout(&controller->done, timeout))
-		ret = -ETIMEDOUT;
-
 exit:
 	spi_qup_set_state(controller, QUP_STATE_RESET);
 	spin_lock_irqsave(&controller->lock, flags);
@@ -664,15 +686,23 @@ static bool spi_qup_can_dma(struct spi_master *master, struct spi_device *spi,
 	size_t dma_align = dma_get_cache_alignment();
 	int n_words;
 
-	if (xfer->rx_buf && (xfer->len % qup->in_blk_sz ||
-	    IS_ERR_OR_NULL(master->dma_rx) ||
-	    !IS_ALIGNED((size_t)xfer->rx_buf, dma_align)))
-		return false;
+	if (xfer->rx_buf) {
+		if (!IS_ALIGNED((size_t)xfer->rx_buf, dma_align) ||
+		    IS_ERR_OR_NULL(master->dma_rx))
+			return false;
 
-	if (xfer->tx_buf && (xfer->len % qup->out_blk_sz ||
-	    IS_ERR_OR_NULL(master->dma_tx) ||
-	    !IS_ALIGNED((size_t)xfer->tx_buf, dma_align)))
-		return false;
+		if (qup->qup_v1 && (xfer->len % qup->in_blk_sz))
+			return false;
+	}
+
+	if (xfer->tx_buf) {
+		if (!IS_ALIGNED((size_t)xfer->tx_buf, dma_align) ||
+		    IS_ERR_OR_NULL(master->dma_tx))
+			return false;
+
+		if (qup->qup_v1 && (xfer->len % qup->out_blk_sz))
+			return false;
+	}
 
 	n_words = xfer->len / DIV_ROUND_UP(xfer->bits_per_word, 8);
 	if (n_words <= (qup->in_fifo_sz / sizeof(u32)))
@@ -875,6 +905,7 @@ static int spi_qup_probe(struct platform_device *pdev)
 
 	spin_lock_init(&controller->lock);
 	init_completion(&controller->done);
+	init_completion(&controller->dma_tx_done);
 
 	iomode = readl_relaxed(base + QUP_IO_M_MODES);
 
-- 
2.7.2
