openwrt/openwrt.git: target/linux/ipq806x/patches-4.0/002-v3-spi-qup-Fix-incorrect-block-transfers.patch
1 Content-Type: text/plain; charset="utf-8"
2 MIME-Version: 1.0
3 Content-Transfer-Encoding: 7bit
4 Subject: [v3] spi: qup: Fix incorrect block transfers
5 From: Andy Gross <agross@codeaurora.org>
6 X-Patchwork-Id: 5007321
7 Message-Id: <1412112088-25928-1-git-send-email-agross@codeaurora.org>
8 To: Mark Brown <broonie@kernel.org>
9 Cc: linux-spi@vger.kernel.org, linux-kernel@vger.kernel.org,
10 linux-arm-kernel@lists.infradead.org, linux-arm-msm@vger.kernel.org,
11 "Ivan T. Ivanov" <iivanov@mm-sol.com>,
12 Bjorn Andersson <bjorn.andersson@sonymobile.com>,
13 Kumar Gala <galak@codeaurora.org>, Andy Gross <agross@codeaurora.org>
14 Date: Tue, 30 Sep 2014 16:21:28 -0500
15
16 This patch fixes a number of errors in the QUP block transfer mode. The errors
17 manifested as input underruns, output overruns, and timed-out
18 transactions.
19
20 The block mode does not require the priming that occurs in FIFO mode. The
21 moment the QUP is placed into the RUN state, it will immediately raise
22 an interrupt if the request is a write. Therefore, there is no need to prime
23 the pump.
24
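As a concrete illustration (condensed from the spi_qup_transfer_one() hunk
further down in this patch; all identifiers are the driver's own), only FIFO
mode still pre-fills the output FIFO before entering RUN:

	/* Block mode raises an output service interrupt once the QUP enters
	 * the RUN state, so only FIFO mode primes the output FIFO up front. */
	if (controller->mode == QUP_IO_M_MODE_FIFO)
		spi_qup_fifo_write(controller, xfer);
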
25 In addition, block transfers require that whole blocks of data are
26 read/written at a time. The last block of data that completes a transaction may
27 contain less than a full block's worth of data.
28
29 Each block of data results in an input/output service interrupt accompanied by
30 an input/output block flag being set. Additional block reads/writes require
31 clearing of the service flag. It is OK to check for additional blocks of data
32 in the ISR, but every block transferred must be acked. Imbalanced acks result
33 in transactions completing early while interrupts that still need to be acked
34 are left pending; those interrupts can affect the next transaction.
35 Transactions are deemed complete when the MAX_INPUT or MAX_OUTPUT flag is set.
36
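To make the ack rule concrete, the following is a condensed sketch of the
read-side service pattern that the new spi_qup_block_read() below implements
(the trailing MAX_INPUT_DONE ack and bookkeeping are omitted; all identifiers
are the driver's own):

	do {
		/* ack this block by clearing the input service flag */
		writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
			       controller->base + QUP_OPERATIONAL);

		/* drain up to one block's worth of words */
		for (i = 0; num_words && i < reads_per_blk; i++, num_words--) {
			data = readl_relaxed(controller->base + QUP_INPUT_FIFO);
			spi_qup_fill_read_buffer(controller, xfer, data);
		}

		/* continue only if the next block is already flagged */
	} while (num_words &&
		 (readl_relaxed(controller->base + QUP_OPERATIONAL) &
		  QUP_OP_IN_BLOCK_READ_REQ));
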
37 Changes from v2:
38 - Added an additional completion check so that transaction done is not
39 signaled prematurely (see the sketch after this list).
40 - Fixed various review comments.
41
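The completion check referred to above is the interrupt-handler hunk below,
reproduced here for reference: completion now also requires the hardware
MAX_INPUT done flag rather than the byte count alone.

	/* complete only once all bytes are read AND the QUP reports input done */
	if ((controller->rx_bytes == xfer->len &&
	     (opflags & QUP_OP_MAX_INPUT_DONE_FLAG)) || error)
		complete(&controller->done);
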
42 Changes from v1:
43 - Split out read/write block function.
44 - Removed extraneous checks for transfer length
45
46 Signed-off-by: Andy Gross <agross@codeaurora.org>
47
48 ---
49 drivers/spi/spi-qup.c | 201 ++++++++++++++++++++++++++++++++++++-------------
50 1 file changed, 148 insertions(+), 53 deletions(-)
51
52 --- a/drivers/spi/spi-qup.c
53 +++ b/drivers/spi/spi-qup.c
54 @@ -82,6 +82,8 @@
55 #define QUP_IO_M_MODE_BAM 3
56
57 /* QUP_OPERATIONAL fields */
58 +#define QUP_OP_IN_BLOCK_READ_REQ BIT(13)
59 +#define QUP_OP_OUT_BLOCK_WRITE_REQ BIT(12)
60 #define QUP_OP_MAX_INPUT_DONE_FLAG BIT(11)
61 #define QUP_OP_MAX_OUTPUT_DONE_FLAG BIT(10)
62 #define QUP_OP_IN_SERVICE_FLAG BIT(9)
63 @@ -147,6 +149,7 @@ struct spi_qup {
64 int tx_bytes;
65 int rx_bytes;
66 int qup_v1;
67 + int mode;
68
69 int use_dma;
70
71 @@ -213,30 +216,14 @@ static int spi_qup_set_state(struct spi_
72 return 0;
73 }
74
75 -
76 -static void spi_qup_fifo_read(struct spi_qup *controller,
77 - struct spi_transfer *xfer)
78 +static void spi_qup_fill_read_buffer(struct spi_qup *controller,
79 + struct spi_transfer *xfer, u32 data)
80 {
81 u8 *rx_buf = xfer->rx_buf;
82 - u32 word, state;
83 - int idx, shift, w_size;
84 -
85 - w_size = controller->w_size;
86 -
87 - while (controller->rx_bytes < xfer->len) {
88 -
89 - state = readl_relaxed(controller->base + QUP_OPERATIONAL);
90 - if (0 == (state & QUP_OP_IN_FIFO_NOT_EMPTY))
91 - break;
92 -
93 - word = readl_relaxed(controller->base + QUP_INPUT_FIFO);
94 -
95 - if (!rx_buf) {
96 - controller->rx_bytes += w_size;
97 - continue;
98 - }
99 + int idx, shift;
100
101 - for (idx = 0; idx < w_size; idx++, controller->rx_bytes++) {
102 + if (rx_buf)
103 + for (idx = 0; idx < controller->w_size; idx++) {
104 /*
105 * The data format depends on bytes per SPI word:
106 * 4 bytes: 0x12345678
107 @@ -244,41 +231,139 @@ static void spi_qup_fifo_read(struct spi
108 * 1 byte : 0x00000012
109 */
110 shift = BITS_PER_BYTE;
111 - shift *= (w_size - idx - 1);
112 - rx_buf[controller->rx_bytes] = word >> shift;
113 + shift *= (controller->w_size - idx - 1);
114 + rx_buf[controller->rx_bytes + idx] = data >> shift;
115 + }
116 +
117 + controller->rx_bytes += controller->w_size;
118 +}
119 +
120 +static void spi_qup_prepare_write_data(struct spi_qup *controller,
121 + struct spi_transfer *xfer, u32 *data)
122 +{
123 + const u8 *tx_buf = xfer->tx_buf;
124 + u32 val;
125 + int idx;
126 +
127 + *data = 0;
128 +
129 + if (tx_buf)
130 + for (idx = 0; idx < controller->w_size; idx++) {
131 + val = tx_buf[controller->tx_bytes + idx];
132 + *data |= val << (BITS_PER_BYTE * (3 - idx));
133 }
134 +
135 + controller->tx_bytes += controller->w_size;
136 +}
137 +
138 +static void spi_qup_fifo_read(struct spi_qup *controller,
139 + struct spi_transfer *xfer)
140 +{
141 + u32 data;
142 +
143 + /* clear service request */
144 + writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
145 + controller->base + QUP_OPERATIONAL);
146 +
147 + while (controller->rx_bytes < xfer->len) {
148 + if (!(readl_relaxed(controller->base + QUP_OPERATIONAL) &
149 + QUP_OP_IN_FIFO_NOT_EMPTY))
150 + break;
151 +
152 + data = readl_relaxed(controller->base + QUP_INPUT_FIFO);
153 +
154 + spi_qup_fill_read_buffer(controller, xfer, data);
155 }
156 }
157
158 static void spi_qup_fifo_write(struct spi_qup *controller,
159 - struct spi_transfer *xfer)
160 + struct spi_transfer *xfer)
161 {
162 - const u8 *tx_buf = xfer->tx_buf;
163 - u32 word, state, data;
164 - int idx, w_size;
165 + u32 data;
166
167 - w_size = controller->w_size;
168 + /* clear service request */
169 + writel_relaxed(QUP_OP_OUT_SERVICE_FLAG,
170 + controller->base + QUP_OPERATIONAL);
171
172 while (controller->tx_bytes < xfer->len) {
173
174 - state = readl_relaxed(controller->base + QUP_OPERATIONAL);
175 - if (state & QUP_OP_OUT_FIFO_FULL)
176 + if (readl_relaxed(controller->base + QUP_OPERATIONAL) &
177 + QUP_OP_OUT_FIFO_FULL)
178 break;
179
180 - word = 0;
181 - for (idx = 0; idx < w_size; idx++, controller->tx_bytes++) {
182 + spi_qup_prepare_write_data(controller, xfer, &data);
183 + writel_relaxed(data, controller->base + QUP_OUTPUT_FIFO);
184
185 - if (!tx_buf) {
186 - controller->tx_bytes += w_size;
187 - break;
188 - }
189 + }
190 +}
191
192 - data = tx_buf[controller->tx_bytes];
193 - word |= data << (BITS_PER_BYTE * (3 - idx));
194 - }
195 +static void spi_qup_block_read(struct spi_qup *controller,
196 + struct spi_transfer *xfer)
197 +{
198 + u32 data;
199 + u32 reads_per_blk = controller->in_blk_sz >> 2;
200 + u32 num_words = (xfer->len - controller->rx_bytes) / controller->w_size;
201 + int i;
202 +
203 + do {
204 + /* ACK by clearing service flag */
205 + writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
206 + controller->base + QUP_OPERATIONAL);
207 +
208 + /* transfer up to a block size of data in a single pass */
209 + for (i = 0; num_words && i < reads_per_blk; i++, num_words--) {
210 +
211 + /* read data and fill up rx buffer */
212 + data = readl_relaxed(controller->base + QUP_INPUT_FIFO);
213 + spi_qup_fill_read_buffer(controller, xfer, data);
214 + }
215 +
216 + /* check to see if next block is ready */
217 + if (!(readl_relaxed(controller->base + QUP_OPERATIONAL) &
218 + QUP_OP_IN_BLOCK_READ_REQ))
219 + break;
220
221 - writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
222 - }
223 + } while (num_words);
224 +
225 + /*
226 + * Due to extra stickiness of the QUP_OP_IN_SERVICE_FLAG during block
227 + * reads, it has to be cleared again at the very end
228 + */
229 + if (readl_relaxed(controller->base + QUP_OPERATIONAL) &
230 + QUP_OP_MAX_INPUT_DONE_FLAG)
231 + writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
232 + controller->base + QUP_OPERATIONAL);
233 +
234 +}
235 +
236 +static void spi_qup_block_write(struct spi_qup *controller,
237 + struct spi_transfer *xfer)
238 +{
239 + u32 data;
240 + u32 writes_per_blk = controller->out_blk_sz >> 2;
241 + u32 num_words = (xfer->len - controller->tx_bytes) / controller->w_size;
242 + int i;
243 +
244 + do {
245 + /* ACK by clearing service flag */
246 + writel_relaxed(QUP_OP_OUT_SERVICE_FLAG,
247 + controller->base + QUP_OPERATIONAL);
248 +
249 + /* transfer up to a block size of data in a single pass */
250 + for (i = 0; num_words && i < writes_per_blk; i++, num_words--) {
251 +
252 + /* swizzle the bytes for output and write out */
253 + spi_qup_prepare_write_data(controller, xfer, &data);
254 + writel_relaxed(data,
255 + controller->base + QUP_OUTPUT_FIFO);
256 + }
257 +
258 + /* check to see if next block is ready */
259 + if (!(readl_relaxed(controller->base + QUP_OPERATIONAL) &
260 + QUP_OP_OUT_BLOCK_WRITE_REQ))
261 + break;
262 +
263 + } while (num_words);
264 }
265
266 static void qup_dma_callback(void *data)
267 @@ -515,9 +600,9 @@ static irqreturn_t spi_qup_qup_irq(int i
268
269 writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
270 writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
271 - writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
272
273 if (!xfer) {
274 + writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
275 dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
276 qup_err, spi_err, opflags);
277 return IRQ_HANDLED;
278 @@ -546,11 +631,19 @@ static irqreturn_t spi_qup_qup_irq(int i
279 }
280
281 if (!controller->use_dma) {
282 - if (opflags & QUP_OP_IN_SERVICE_FLAG)
283 - spi_qup_fifo_read(controller, xfer);
284 + if (opflags & QUP_OP_IN_SERVICE_FLAG) {
285 + if (opflags & QUP_OP_IN_BLOCK_READ_REQ)
286 + spi_qup_block_read(controller, xfer);
287 + else
288 + spi_qup_fifo_read(controller, xfer);
289 + }
290
291 - if (opflags & QUP_OP_OUT_SERVICE_FLAG)
292 - spi_qup_fifo_write(controller, xfer);
293 + if (opflags & QUP_OP_OUT_SERVICE_FLAG) {
294 + if (opflags & QUP_OP_OUT_BLOCK_WRITE_REQ)
295 + spi_qup_block_write(controller, xfer);
296 + else
297 + spi_qup_fifo_write(controller, xfer);
298 + }
299 }
300
301 spin_lock_irqsave(&controller->lock, flags);
302 @@ -558,7 +651,8 @@ static irqreturn_t spi_qup_qup_irq(int i
303 controller->xfer = xfer;
304 spin_unlock_irqrestore(&controller->lock, flags);
305
306 - if (controller->rx_bytes == xfer->len || error)
307 + if ((controller->rx_bytes == xfer->len &&
308 + (opflags & QUP_OP_MAX_INPUT_DONE_FLAG)) || error)
309 complete(&controller->done);
310
311 return IRQ_HANDLED;
312 @@ -569,7 +663,7 @@ static irqreturn_t spi_qup_qup_irq(int i
313 static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
314 {
315 struct spi_qup *controller = spi_master_get_devdata(spi->master);
316 - u32 config, iomode, mode, control;
317 + u32 config, iomode, control;
318 int ret, n_words, w_size;
319 size_t dma_align = dma_get_cache_alignment();
320 u32 dma_available = 0;
321 @@ -607,7 +701,7 @@ static int spi_qup_io_config(struct spi_
322 dma_available = 1;
323
324 if (n_words <= (controller->in_fifo_sz / sizeof(u32))) {
325 - mode = QUP_IO_M_MODE_FIFO;
326 + controller->mode = QUP_IO_M_MODE_FIFO;
327 writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT);
328 writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT);
329 /* must be zero for FIFO */
330 @@ -615,7 +709,7 @@ static int spi_qup_io_config(struct spi_
331 writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
332 controller->use_dma = 0;
333 } else if (!dma_available) {
334 - mode = QUP_IO_M_MODE_BLOCK;
335 + controller->mode = QUP_IO_M_MODE_BLOCK;
336 writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
337 writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
338 /* must be zero for BLOCK and BAM */
339 @@ -623,7 +717,7 @@ static int spi_qup_io_config(struct spi_
340 writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
341 controller->use_dma = 0;
342 } else {
343 - mode = QUP_IO_M_MODE_DMOV;
344 + controller->mode = QUP_IO_M_MODE_DMOV;
345 writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
346 writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
347 controller->use_dma = 1;
348 @@ -638,8 +732,8 @@ static int spi_qup_io_config(struct spi_
349 else
350 iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN;
351
352 - iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
353 - iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);
354 + iomode |= (controller->mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
355 + iomode |= (controller->mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);
356
357 writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);
358
359 @@ -733,7 +827,8 @@ static int spi_qup_transfer_one(struct s
360 goto exit;
361 }
362
363 - spi_qup_fifo_write(controller, xfer);
364 + if (controller->mode == QUP_IO_M_MODE_FIFO)
365 + spi_qup_fifo_write(controller, xfer);
366
367 if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
368 dev_warn(controller->dev, "cannot set EXECUTE state\n");
369 @@ -750,6 +845,7 @@ exit:
370 if (!ret)
371 ret = controller->error;
372 spin_unlock_irqrestore(&controller->lock, flags);
373 +
374 return ret;
375 }
376