ipq806x: sync with latest patches sent by QCA
[openwrt/openwrt.git] / target / linux / ipq806x / patches-4.4 / 713-spi-qup-Fix-block-mode-to-work-correctly.patch
1 From 148f77310a9ddf4db5036066458d7aed92cea9ae Mon Sep 17 00:00:00 2001
2 From: Andy Gross <andy.gross@linaro.org>
3 Date: Sun, 31 Jan 2016 21:28:13 -0600
4 Subject: [PATCH] spi: qup: Fix block mode to work correctly
5
6 This patch corrects the behavior of BLOCK mode
7 transactions. During block mode transactions, the controller
8 must be read from and written to in block-size chunks.
9
10 Signed-off-by: Andy Gross <andy.gross@linaro.org>
11
12 Change-Id: I4b4f4d25be57e6e8148f6f0d24bed376eb287ecf
13 ---
14 drivers/spi/spi-qup.c | 181 +++++++++++++++++++++++++++++++++++++++-----------
15 1 file changed, 141 insertions(+), 40 deletions(-)
16
17 diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
18 index 089c5e8..e487416 100644
19 --- a/drivers/spi/spi-qup.c
20 +++ b/drivers/spi/spi-qup.c
21 @@ -83,6 +83,8 @@
22 #define QUP_IO_M_MODE_BAM 3
23
24 /* QUP_OPERATIONAL fields */
25 +#define QUP_OP_IN_BLOCK_READ_REQ BIT(13)
26 +#define QUP_OP_OUT_BLOCK_WRITE_REQ BIT(12)
27 #define QUP_OP_MAX_INPUT_DONE_FLAG BIT(11)
28 #define QUP_OP_MAX_OUTPUT_DONE_FLAG BIT(10)
29 #define QUP_OP_IN_SERVICE_FLAG BIT(9)
30 @@ -156,6 +158,12 @@ struct spi_qup {
31 struct dma_slave_config tx_conf;
32 };
33
34 +static inline bool spi_qup_is_flag_set(struct spi_qup *controller, u32 flag)
35 +{
36 + u32 opflag = readl_relaxed(controller->base + QUP_OPERATIONAL);
37 +
38 + return opflag & flag;
39 +}
40
41 static inline bool spi_qup_is_dma_xfer(int mode)
42 {
43 @@ -217,29 +225,26 @@ static int spi_qup_set_state(struct spi_qup *controller, u32 state)
44 return 0;
45 }
46
47 -static void spi_qup_fifo_read(struct spi_qup *controller,
48 - struct spi_transfer *xfer)
49 +static void spi_qup_read_from_fifo(struct spi_qup *controller,
50 + struct spi_transfer *xfer, u32 num_words)
51 {
52 u8 *rx_buf = xfer->rx_buf;
53 - u32 word, state;
54 - int idx, shift, w_size;
55 -
56 - w_size = controller->w_size;
57 -
58 - while (controller->rx_bytes < xfer->len) {
59 + int i, shift, num_bytes;
60 + u32 word;
61
62 - state = readl_relaxed(controller->base + QUP_OPERATIONAL);
63 - if (0 == (state & QUP_OP_IN_FIFO_NOT_EMPTY))
64 - break;
65 + for (; num_words; num_words--) {
66
67 word = readl_relaxed(controller->base + QUP_INPUT_FIFO);
68
69 + num_bytes = min_t(int, xfer->len - controller->rx_bytes,
70 + controller->w_size);
71 +
72 if (!rx_buf) {
73 - controller->rx_bytes += w_size;
74 + controller->rx_bytes += num_bytes;
75 continue;
76 }
77
78 - for (idx = 0; idx < w_size; idx++, controller->rx_bytes++) {
79 + for (i = 0; i < num_bytes; i++, controller->rx_bytes++) {
80 /*
81 * The data format depends on bytes per SPI word:
82 * 4 bytes: 0x12345678
83 @@ -247,38 +252,80 @@ static void spi_qup_fifo_read(struct spi_qup *controller,
84 * 1 byte : 0x00000012
85 */
86 shift = BITS_PER_BYTE;
87 - shift *= (w_size - idx - 1);
88 + shift *= (controller->w_size - i - 1);
89 rx_buf[controller->rx_bytes] = word >> shift;
90 }
91 }
92 }
93
94 -static void spi_qup_fifo_write(struct spi_qup *controller,
95 +static void spi_qup_read(struct spi_qup *controller,
96 struct spi_transfer *xfer)
97 {
98 - const u8 *tx_buf = xfer->tx_buf;
99 - u32 word, state, data;
100 - int idx, w_size;
101 + u32 remainder, words_per_block, num_words;
102 + bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
103 +
104 + remainder = DIV_ROUND_UP(xfer->len - controller->rx_bytes,
105 + controller->w_size);
106 + words_per_block = controller->in_blk_sz >> 2;
107 +
108 + do {
109 + /* ACK by clearing service flag */
110 + writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
111 + controller->base + QUP_OPERATIONAL);
112 +
113 + if (is_block_mode) {
114 + num_words = (remainder > words_per_block) ?
115 + words_per_block : remainder;
116 + } else {
117 + if (!spi_qup_is_flag_set(controller,
118 + QUP_OP_IN_FIFO_NOT_EMPTY))
119 + break;
120 +
121 + num_words = 1;
122 + }
123
124 - w_size = controller->w_size;
125 + /* read up to the maximum transfer size available */
126 + spi_qup_read_from_fifo(controller, xfer, num_words);
127
128 - while (controller->tx_bytes < xfer->len) {
129 + remainder -= num_words;
130
131 - state = readl_relaxed(controller->base + QUP_OPERATIONAL);
132 - if (state & QUP_OP_OUT_FIFO_FULL)
133 + /* if block mode, check to see if next block is available */
134 + if (is_block_mode && !spi_qup_is_flag_set(controller,
135 + QUP_OP_IN_BLOCK_READ_REQ))
136 break;
137
138 + } while (remainder);
139 +
140 + /*
141 + * Due to extra stickiness of the QUP_OP_IN_SERVICE_FLAG during block
142 + * mode reads, it has to be cleared again at the very end
143 + */
144 + if (is_block_mode && spi_qup_is_flag_set(controller,
145 + QUP_OP_MAX_INPUT_DONE_FLAG))
146 + writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
147 + controller->base + QUP_OPERATIONAL);
148 +
149 +}
150 +
151 +static void spi_qup_write_to_fifo(struct spi_qup *controller,
152 + struct spi_transfer *xfer, u32 num_words)
153 +{
154 + const u8 *tx_buf = xfer->tx_buf;
155 + int i, num_bytes;
156 + u32 word, data;
157 +
158 + for (; num_words; num_words--) {
159 word = 0;
160 - for (idx = 0; idx < w_size; idx++, controller->tx_bytes++) {
161
162 - if (!tx_buf) {
163 - controller->tx_bytes += w_size;
164 - break;
165 + num_bytes = min_t(int, xfer->len - controller->tx_bytes,
166 + controller->w_size);
167 + if (tx_buf)
168 + for (i = 0; i < num_bytes; i++) {
169 + data = tx_buf[controller->tx_bytes + i];
170 + word |= data << (BITS_PER_BYTE * (3 - i));
171 }
172
173 - data = tx_buf[controller->tx_bytes];
174 - word |= data << (BITS_PER_BYTE * (3 - idx));
175 - }
176 + controller->tx_bytes += num_bytes;
177
178 writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
179 }
180 @@ -291,6 +338,44 @@ static void spi_qup_dma_done(void *data)
181 complete(done);
182 }
183
184 +static void spi_qup_write(struct spi_qup *controller,
185 + struct spi_transfer *xfer)
186 +{
187 + bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
188 + u32 remainder, words_per_block, num_words;
189 +
190 + remainder = DIV_ROUND_UP(xfer->len - controller->tx_bytes,
191 + controller->w_size);
192 + words_per_block = controller->out_blk_sz >> 2;
193 +
194 + do {
195 + /* ACK by clearing service flag */
196 + writel_relaxed(QUP_OP_OUT_SERVICE_FLAG,
197 + controller->base + QUP_OPERATIONAL);
198 +
199 + if (is_block_mode) {
200 + num_words = (remainder > words_per_block) ?
201 + words_per_block : remainder;
202 + } else {
203 + if (spi_qup_is_flag_set(controller,
204 + QUP_OP_OUT_FIFO_FULL))
205 + break;
206 +
207 + num_words = 1;
208 + }
209 +
210 + spi_qup_write_to_fifo(controller, xfer, num_words);
211 +
212 + remainder -= num_words;
213 +
214 + /* if block mode, check to see if next block is available */
215 + if (is_block_mode && !spi_qup_is_flag_set(controller,
216 + QUP_OP_OUT_BLOCK_WRITE_REQ))
217 + break;
218 +
219 + } while (remainder);
220 +}
221 +
222 static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer,
223 enum dma_transfer_direction dir,
224 dma_async_tx_callback callback,
225 @@ -348,11 +433,13 @@ unsigned long timeout)
226 return ret;
227 }
228
229 - if (xfer->rx_buf)
230 - rx_done = spi_qup_dma_done;
231 + if (!qup->qup_v1) {
232 + if (xfer->rx_buf)
233 + rx_done = spi_qup_dma_done;
234
235 - if (xfer->tx_buf)
236 - tx_done = spi_qup_dma_done;
237 + if (xfer->tx_buf)
238 + tx_done = spi_qup_dma_done;
239 + }
240
241 if (xfer->rx_buf) {
242 ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done,
243 @@ -401,7 +488,7 @@ static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer,
244 }
245
246 if (qup->mode == QUP_IO_M_MODE_FIFO)
247 - spi_qup_fifo_write(qup, xfer);
248 + spi_qup_write(qup, xfer);
249
250 ret = spi_qup_set_state(qup, QUP_STATE_RUN);
251 if (ret) {
252 @@ -434,10 +521,11 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
253
254 writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
255 writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
256 - writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
257
258 if (!xfer) {
259 - dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
260 + writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
261 + dev_err_ratelimited(controller->dev,
262 + "unexpected irq %08x %08x %08x\n",
263 qup_err, spi_err, opflags);
264 return IRQ_HANDLED;
265 }
266 @@ -463,12 +551,20 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
267 error = -EIO;
268 }
269
270 - if (!spi_qup_is_dma_xfer(controller->mode)) {
271 + if (spi_qup_is_dma_xfer(controller->mode)) {
272 + writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
273 + if (opflags & QUP_OP_IN_SERVICE_FLAG &&
274 + opflags & QUP_OP_MAX_INPUT_DONE_FLAG)
275 + complete(&controller->done);
276 + if (opflags & QUP_OP_OUT_SERVICE_FLAG &&
277 + opflags & QUP_OP_MAX_OUTPUT_DONE_FLAG)
278 + complete(&controller->dma_tx_done);
279 + } else {
280 if (opflags & QUP_OP_IN_SERVICE_FLAG)
281 - spi_qup_fifo_read(controller, xfer);
282 + spi_qup_read(controller, xfer);
283
284 if (opflags & QUP_OP_OUT_SERVICE_FLAG)
285 - spi_qup_fifo_write(controller, xfer);
286 + spi_qup_write(controller, xfer);
287 }
288
289 spin_lock_irqsave(&controller->lock, flags);
290 @@ -476,6 +572,9 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
291 controller->xfer = xfer;
292 spin_unlock_irqrestore(&controller->lock, flags);
293
294 + /* re-read opflags as flags may have changed due to actions above */
295 + opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
296 +
297 if ((controller->rx_bytes == xfer->len &&
298 (opflags & QUP_OP_MAX_INPUT_DONE_FLAG)) || error)
299 complete(&controller->done);
300 @@ -519,11 +618,13 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
301 /* must be zero for FIFO */
302 writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
303 writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
304 - controller->use_dma = 0;
305 } else if (spi->master->can_dma &&
306 spi->master->can_dma(spi->master, spi, xfer) &&
307 spi->master->cur_msg_mapped) {
308 controller->mode = QUP_IO_M_MODE_BAM;
309 + writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
310 + writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
311 + /* must be zero for BLOCK and BAM */
312 writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
313 writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
314
315 --
316 2.7.2
317