kernel: bump 4.9 to 4.9.77
[openwrt/openwrt.git] / target / linux / ipq806x / patches-4.9 / 0006-spi-qup-Fix-block-mode-to-work-correctly.patch
1 From b56c1e35cc550fd014fa601ca56b964d88fd44a9 Mon Sep 17 00:00:00 2001
2 From: Andy Gross <andy.gross@linaro.org>
3 Date: Sun, 31 Jan 2016 21:28:13 -0600
4 Subject: [PATCH 06/69] spi: qup: Fix block mode to work correctly
5
6 This patch corrects the behavior of BLOCK mode transactions. During block
7 transactions, the controller must be read from and written to in block-sized chunks.
8
9 Signed-off-by: Andy Gross <andy.gross@linaro.org>
10 ---
11 drivers/spi/spi-qup.c | 182 +++++++++++++++++++++++++++++++++++++++-----------
12 1 file changed, 142 insertions(+), 40 deletions(-)
13
14 --- a/drivers/spi/spi-qup.c
15 +++ b/drivers/spi/spi-qup.c
16 @@ -82,6 +82,8 @@
17 #define QUP_IO_M_MODE_BAM 3
18
19 /* QUP_OPERATIONAL fields */
20 +#define QUP_OP_IN_BLOCK_READ_REQ BIT(13)
21 +#define QUP_OP_OUT_BLOCK_WRITE_REQ BIT(12)
22 #define QUP_OP_MAX_INPUT_DONE_FLAG BIT(11)
23 #define QUP_OP_MAX_OUTPUT_DONE_FLAG BIT(10)
24 #define QUP_OP_IN_SERVICE_FLAG BIT(9)
25 @@ -155,6 +157,12 @@ struct spi_qup {
26 struct dma_slave_config tx_conf;
27 };
28
29 +static inline bool spi_qup_is_flag_set(struct spi_qup *controller, u32 flag)
30 +{
31 + u32 opflag = readl_relaxed(controller->base + QUP_OPERATIONAL);
32 +
33 + return opflag & flag;
34 +}
35
36 static inline bool spi_qup_is_dma_xfer(int mode)
37 {
38 @@ -216,29 +224,26 @@ static int spi_qup_set_state(struct spi_
39 return 0;
40 }
41
42 -static void spi_qup_fifo_read(struct spi_qup *controller,
43 - struct spi_transfer *xfer)
44 +static void spi_qup_read_from_fifo(struct spi_qup *controller,
45 + struct spi_transfer *xfer, u32 num_words)
46 {
47 u8 *rx_buf = xfer->rx_buf;
48 - u32 word, state;
49 - int idx, shift, w_size;
50 -
51 - w_size = controller->w_size;
52 + int i, shift, num_bytes;
53 + u32 word;
54
55 - while (controller->rx_bytes < xfer->len) {
56 -
57 - state = readl_relaxed(controller->base + QUP_OPERATIONAL);
58 - if (0 == (state & QUP_OP_IN_FIFO_NOT_EMPTY))
59 - break;
60 + for (; num_words; num_words--) {
61
62 word = readl_relaxed(controller->base + QUP_INPUT_FIFO);
63
64 + num_bytes = min_t(int, xfer->len - controller->rx_bytes,
65 + controller->w_size);
66 +
67 if (!rx_buf) {
68 - controller->rx_bytes += w_size;
69 + controller->rx_bytes += num_bytes;
70 continue;
71 }
72
73 - for (idx = 0; idx < w_size; idx++, controller->rx_bytes++) {
74 + for (i = 0; i < num_bytes; i++, controller->rx_bytes++) {
75 /*
76 * The data format depends on bytes per SPI word:
77 * 4 bytes: 0x12345678
78 @@ -246,38 +251,80 @@ static void spi_qup_fifo_read(struct spi
79 * 1 byte : 0x00000012
80 */
81 shift = BITS_PER_BYTE;
82 - shift *= (w_size - idx - 1);
83 + shift *= (controller->w_size - i - 1);
84 rx_buf[controller->rx_bytes] = word >> shift;
85 }
86 }
87 }
88
89 -static void spi_qup_fifo_write(struct spi_qup *controller,
90 +static void spi_qup_read(struct spi_qup *controller,
91 struct spi_transfer *xfer)
92 {
93 - const u8 *tx_buf = xfer->tx_buf;
94 - u32 word, state, data;
95 - int idx, w_size;
96 + u32 remainder, words_per_block, num_words;
97 + bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
98 +
99 + remainder = DIV_ROUND_UP(xfer->len - controller->rx_bytes,
100 + controller->w_size);
101 + words_per_block = controller->in_blk_sz >> 2;
102 +
103 + do {
104 + /* ACK by clearing service flag */
105 + writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
106 + controller->base + QUP_OPERATIONAL);
107 +
108 + if (is_block_mode) {
109 + num_words = (remainder > words_per_block) ?
110 + words_per_block : remainder;
111 + } else {
112 + if (!spi_qup_is_flag_set(controller,
113 + QUP_OP_IN_FIFO_NOT_EMPTY))
114 + break;
115
116 - w_size = controller->w_size;
117 + num_words = 1;
118 + }
119 +
120 + /* read up to the maximum transfer size available */
121 + spi_qup_read_from_fifo(controller, xfer, num_words);
122
123 - while (controller->tx_bytes < xfer->len) {
124 + remainder -= num_words;
125
126 - state = readl_relaxed(controller->base + QUP_OPERATIONAL);
127 - if (state & QUP_OP_OUT_FIFO_FULL)
128 + /* if block mode, check to see if next block is available */
129 + if (is_block_mode && !spi_qup_is_flag_set(controller,
130 + QUP_OP_IN_BLOCK_READ_REQ))
131 break;
132
133 + } while (remainder);
134 +
135 + /*
136 + * Due to extra stickiness of the QUP_OP_IN_SERVICE_FLAG during block
137 + * mode reads, it has to be cleared again at the very end
138 + */
139 + if (is_block_mode && spi_qup_is_flag_set(controller,
140 + QUP_OP_MAX_INPUT_DONE_FLAG))
141 + writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
142 + controller->base + QUP_OPERATIONAL);
143 +
144 +}
145 +
146 +static void spi_qup_write_to_fifo(struct spi_qup *controller,
147 + struct spi_transfer *xfer, u32 num_words)
148 +{
149 + const u8 *tx_buf = xfer->tx_buf;
150 + int i, num_bytes;
151 + u32 word, data;
152 +
153 + for (; num_words; num_words--) {
154 word = 0;
155 - for (idx = 0; idx < w_size; idx++, controller->tx_bytes++) {
156
157 - if (!tx_buf) {
158 - controller->tx_bytes += w_size;
159 - break;
160 + num_bytes = min_t(int, xfer->len - controller->tx_bytes,
161 + controller->w_size);
162 + if (tx_buf)
163 + for (i = 0; i < num_bytes; i++) {
164 + data = tx_buf[controller->tx_bytes + i];
165 + word |= data << (BITS_PER_BYTE * (3 - i));
166 }
167
168 - data = tx_buf[controller->tx_bytes];
169 - word |= data << (BITS_PER_BYTE * (3 - idx));
170 - }
171 + controller->tx_bytes += num_bytes;
172
173 writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
174 }
175 @@ -290,6 +337,44 @@ static void spi_qup_dma_done(void *data)
176 complete(done);
177 }
178
179 +static void spi_qup_write(struct spi_qup *controller,
180 + struct spi_transfer *xfer)
181 +{
182 + bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
183 + u32 remainder, words_per_block, num_words;
184 +
185 + remainder = DIV_ROUND_UP(xfer->len - controller->tx_bytes,
186 + controller->w_size);
187 + words_per_block = controller->out_blk_sz >> 2;
188 +
189 + do {
190 + /* ACK by clearing service flag */
191 + writel_relaxed(QUP_OP_OUT_SERVICE_FLAG,
192 + controller->base + QUP_OPERATIONAL);
193 +
194 + if (is_block_mode) {
195 + num_words = (remainder > words_per_block) ?
196 + words_per_block : remainder;
197 + } else {
198 + if (spi_qup_is_flag_set(controller,
199 + QUP_OP_OUT_FIFO_FULL))
200 + break;
201 +
202 + num_words = 1;
203 + }
204 +
205 + spi_qup_write_to_fifo(controller, xfer, num_words);
206 +
207 + remainder -= num_words;
208 +
209 + /* if block mode, check to see if next block is available */
210 + if (is_block_mode && !spi_qup_is_flag_set(controller,
211 + QUP_OP_OUT_BLOCK_WRITE_REQ))
212 + break;
213 +
214 + } while (remainder);
215 +}
216 +
217 static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer,
218 enum dma_transfer_direction dir,
219 dma_async_tx_callback callback,
220 @@ -347,11 +432,13 @@ unsigned long timeout)
221 return ret;
222 }
223
224 - if (xfer->rx_buf)
225 - rx_done = spi_qup_dma_done;
226 + if (!qup->qup_v1) {
227 + if (xfer->rx_buf)
228 + rx_done = spi_qup_dma_done;
229
230 - if (xfer->tx_buf)
231 - tx_done = spi_qup_dma_done;
232 + if (xfer->tx_buf)
233 + tx_done = spi_qup_dma_done;
234 + }
235
236 if (xfer->rx_buf) {
237 ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done,
238 @@ -398,7 +485,8 @@ static int spi_qup_do_pio(struct spi_mas
239 return ret;
240 }
241
242 - spi_qup_fifo_write(qup, xfer);
243 + if (qup->mode == QUP_IO_M_MODE_FIFO)
244 + spi_qup_write(qup, xfer);
245
246 ret = spi_qup_set_state(qup, QUP_STATE_RUN);
247 if (ret) {
248 @@ -431,10 +519,11 @@ static irqreturn_t spi_qup_qup_irq(int i
249
250 writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
251 writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
252 - writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
253
254 if (!xfer) {
255 - dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
256 + writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
257 + dev_err_ratelimited(controller->dev,
258 + "unexpected irq %08x %08x %08x\n",
259 qup_err, spi_err, opflags);
260 return IRQ_HANDLED;
261 }
262 @@ -460,12 +549,20 @@ static irqreturn_t spi_qup_qup_irq(int i
263 error = -EIO;
264 }
265
266 - if (!spi_qup_is_dma_xfer(controller->mode)) {
267 + if (spi_qup_is_dma_xfer(controller->mode)) {
268 + writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
269 + if (opflags & QUP_OP_IN_SERVICE_FLAG &&
270 + opflags & QUP_OP_MAX_INPUT_DONE_FLAG)
271 + complete(&controller->done);
272 + if (opflags & QUP_OP_OUT_SERVICE_FLAG &&
273 + opflags & QUP_OP_MAX_OUTPUT_DONE_FLAG)
274 + complete(&controller->dma_tx_done);
275 + } else {
276 if (opflags & QUP_OP_IN_SERVICE_FLAG)
277 - spi_qup_fifo_read(controller, xfer);
278 + spi_qup_read(controller, xfer);
279
280 if (opflags & QUP_OP_OUT_SERVICE_FLAG)
281 - spi_qup_fifo_write(controller, xfer);
282 + spi_qup_write(controller, xfer);
283 }
284
285 spin_lock_irqsave(&controller->lock, flags);
286 @@ -473,6 +570,9 @@ static irqreturn_t spi_qup_qup_irq(int i
287 controller->xfer = xfer;
288 spin_unlock_irqrestore(&controller->lock, flags);
289
290 + /* re-read opflags as flags may have changed due to actions above */
291 + opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
292 +
293 if ((controller->rx_bytes == xfer->len &&
294 (opflags & QUP_OP_MAX_INPUT_DONE_FLAG)) || error)
295 complete(&controller->done);
296 @@ -516,11 +616,13 @@ static int spi_qup_io_config(struct spi_
297 /* must be zero for FIFO */
298 writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
299 writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
300 -
301 } else if (spi->master->can_dma &&
302 spi->master->can_dma(spi->master, spi, xfer) &&
303 spi->master->cur_msg_mapped) {
304 controller->mode = QUP_IO_M_MODE_BAM;
305 + writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
306 + writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
307 + /* must be zero for BLOCK and BAM */
308 writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
309 writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
310