1 From 4dc7631bbf7c7ac7548026ce45d889235e4f5892 Mon Sep 17 00:00:00 2001
2 From: Andy Gross <andy.gross@linaro.org>
3 Date: Sun, 31 Jan 2016 21:28:13 -0600
4 Subject: [PATCH 09/37] spi: qup: Fix block mode to work correctly
6 This patch corrects the behavior of BLOCK mode transactions. During block
7 mode transfers, the controller must be read from and written to in block-size chunks.
9 Signed-off-by: Andy Gross <andy.gross@linaro.org>
11 drivers/spi/spi-qup.c | 182 ++++++++++++++++++++++++++++++++++++++-----------
12 1 file changed, 142 insertions(+), 40 deletions(-)
14 --- a/drivers/spi/spi-qup.c
15 +++ b/drivers/spi/spi-qup.c
17 #define QUP_IO_M_MODE_BAM 3
19 /* QUP_OPERATIONAL fields */
20 +#define QUP_OP_IN_BLOCK_READ_REQ BIT(13)
21 +#define QUP_OP_OUT_BLOCK_WRITE_REQ BIT(12)
22 #define QUP_OP_MAX_INPUT_DONE_FLAG BIT(11)
23 #define QUP_OP_MAX_OUTPUT_DONE_FLAG BIT(10)
24 #define QUP_OP_IN_SERVICE_FLAG BIT(9)
25 @@ -155,6 +157,12 @@ struct spi_qup {
26 struct dma_slave_config tx_conf;
29 +static inline bool spi_qup_is_flag_set(struct spi_qup *controller, u32 flag)
31 + u32 opflag = readl_relaxed(controller->base + QUP_OPERATIONAL);
33 + return opflag & flag;
36 static inline bool spi_qup_is_dma_xfer(int mode)
38 @@ -216,29 +224,26 @@ static int spi_qup_set_state(struct spi_
42 -static void spi_qup_fifo_read(struct spi_qup *controller,
43 - struct spi_transfer *xfer)
44 +static void spi_qup_read_from_fifo(struct spi_qup *controller,
45 + struct spi_transfer *xfer, u32 num_words)
47 u8 *rx_buf = xfer->rx_buf;
49 - int idx, shift, w_size;
51 - w_size = controller->w_size;
53 - while (controller->rx_bytes < xfer->len) {
54 + int i, shift, num_bytes;
57 - state = readl_relaxed(controller->base + QUP_OPERATIONAL);
58 - if (0 == (state & QUP_OP_IN_FIFO_NOT_EMPTY))
60 + for (; num_words; num_words--) {
62 word = readl_relaxed(controller->base + QUP_INPUT_FIFO);
64 + num_bytes = min_t(int, xfer->len - controller->rx_bytes,
65 + controller->w_size);
68 - controller->rx_bytes += w_size;
69 + controller->rx_bytes += num_bytes;
73 - for (idx = 0; idx < w_size; idx++, controller->rx_bytes++) {
74 + for (i = 0; i < num_bytes; i++, controller->rx_bytes++) {
76 * The data format depends on bytes per SPI word:
78 @@ -246,38 +251,80 @@ static void spi_qup_fifo_read(struct spi
81 shift = BITS_PER_BYTE;
82 - shift *= (w_size - idx - 1);
83 + shift *= (controller->w_size - i - 1);
84 rx_buf[controller->rx_bytes] = word >> shift;
89 -static void spi_qup_fifo_write(struct spi_qup *controller,
90 +static void spi_qup_read(struct spi_qup *controller,
91 struct spi_transfer *xfer)
93 - const u8 *tx_buf = xfer->tx_buf;
94 - u32 word, state, data;
96 + u32 remainder, words_per_block, num_words;
97 + bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
99 - w_size = controller->w_size;
100 + remainder = DIV_ROUND_UP(xfer->len - controller->rx_bytes,
101 + controller->w_size);
102 + words_per_block = controller->in_blk_sz >> 2;
104 - while (controller->tx_bytes < xfer->len) {
106 + /* ACK by clearing service flag */
107 + writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
108 + controller->base + QUP_OPERATIONAL);
110 - state = readl_relaxed(controller->base + QUP_OPERATIONAL);
111 - if (state & QUP_OP_OUT_FIFO_FULL)
112 + if (is_block_mode) {
113 + num_words = (remainder > words_per_block) ?
114 + words_per_block : remainder;
116 + if (!spi_qup_is_flag_set(controller,
117 + QUP_OP_IN_FIFO_NOT_EMPTY))
123 + /* read up to the maximum transfer size available */
124 + spi_qup_read_from_fifo(controller, xfer, num_words);
126 + remainder -= num_words;
128 + /* if block mode, check to see if next block is available */
129 + if (is_block_mode && !spi_qup_is_flag_set(controller,
130 + QUP_OP_IN_BLOCK_READ_REQ))
133 + } while (remainder);
136 + * Due to extra stickiness of the QUP_OP_IN_SERVICE_FLAG during block
137 + * mode reads, it has to be cleared again at the very end
139 + if (is_block_mode && spi_qup_is_flag_set(controller,
140 + QUP_OP_MAX_INPUT_DONE_FLAG))
141 + writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
142 + controller->base + QUP_OPERATIONAL);
146 +static void spi_qup_write_to_fifo(struct spi_qup *controller,
147 + struct spi_transfer *xfer, u32 num_words)
149 + const u8 *tx_buf = xfer->tx_buf;
153 + for (; num_words; num_words--) {
155 - for (idx = 0; idx < w_size; idx++, controller->tx_bytes++) {
158 - controller->tx_bytes += w_size;
160 + num_bytes = min_t(int, xfer->len - controller->tx_bytes,
161 + controller->w_size);
163 + for (i = 0; i < num_bytes; i++) {
164 + data = tx_buf[controller->tx_bytes + i];
165 + word |= data << (BITS_PER_BYTE * (3 - i));
168 - data = tx_buf[controller->tx_bytes];
169 - word |= data << (BITS_PER_BYTE * (3 - idx));
171 + controller->tx_bytes += num_bytes;
173 writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
175 @@ -290,6 +337,44 @@ static void spi_qup_dma_done(void *data)
179 +static void spi_qup_write(struct spi_qup *controller,
180 + struct spi_transfer *xfer)
182 + bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
183 + u32 remainder, words_per_block, num_words;
185 + remainder = DIV_ROUND_UP(xfer->len - controller->tx_bytes,
186 + controller->w_size);
187 + words_per_block = controller->out_blk_sz >> 2;
190 + /* ACK by clearing service flag */
191 + writel_relaxed(QUP_OP_OUT_SERVICE_FLAG,
192 + controller->base + QUP_OPERATIONAL);
194 + if (is_block_mode) {
195 + num_words = (remainder > words_per_block) ?
196 + words_per_block : remainder;
198 + if (spi_qup_is_flag_set(controller,
199 + QUP_OP_OUT_FIFO_FULL))
205 + spi_qup_write_to_fifo(controller, xfer, num_words);
207 + remainder -= num_words;
209 + /* if block mode, check to see if next block is available */
210 + if (is_block_mode && !spi_qup_is_flag_set(controller,
211 + QUP_OP_OUT_BLOCK_WRITE_REQ))
214 + } while (remainder);
217 static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer,
218 enum dma_transfer_direction dir,
219 dma_async_tx_callback callback,
220 @@ -347,11 +432,13 @@ unsigned long timeout)
225 - rx_done = spi_qup_dma_done;
226 + if (!qup->qup_v1) {
228 + rx_done = spi_qup_dma_done;
231 - tx_done = spi_qup_dma_done;
233 + tx_done = spi_qup_dma_done;
237 ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done,
238 @@ -398,7 +485,8 @@ static int spi_qup_do_pio(struct spi_mas
242 - spi_qup_fifo_write(qup, xfer);
243 + if (qup->mode == QUP_IO_M_MODE_FIFO)
244 + spi_qup_write(qup, xfer);
246 ret = spi_qup_set_state(qup, QUP_STATE_RUN);
248 @@ -431,10 +519,11 @@ static irqreturn_t spi_qup_qup_irq(int i
250 writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
251 writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
252 - writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
255 - dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
256 + writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
257 + dev_err_ratelimited(controller->dev,
258 + "unexpected irq %08x %08x %08x\n",
259 qup_err, spi_err, opflags);
262 @@ -460,12 +549,20 @@ static irqreturn_t spi_qup_qup_irq(int i
266 - if (!spi_qup_is_dma_xfer(controller->mode)) {
267 + if (spi_qup_is_dma_xfer(controller->mode)) {
268 + writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
269 + if (opflags & QUP_OP_IN_SERVICE_FLAG &&
270 + opflags & QUP_OP_MAX_INPUT_DONE_FLAG)
271 + complete(&controller->done);
272 + if (opflags & QUP_OP_OUT_SERVICE_FLAG &&
273 + opflags & QUP_OP_MAX_OUTPUT_DONE_FLAG)
274 + complete(&controller->dma_tx_done);
276 if (opflags & QUP_OP_IN_SERVICE_FLAG)
277 - spi_qup_fifo_read(controller, xfer);
278 + spi_qup_read(controller, xfer);
280 if (opflags & QUP_OP_OUT_SERVICE_FLAG)
281 - spi_qup_fifo_write(controller, xfer);
282 + spi_qup_write(controller, xfer);
285 spin_lock_irqsave(&controller->lock, flags);
286 @@ -473,6 +570,9 @@ static irqreturn_t spi_qup_qup_irq(int i
287 controller->xfer = xfer;
288 spin_unlock_irqrestore(&controller->lock, flags);
290 + /* re-read opflags as flags may have changed due to actions above */
291 + opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
293 if ((controller->rx_bytes == xfer->len &&
294 (opflags & QUP_OP_MAX_INPUT_DONE_FLAG)) || error)
295 complete(&controller->done);
296 @@ -516,11 +616,13 @@ static int spi_qup_io_config(struct spi_
297 /* must be zero for FIFO */
298 writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
299 writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
301 } else if (spi->master->can_dma &&
302 spi->master->can_dma(spi->master, spi, xfer) &&
303 spi->master->cur_msg_mapped) {
304 controller->mode = QUP_IO_M_MODE_BAM;
305 + writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
306 + writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
307 + /* must be zero for BLOCK and BAM */
308 writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
309 writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);