1 From 148f77310a9ddf4db5036066458d7aed92cea9ae Mon Sep 17 00:00:00 2001
2 From: Andy Gross <andy.gross@linaro.org>
3 Date: Sun, 31 Jan 2016 21:28:13 -0600
4 Subject: [PATCH] spi: qup: Fix block mode to work correctly
6 This patch corrects the behavior of BLOCK mode
7 transfers. During block mode transfers, the controller
8 must be read from and written to in block-size chunks.
10 Signed-off-by: Andy Gross <andy.gross@linaro.org>
12 Change-Id: I4b4f4d25be57e6e8148f6f0d24bed376eb287ecf
14 drivers/spi/spi-qup.c | 181 +++++++++++++++++++++++++++++++++++++++-----------
15 1 file changed, 141 insertions(+), 40 deletions(-)
17 diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
18 index 089c5e8..e487416 100644
19 --- a/drivers/spi/spi-qup.c
20 +++ b/drivers/spi/spi-qup.c
22 #define QUP_IO_M_MODE_BAM 3
24 /* QUP_OPERATIONAL fields */
25 +#define QUP_OP_IN_BLOCK_READ_REQ BIT(13)
26 +#define QUP_OP_OUT_BLOCK_WRITE_REQ BIT(12)
27 #define QUP_OP_MAX_INPUT_DONE_FLAG BIT(11)
28 #define QUP_OP_MAX_OUTPUT_DONE_FLAG BIT(10)
29 #define QUP_OP_IN_SERVICE_FLAG BIT(9)
30 @@ -156,6 +158,12 @@ struct spi_qup {
31 struct dma_slave_config tx_conf;
34 +static inline bool spi_qup_is_flag_set(struct spi_qup *controller, u32 flag)
36 + u32 opflag = readl_relaxed(controller->base + QUP_OPERATIONAL);
38 + return opflag & flag;
41 static inline bool spi_qup_is_dma_xfer(int mode)
43 @@ -217,29 +225,26 @@ static int spi_qup_set_state(struct spi_qup *controller, u32 state)
47 -static void spi_qup_fifo_read(struct spi_qup *controller,
48 - struct spi_transfer *xfer)
49 +static void spi_qup_read_from_fifo(struct spi_qup *controller,
50 + struct spi_transfer *xfer, u32 num_words)
52 u8 *rx_buf = xfer->rx_buf;
54 - int idx, shift, w_size;
56 - w_size = controller->w_size;
58 - while (controller->rx_bytes < xfer->len) {
59 + int i, shift, num_bytes;
62 - state = readl_relaxed(controller->base + QUP_OPERATIONAL);
63 - if (0 == (state & QUP_OP_IN_FIFO_NOT_EMPTY))
65 + for (; num_words; num_words--) {
67 word = readl_relaxed(controller->base + QUP_INPUT_FIFO);
69 + num_bytes = min_t(int, xfer->len - controller->rx_bytes,
70 + controller->w_size);
73 - controller->rx_bytes += w_size;
74 + controller->rx_bytes += num_bytes;
78 - for (idx = 0; idx < w_size; idx++, controller->rx_bytes++) {
79 + for (i = 0; i < num_bytes; i++, controller->rx_bytes++) {
81 * The data format depends on bytes per SPI word:
83 @@ -247,38 +252,80 @@ static void spi_qup_fifo_read(struct spi_qup *controller,
86 shift = BITS_PER_BYTE;
87 - shift *= (w_size - idx - 1);
88 + shift *= (controller->w_size - i - 1);
89 rx_buf[controller->rx_bytes] = word >> shift;
94 -static void spi_qup_fifo_write(struct spi_qup *controller,
95 +static void spi_qup_read(struct spi_qup *controller,
96 struct spi_transfer *xfer)
98 - const u8 *tx_buf = xfer->tx_buf;
99 - u32 word, state, data;
101 + u32 remainder, words_per_block, num_words;
102 + bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
104 + remainder = DIV_ROUND_UP(xfer->len - controller->rx_bytes,
105 + controller->w_size);
106 + words_per_block = controller->in_blk_sz >> 2;
109 + /* ACK by clearing service flag */
110 + writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
111 + controller->base + QUP_OPERATIONAL);
113 + if (is_block_mode) {
114 + num_words = (remainder > words_per_block) ?
115 + words_per_block : remainder;
117 + if (!spi_qup_is_flag_set(controller,
118 + QUP_OP_IN_FIFO_NOT_EMPTY))
124 - w_size = controller->w_size;
125 + /* read up to the maximum transfer size available */
126 + spi_qup_read_from_fifo(controller, xfer, num_words);
128 - while (controller->tx_bytes < xfer->len) {
129 + remainder -= num_words;
131 - state = readl_relaxed(controller->base + QUP_OPERATIONAL);
132 - if (state & QUP_OP_OUT_FIFO_FULL)
133 + /* if block mode, check to see if next block is available */
134 + if (is_block_mode && !spi_qup_is_flag_set(controller,
135 + QUP_OP_IN_BLOCK_READ_REQ))
138 + } while (remainder);
141 + * Due to extra stickiness of the QUP_OP_IN_SERVICE_FLAG during block
142 + * mode reads, it has to be cleared again at the very end
144 + if (is_block_mode && spi_qup_is_flag_set(controller,
145 + QUP_OP_MAX_INPUT_DONE_FLAG))
146 + writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
147 + controller->base + QUP_OPERATIONAL);
151 +static void spi_qup_write_to_fifo(struct spi_qup *controller,
152 + struct spi_transfer *xfer, u32 num_words)
154 + const u8 *tx_buf = xfer->tx_buf;
158 + for (; num_words; num_words--) {
160 - for (idx = 0; idx < w_size; idx++, controller->tx_bytes++) {
163 - controller->tx_bytes += w_size;
165 + num_bytes = min_t(int, xfer->len - controller->tx_bytes,
166 + controller->w_size);
168 + for (i = 0; i < num_bytes; i++) {
169 + data = tx_buf[controller->tx_bytes + i];
170 + word |= data << (BITS_PER_BYTE * (3 - i));
173 - data = tx_buf[controller->tx_bytes];
174 - word |= data << (BITS_PER_BYTE * (3 - idx));
176 + controller->tx_bytes += num_bytes;
178 writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
180 @@ -291,6 +338,44 @@ static void spi_qup_dma_done(void *data)
184 +static void spi_qup_write(struct spi_qup *controller,
185 + struct spi_transfer *xfer)
187 + bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
188 + u32 remainder, words_per_block, num_words;
190 + remainder = DIV_ROUND_UP(xfer->len - controller->tx_bytes,
191 + controller->w_size);
192 + words_per_block = controller->out_blk_sz >> 2;
195 + /* ACK by clearing service flag */
196 + writel_relaxed(QUP_OP_OUT_SERVICE_FLAG,
197 + controller->base + QUP_OPERATIONAL);
199 + if (is_block_mode) {
200 + num_words = (remainder > words_per_block) ?
201 + words_per_block : remainder;
203 + if (spi_qup_is_flag_set(controller,
204 + QUP_OP_OUT_FIFO_FULL))
210 + spi_qup_write_to_fifo(controller, xfer, num_words);
212 + remainder -= num_words;
214 + /* if block mode, check to see if next block is available */
215 + if (is_block_mode && !spi_qup_is_flag_set(controller,
216 + QUP_OP_OUT_BLOCK_WRITE_REQ))
219 + } while (remainder);
222 static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer,
223 enum dma_transfer_direction dir,
224 dma_async_tx_callback callback,
225 @@ -348,11 +433,13 @@ unsigned long timeout)
230 - rx_done = spi_qup_dma_done;
231 + if (!qup->qup_v1) {
233 + rx_done = spi_qup_dma_done;
236 - tx_done = spi_qup_dma_done;
238 + tx_done = spi_qup_dma_done;
242 ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done,
243 @@ -401,7 +488,7 @@ static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer,
246 if (qup->mode == QUP_IO_M_MODE_FIFO)
247 - spi_qup_fifo_write(qup, xfer);
248 + spi_qup_write(qup, xfer);
250 ret = spi_qup_set_state(qup, QUP_STATE_RUN);
252 @@ -434,10 +521,11 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
254 writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
255 writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
256 - writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
259 - dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
260 + writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
261 + dev_err_ratelimited(controller->dev,
262 + "unexpected irq %08x %08x %08x\n",
263 qup_err, spi_err, opflags);
266 @@ -463,12 +551,20 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
270 - if (!spi_qup_is_dma_xfer(controller->mode)) {
271 + if (spi_qup_is_dma_xfer(controller->mode)) {
272 + writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
273 + if (opflags & QUP_OP_IN_SERVICE_FLAG &&
274 + opflags & QUP_OP_MAX_INPUT_DONE_FLAG)
275 + complete(&controller->done);
276 + if (opflags & QUP_OP_OUT_SERVICE_FLAG &&
277 + opflags & QUP_OP_MAX_OUTPUT_DONE_FLAG)
278 + complete(&controller->dma_tx_done);
280 if (opflags & QUP_OP_IN_SERVICE_FLAG)
281 - spi_qup_fifo_read(controller, xfer);
282 + spi_qup_read(controller, xfer);
284 if (opflags & QUP_OP_OUT_SERVICE_FLAG)
285 - spi_qup_fifo_write(controller, xfer);
286 + spi_qup_write(controller, xfer);
289 spin_lock_irqsave(&controller->lock, flags);
290 @@ -476,6 +572,9 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
291 controller->xfer = xfer;
292 spin_unlock_irqrestore(&controller->lock, flags);
294 + /* re-read opflags as flags may have changed due to actions above */
295 + opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
297 if ((controller->rx_bytes == xfer->len &&
298 (opflags & QUP_OP_MAX_INPUT_DONE_FLAG)) || error)
299 complete(&controller->done);
300 @@ -519,11 +618,13 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
301 /* must be zero for FIFO */
302 writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
303 writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
304 - controller->use_dma = 0;
305 } else if (spi->master->can_dma &&
306 spi->master->can_dma(spi->master, spi, xfer) &&
307 spi->master->cur_msg_mapped) {
308 controller->mode = QUP_IO_M_MODE_BAM;
309 + writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
310 + writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
311 + /* must be zero for BLOCK and BAM */
312 writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
313 writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);