1 From b56c1e35cc550fd014fa601ca56b964d88fd44a9 Mon Sep 17 00:00:00 2001
2 From: Andy Gross <andy.gross@linaro.org>
3 Date: Sun, 31 Jan 2016 21:28:13 -0600
4 Subject: [PATCH 06/69] spi: qup: Fix block mode to work correctly
6 This patch corrects the behavior of BLOCK transactions. During block
7 transactions, the controller must be read from and written to in block-size units.
9 Signed-off-by: Andy Gross <andy.gross@linaro.org>
11 drivers/spi/spi-qup.c | 182 +++++++++++++++++++++++++++++++++++++++-----------
12 1 file changed, 142 insertions(+), 40 deletions(-)
14 diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
15 index 72037e2b159a..10c7eb139c9e 100644
16 --- a/drivers/spi/spi-qup.c
17 +++ b/drivers/spi/spi-qup.c
19 #define QUP_IO_M_MODE_BAM 3
21 /* QUP_OPERATIONAL fields */
22 +#define QUP_OP_IN_BLOCK_READ_REQ BIT(13)
23 +#define QUP_OP_OUT_BLOCK_WRITE_REQ BIT(12)
24 #define QUP_OP_MAX_INPUT_DONE_FLAG BIT(11)
25 #define QUP_OP_MAX_OUTPUT_DONE_FLAG BIT(10)
26 #define QUP_OP_IN_SERVICE_FLAG BIT(9)
27 @@ -155,6 +157,12 @@ struct spi_qup {
28 struct dma_slave_config tx_conf;
31 +static inline bool spi_qup_is_flag_set(struct spi_qup *controller, u32 flag)
33 + u32 opflag = readl_relaxed(controller->base + QUP_OPERATIONAL);
35 + return opflag & flag;
38 static inline bool spi_qup_is_dma_xfer(int mode)
40 @@ -216,29 +224,26 @@ static int spi_qup_set_state(struct spi_qup *controller, u32 state)
44 -static void spi_qup_fifo_read(struct spi_qup *controller,
45 - struct spi_transfer *xfer)
46 +static void spi_qup_read_from_fifo(struct spi_qup *controller,
47 + struct spi_transfer *xfer, u32 num_words)
49 u8 *rx_buf = xfer->rx_buf;
51 - int idx, shift, w_size;
53 - w_size = controller->w_size;
55 - while (controller->rx_bytes < xfer->len) {
56 + int i, shift, num_bytes;
59 - state = readl_relaxed(controller->base + QUP_OPERATIONAL);
60 - if (0 == (state & QUP_OP_IN_FIFO_NOT_EMPTY))
62 + for (; num_words; num_words--) {
64 word = readl_relaxed(controller->base + QUP_INPUT_FIFO);
66 + num_bytes = min_t(int, xfer->len - controller->rx_bytes,
67 + controller->w_size);
70 - controller->rx_bytes += w_size;
71 + controller->rx_bytes += num_bytes;
75 - for (idx = 0; idx < w_size; idx++, controller->rx_bytes++) {
76 + for (i = 0; i < num_bytes; i++, controller->rx_bytes++) {
78 * The data format depends on bytes per SPI word:
80 @@ -246,38 +251,80 @@ static void spi_qup_fifo_read(struct spi_qup *controller,
83 shift = BITS_PER_BYTE;
84 - shift *= (w_size - idx - 1);
85 + shift *= (controller->w_size - i - 1);
86 rx_buf[controller->rx_bytes] = word >> shift;
91 -static void spi_qup_fifo_write(struct spi_qup *controller,
92 +static void spi_qup_read(struct spi_qup *controller,
93 struct spi_transfer *xfer)
95 - const u8 *tx_buf = xfer->tx_buf;
96 - u32 word, state, data;
98 + u32 remainder, words_per_block, num_words;
99 + bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
101 + remainder = DIV_ROUND_UP(xfer->len - controller->rx_bytes,
102 + controller->w_size);
103 + words_per_block = controller->in_blk_sz >> 2;
106 + /* ACK by clearing service flag */
107 + writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
108 + controller->base + QUP_OPERATIONAL);
110 + if (is_block_mode) {
111 + num_words = (remainder > words_per_block) ?
112 + words_per_block : remainder;
114 + if (!spi_qup_is_flag_set(controller,
115 + QUP_OP_IN_FIFO_NOT_EMPTY))
121 - w_size = controller->w_size;
122 + /* read up to the maximum transfer size available */
123 + spi_qup_read_from_fifo(controller, xfer, num_words);
125 - while (controller->tx_bytes < xfer->len) {
126 + remainder -= num_words;
128 - state = readl_relaxed(controller->base + QUP_OPERATIONAL);
129 - if (state & QUP_OP_OUT_FIFO_FULL)
130 + /* if block mode, check to see if next block is available */
131 + if (is_block_mode && !spi_qup_is_flag_set(controller,
132 + QUP_OP_IN_BLOCK_READ_REQ))
135 + } while (remainder);
138 + * Due to extra stickiness of the QUP_OP_IN_SERVICE_FLAG during block
139 + * mode reads, it has to be cleared again at the very end
141 + if (is_block_mode && spi_qup_is_flag_set(controller,
142 + QUP_OP_MAX_INPUT_DONE_FLAG))
143 + writel_relaxed(QUP_OP_IN_SERVICE_FLAG,
144 + controller->base + QUP_OPERATIONAL);
148 +static void spi_qup_write_to_fifo(struct spi_qup *controller,
149 + struct spi_transfer *xfer, u32 num_words)
151 + const u8 *tx_buf = xfer->tx_buf;
155 + for (; num_words; num_words--) {
157 - for (idx = 0; idx < w_size; idx++, controller->tx_bytes++) {
160 - controller->tx_bytes += w_size;
162 + num_bytes = min_t(int, xfer->len - controller->tx_bytes,
163 + controller->w_size);
165 + for (i = 0; i < num_bytes; i++) {
166 + data = tx_buf[controller->tx_bytes + i];
167 + word |= data << (BITS_PER_BYTE * (3 - i));
170 - data = tx_buf[controller->tx_bytes];
171 - word |= data << (BITS_PER_BYTE * (3 - idx));
173 + controller->tx_bytes += num_bytes;
175 writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
177 @@ -290,6 +337,44 @@ static void spi_qup_dma_done(void *data)
181 +static void spi_qup_write(struct spi_qup *controller,
182 + struct spi_transfer *xfer)
184 + bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK;
185 + u32 remainder, words_per_block, num_words;
187 + remainder = DIV_ROUND_UP(xfer->len - controller->tx_bytes,
188 + controller->w_size);
189 + words_per_block = controller->out_blk_sz >> 2;
192 + /* ACK by clearing service flag */
193 + writel_relaxed(QUP_OP_OUT_SERVICE_FLAG,
194 + controller->base + QUP_OPERATIONAL);
196 + if (is_block_mode) {
197 + num_words = (remainder > words_per_block) ?
198 + words_per_block : remainder;
200 + if (spi_qup_is_flag_set(controller,
201 + QUP_OP_OUT_FIFO_FULL))
207 + spi_qup_write_to_fifo(controller, xfer, num_words);
209 + remainder -= num_words;
211 + /* if block mode, check to see if next block is available */
212 + if (is_block_mode && !spi_qup_is_flag_set(controller,
213 + QUP_OP_OUT_BLOCK_WRITE_REQ))
216 + } while (remainder);
219 static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer,
220 enum dma_transfer_direction dir,
221 dma_async_tx_callback callback,
222 @@ -347,11 +432,13 @@ unsigned long timeout)
227 - rx_done = spi_qup_dma_done;
228 + if (!qup->qup_v1) {
230 + rx_done = spi_qup_dma_done;
233 - tx_done = spi_qup_dma_done;
235 + tx_done = spi_qup_dma_done;
239 ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done,
240 @@ -398,7 +485,8 @@ static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer,
244 - spi_qup_fifo_write(qup, xfer);
245 + if (qup->mode == QUP_IO_M_MODE_FIFO)
246 + spi_qup_write(qup, xfer);
248 ret = spi_qup_set_state(qup, QUP_STATE_RUN);
250 @@ -431,10 +519,11 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
252 writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
253 writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
254 - writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
257 - dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
258 + writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
259 + dev_err_ratelimited(controller->dev,
260 + "unexpected irq %08x %08x %08x\n",
261 qup_err, spi_err, opflags);
264 @@ -460,12 +549,20 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
268 - if (!spi_qup_is_dma_xfer(controller->mode)) {
269 + if (spi_qup_is_dma_xfer(controller->mode)) {
270 + writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
271 + if (opflags & QUP_OP_IN_SERVICE_FLAG &&
272 + opflags & QUP_OP_MAX_INPUT_DONE_FLAG)
273 + complete(&controller->done);
274 + if (opflags & QUP_OP_OUT_SERVICE_FLAG &&
275 + opflags & QUP_OP_MAX_OUTPUT_DONE_FLAG)
276 + complete(&controller->dma_tx_done);
278 if (opflags & QUP_OP_IN_SERVICE_FLAG)
279 - spi_qup_fifo_read(controller, xfer);
280 + spi_qup_read(controller, xfer);
282 if (opflags & QUP_OP_OUT_SERVICE_FLAG)
283 - spi_qup_fifo_write(controller, xfer);
284 + spi_qup_write(controller, xfer);
287 spin_lock_irqsave(&controller->lock, flags);
288 @@ -473,6 +570,9 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
289 controller->xfer = xfer;
290 spin_unlock_irqrestore(&controller->lock, flags);
292 + /* re-read opflags as flags may have changed due to actions above */
293 + opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
295 if ((controller->rx_bytes == xfer->len &&
296 (opflags & QUP_OP_MAX_INPUT_DONE_FLAG)) || error)
297 complete(&controller->done);
298 @@ -516,11 +616,13 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
299 /* must be zero for FIFO */
300 writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
301 writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
303 } else if (spi->master->can_dma &&
304 spi->master->can_dma(spi->master, spi, xfer) &&
305 spi->master->cur_msg_mapped) {
306 controller->mode = QUP_IO_M_MODE_BAM;
307 + writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
308 + writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
309 + /* must be zero for BLOCK and BAM */
310 writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
311 writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);