ipq806x: add v4.9 support
[openwrt/staging/yousong.git] target/linux/ipq806x/patches-4.9/0008-spi-qup-Fix-DMA-mode-to-work-correctly.patch
From 715d008b67b21fb8bfefaeeefa5b8ddf23777872 Mon Sep 17 00:00:00 2001
From: Andy Gross <andy.gross@linaro.org>
Date: Tue, 2 Feb 2016 17:00:53 -0600
Subject: [PATCH 08/37] spi: qup: Fix DMA mode to work correctly

This patch fixes a few issues with the DMA mode. The QUP needs to be
placed in the run mode before the DMA transactions are executed. The
conditions for being able to DMA vary between revisions of the QUP.
This is due to v1.1.1 using ADM DMA and later revisions using BAM.

Signed-off-by: Andy Gross <andy.gross@linaro.org>
---
 drivers/spi/spi-qup.c | 94 ++++++++++++++++++++++++++++++++-----------------
 1 file changed, 62 insertions(+), 32 deletions(-)

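Note (not part of the upstream commit): the fix has two halves. First, the
QUP is moved to the RUN state *before* the DMA descriptors are issued, and
each direction gets its own completion (done / dma_tx_done) so rx and tx
are waited on independently inside spi_qup_do_dma(). Second,
spi_qup_can_dma() makes the block-size rule revision-specific: only QUP
v1.1.1 (ADM DMA) requires the transfer length to be a whole number of
blocks; BAM-based revisions drop that restriction. A minimal stand-alone
sketch of the per-direction check, using hypothetical names (dir_can_dma,
chan_ok) rather than the driver's actual code:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	/*
	 * Hypothetical user-space mirror of one direction of the reworked
	 * spi_qup_can_dma() decision. qup_v1 selects the ADM (v1.1.1)
	 * rules; dma_align is assumed to be a power of two, as the
	 * kernel's IS_ALIGNED() also assumes.
	 */
	static bool dir_can_dma(const void *buf, size_t len, size_t blk_sz,
				size_t dma_align, bool chan_ok, bool qup_v1)
	{
		if (!buf)
			return true;	/* no transfer in this direction */

		/* buffer must be cache-aligned and a DMA channel present */
		if (((uintptr_t)buf & (dma_align - 1)) || !chan_ok)
			return false;

		/* only v1 (ADM) additionally needs whole blocks */
		return !(qup_v1 && (len % blk_sz));
	}
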
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -142,6 +142,7 @@ struct spi_qup {
 
 	struct spi_transfer	*xfer;
 	struct completion	done;
+	struct completion	dma_tx_done;
 	int			error;
 	int			w_size;	/* bytes per SPI word */
 	int			n_words;
@@ -284,16 +285,16 @@ static void spi_qup_fifo_write(struct sp
 
 static void spi_qup_dma_done(void *data)
 {
-	struct spi_qup *qup = data;
+	struct completion *done = data;
 
-	complete(&qup->done);
+	complete(done);
 }
 
 static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer,
 			   enum dma_transfer_direction dir,
-			   dma_async_tx_callback callback)
+			   dma_async_tx_callback callback,
+			   void *data)
 {
-	struct spi_qup *qup = spi_master_get_devdata(master);
 	unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
 	struct dma_async_tx_descriptor *desc;
 	struct scatterlist *sgl;
@@ -312,11 +313,11 @@ static int spi_qup_prep_sg(struct spi_ma
 	}
 
 	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
-	if (!desc)
-		return -EINVAL;
+	if (IS_ERR_OR_NULL(desc))
+		return desc ? PTR_ERR(desc) : -EINVAL;
 
 	desc->callback = callback;
-	desc->callback_param = qup;
+	desc->callback_param = data;
 
 	cookie = dmaengine_submit(desc);
 
@@ -332,18 +333,29 @@ static void spi_qup_dma_terminate(struct
 		dmaengine_terminate_all(master->dma_rx);
 }
 
-static int spi_qup_do_dma(struct spi_master *master, struct spi_transfer *xfer)
+static int spi_qup_do_dma(struct spi_master *master, struct spi_transfer *xfer,
+			  unsigned long timeout)
 {
+	struct spi_qup *qup = spi_master_get_devdata(master);
 	dma_async_tx_callback rx_done = NULL, tx_done = NULL;
 	int ret;
 
+	/* before issuing the descriptors, set the QUP to run */
+	ret = spi_qup_set_state(qup, QUP_STATE_RUN);
+	if (ret) {
+		dev_warn(qup->dev, "cannot set RUN state\n");
+		return ret;
+	}
+
 	if (xfer->rx_buf)
 		rx_done = spi_qup_dma_done;
-	else if (xfer->tx_buf)
+
+	if (xfer->tx_buf)
 		tx_done = spi_qup_dma_done;
 
 	if (xfer->rx_buf) {
-		ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done);
+		ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done,
+				      &qup->done);
 		if (ret)
 			return ret;
 
@@ -351,17 +363,25 @@ static int spi_qup_do_dma(struct spi_mas
 	}
 
 	if (xfer->tx_buf) {
-		ret = spi_qup_prep_sg(master, xfer, DMA_MEM_TO_DEV, tx_done);
+		ret = spi_qup_prep_sg(master, xfer, DMA_MEM_TO_DEV, tx_done,
+				      &qup->dma_tx_done);
 		if (ret)
 			return ret;
 
 		dma_async_issue_pending(master->dma_tx);
 	}
 
-	return 0;
+	if (xfer->rx_buf && !wait_for_completion_timeout(&qup->done, timeout))
+		return -ETIMEDOUT;
+
+	if (xfer->tx_buf && !wait_for_completion_timeout(&qup->dma_tx_done, timeout))
+		ret = -ETIMEDOUT;
+
+	return ret;
 }
 
-static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer)
+static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer,
+			  unsigned long timeout)
 {
 	struct spi_qup *qup = spi_master_get_devdata(master);
 	int ret;
@@ -380,6 +400,15 @@ static int spi_qup_do_pio(struct spi_mas
 
 	spi_qup_fifo_write(qup, xfer);
 
+	ret = spi_qup_set_state(qup, QUP_STATE_RUN);
+	if (ret) {
+		dev_warn(qup->dev, "cannot set RUN state\n");
+		return ret;
+	}
+
+	if (!wait_for_completion_timeout(&qup->done, timeout))
+		return -ETIMEDOUT;
+
 	return 0;
 }
 
@@ -428,7 +457,6 @@ static irqreturn_t spi_qup_qup_irq(int i
 			dev_warn(controller->dev, "CLK_OVER_RUN\n");
 		if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
 			dev_warn(controller->dev, "CLK_UNDER_RUN\n");
-
 		error = -EIO;
 	}
 
@@ -617,6 +645,7 @@ static int spi_qup_transfer_one(struct s
 	timeout = 100 * msecs_to_jiffies(timeout);
 
 	reinit_completion(&controller->done);
+	reinit_completion(&controller->dma_tx_done);
 
 	spin_lock_irqsave(&controller->lock, flags);
 	controller->xfer     = xfer;
@@ -626,21 +655,13 @@ static int spi_qup_transfer_one(struct s
 	spin_unlock_irqrestore(&controller->lock, flags);
 
 	if (spi_qup_is_dma_xfer(controller->mode))
-		ret = spi_qup_do_dma(master, xfer);
+		ret = spi_qup_do_dma(master, xfer, timeout);
 	else
-		ret = spi_qup_do_pio(master, xfer);
+		ret = spi_qup_do_pio(master, xfer, timeout);
 
 	if (ret)
 		goto exit;
 
-	if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
-		dev_warn(controller->dev, "cannot set EXECUTE state\n");
-		goto exit;
-	}
-
-	if (!wait_for_completion_timeout(&controller->done, timeout))
-		ret = -ETIMEDOUT;
-
 exit:
 	spi_qup_set_state(controller, QUP_STATE_RESET);
 	spin_lock_irqsave(&controller->lock, flags);
@@ -662,15 +683,23 @@ static bool spi_qup_can_dma(struct spi_m
 	size_t dma_align = dma_get_cache_alignment();
 	int n_words;
 
-	if (xfer->rx_buf && (xfer->len % qup->in_blk_sz ||
-	    IS_ERR_OR_NULL(master->dma_rx) ||
-	    !IS_ALIGNED((size_t)xfer->rx_buf, dma_align)))
-		return false;
+	if (xfer->rx_buf) {
+		if (!IS_ALIGNED((size_t)xfer->rx_buf, dma_align) ||
+		    IS_ERR_OR_NULL(master->dma_rx))
+			return false;
 
-	if (xfer->tx_buf && (xfer->len % qup->out_blk_sz ||
-	    IS_ERR_OR_NULL(master->dma_tx) ||
-	    !IS_ALIGNED((size_t)xfer->tx_buf, dma_align)))
-		return false;
+		if (qup->qup_v1 && (xfer->len % qup->in_blk_sz))
+			return false;
+	}
+
+	if (xfer->tx_buf) {
+		if (!IS_ALIGNED((size_t)xfer->tx_buf, dma_align) ||
+		    IS_ERR_OR_NULL(master->dma_tx))
+			return false;
+
+		if (qup->qup_v1 && (xfer->len % qup->out_blk_sz))
+			return false;
+	}
 
 	n_words = xfer->len / DIV_ROUND_UP(xfer->bits_per_word, 8);
 	if (n_words <= (qup->in_fifo_sz / sizeof(u32)))
@@ -836,6 +865,7 @@ static int spi_qup_probe(struct platform
 
 	spin_lock_init(&controller->lock);
 	init_completion(&controller->done);
+	init_completion(&controller->dma_tx_done);
 
 	iomode = readl_relaxed(base + QUP_IO_M_MODES);
 