ipq806x: sync with latest patches sent by QCA
[openwrt/staging/dedeckeh.git] / target / linux / ipq806x / patches-4.4 / 710-spi-qup-Make-sure-mode-is-only-determined-once.patch
1 From 93f99afbc534e00d72d58336061823055ee820f1 Mon Sep 17 00:00:00 2001
2 From: Andy Gross <andy.gross@linaro.org>
3 Date: Tue, 12 Apr 2016 09:11:47 -0500
4 Subject: [PATCH] spi: qup: Make sure mode is only determined once
5
6 This patch calculates the mode once. All decisions on the current
7 transaction
8 are made using the mode instead of use_dma
9
10 Signed-off-by: Andy Gross <andy.gross@linaro.org>
11
12 Change-Id: If3cdd924355e037d77dc8201a72895fac0461aa5
13 ---
14 drivers/spi/spi-qup.c | 96 +++++++++++++++++++--------------------------------
15 1 file changed, 36 insertions(+), 60 deletions(-)
16
17 diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
18 index eb2cb8c..714fd4e 100644
19 --- a/drivers/spi/spi-qup.c
20 +++ b/drivers/spi/spi-qup.c
21 @@ -150,13 +150,20 @@ struct spi_qup {
22 int rx_bytes;
23 int qup_v1;
24
25 - int use_dma;
26 + int mode;
27 struct dma_slave_config rx_conf;
28 struct dma_slave_config tx_conf;
29 - int mode;
30 };
31
32
33 +static inline bool spi_qup_is_dma_xfer(int mode)
34 +{
35 + if (mode == QUP_IO_M_MODE_DMOV || mode == QUP_IO_M_MODE_BAM)
36 + return true;
37 +
38 + return false;
39 +}
40 +
41 static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
42 {
43 u32 opstate = readl_relaxed(controller->base + QUP_STATE);
44 @@ -427,7 +434,7 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
45 error = -EIO;
46 }
47
48 - if (!controller->use_dma) {
49 + if (!spi_qup_is_dma_xfer(controller->mode)) {
50 if (opflags & QUP_OP_IN_SERVICE_FLAG)
51 spi_qup_fifo_read(controller, xfer);
52
53 @@ -446,43 +453,11 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
54 return IRQ_HANDLED;
55 }
56
57 -static u32
58 -spi_qup_get_mode(struct spi_master *master, struct spi_transfer *xfer)
59 -{
60 - struct spi_qup *qup = spi_master_get_devdata(master);
61 - u32 mode;
62 - size_t dma_align = dma_get_cache_alignment();
63 -
64 - qup->w_size = 4;
65 -
66 - if (xfer->bits_per_word <= 8)
67 - qup->w_size = 1;
68 - else if (xfer->bits_per_word <= 16)
69 - qup->w_size = 2;
70 -
71 - qup->n_words = xfer->len / qup->w_size;
72 -
73 - if (!IS_ERR_OR_NULL(master->dma_rx) &&
74 - IS_ALIGNED((size_t)xfer->tx_buf, dma_align) &&
75 - IS_ALIGNED((size_t)xfer->rx_buf, dma_align) &&
76 - !is_vmalloc_addr(xfer->tx_buf) &&
77 - !is_vmalloc_addr(xfer->rx_buf) &&
78 - (xfer->len > 3*qup->in_blk_sz))
79 - qup->use_dma = 1;
80 -
81 - if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32)))
82 - mode = QUP_IO_M_MODE_FIFO;
83 - else
84 - mode = QUP_IO_M_MODE_BLOCK;
85 -
86 - return mode;
87 -}
88 -
89 /* set clock freq ... bits per word */
90 static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
91 {
92 struct spi_qup *controller = spi_master_get_devdata(spi->master);
93 - u32 config, iomode, mode, control;
94 + u32 config, iomode, control;
95 int ret, n_words;
96
97 if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
98 @@ -503,24 +478,22 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
99 return -EIO;
100 }
101
102 - controller->mode = mode = spi_qup_get_mode(spi->master, xfer);
103 + controller->w_size = DIV_ROUND_UP(xfer->bits_per_word, 8);
104 + controller->n_words = xfer->len / controller->w_size;
105 n_words = controller->n_words;
106
107 - if (mode == QUP_IO_M_MODE_FIFO) {
108 + if (n_words <= (controller->in_fifo_sz / sizeof(u32))) {
109 + controller->mode = QUP_IO_M_MODE_FIFO;
110 writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT);
111 writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT);
112 /* must be zero for FIFO */
113 writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
114 writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
115 controller->use_dma = 0;
116 - } else if (!controller->use_dma) {
117 - writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
118 - writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
119 - /* must be zero for BLOCK and BAM */
120 - writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
121 - writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
122 - } else {
123 - mode = QUP_IO_M_MODE_BAM;
124 + } else if (spi->master->can_dma &&
125 + spi->master->can_dma(spi->master, spi, xfer) &&
126 + spi->master->cur_msg_mapped) {
127 + controller->mode = QUP_IO_M_MODE_BAM;
128 writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
129 writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
130
131 @@ -541,19 +514,26 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
132
133 writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
134 }
135 + } else {
136 + controller->mode = QUP_IO_M_MODE_BLOCK;
137 + writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
138 + writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
139 + /* must be zero for BLOCK and BAM */
140 + writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
141 + writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
142 }
143
144 iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
145 /* Set input and output transfer mode */
146 iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);
147
148 - if (!controller->use_dma)
149 + if (!spi_qup_is_dma_xfer(controller->mode))
150 iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
151 else
152 iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN;
153
154 - iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
155 - iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);
156 + iomode |= (controller->mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
157 + iomode |= (controller->mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);
158
159 writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);
160
161 @@ -594,7 +574,7 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
162 config |= xfer->bits_per_word - 1;
163 config |= QUP_CONFIG_SPI_MODE;
164
165 - if (controller->use_dma) {
166 + if (spi_qup_is_dma_xfer(controller->mode)) {
167 if (!xfer->tx_buf)
168 config |= QUP_CONFIG_NO_OUTPUT;
169 if (!xfer->rx_buf)
170 @@ -612,7 +592,7 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
171 * status change in BAM mode
172 */
173
174 - if (mode == QUP_IO_M_MODE_BAM)
175 + if (spi_qup_is_dma_xfer(controller->mode))
176 mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG;
177
178 writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK);
179 @@ -646,7 +626,7 @@ static int spi_qup_transfer_one(struct spi_master *master,
180 controller->tx_bytes = 0;
181 spin_unlock_irqrestore(&controller->lock, flags);
182
183 - if (controller->use_dma)
184 + if (spi_qup_is_dma_xfer(controller->mode))
185 ret = spi_qup_do_dma(master, xfer);
186 else
187 ret = spi_qup_do_pio(master, xfer);
188 @@ -670,7 +650,7 @@ exit:
189 ret = controller->error;
190 spin_unlock_irqrestore(&controller->lock, flags);
191
192 - if (ret && controller->use_dma)
193 + if (ret && spi_qup_is_dma_xfer(controller->mode))
194 spi_qup_dma_terminate(master, xfer);
195
196 return ret;
197 @@ -681,9 +661,7 @@ static bool spi_qup_can_dma(struct spi_master *master, struct spi_device *spi,
198 {
199 struct spi_qup *qup = spi_master_get_devdata(master);
200 size_t dma_align = dma_get_cache_alignment();
201 - u32 mode;
202 -
203 - qup->use_dma = 0;
204 + int n_words;
205
206 if (xfer->rx_buf && (xfer->len % qup->in_blk_sz ||
207 IS_ERR_OR_NULL(master->dma_rx) ||
208 @@ -695,12 +673,10 @@ static bool spi_qup_can_dma(struct spi_master *master, struct spi_device *spi,
209 !IS_ALIGNED((size_t)xfer->tx_buf, dma_align)))
210 return false;
211
212 - mode = spi_qup_get_mode(master, xfer);
213 - if (mode == QUP_IO_M_MODE_FIFO)
214 + n_words = xfer->len / DIV_ROUND_UP(xfer->bits_per_word, 8);
215 + if (n_words <= (qup->in_fifo_sz / sizeof(u32)))
216 return false;
217
218 - qup->use_dma = 1;
219 -
220 return true;
221 }
222
223 --
224 2.7.2
225