openwrt/openwrt.git @ 93a2972599eb03318ad00d1d315bbabe74a833ed: target/linux/lantiq/patches-4.1/0033-SPI-MIPS-lantiq-adds-spi-xway.patch
1 From e75df4f96373e5d16f8ca13aa031e54cdcfeda62 Mon Sep 17 00:00:00 2001
2 From: John Crispin <blogic@openwrt.org>
3 Date: Wed, 13 Mar 2013 09:29:37 +0100
4 Subject: [PATCH 33/36] SPI: MIPS: lantiq: adds spi-xway
5
6 This patch adds support for the SPI core found on several Lantiq SoCs.
7 The driver has been runtime-tested in combination with m25p80 flash devices
8 on Amazon_SE and VR9.
9
10 Signed-off-by: Daniel Schwierzeck <daniel.schwierzeck@googlemail.com>
11 Signed-off-by: John Crispin <blogic@openwrt.org>
12 ---
13 drivers/spi/Kconfig | 8 +
14 drivers/spi/Makefile | 1 +
15 drivers/spi/spi-xway.c | 1003 ++++++++++++++++++++++++++++++++++++++++++++++++
16 3 files changed, 1012 insertions(+)
17 create mode 100644 drivers/spi/spi-xway.c
18
19 --- a/drivers/spi/Kconfig
20 +++ b/drivers/spi/Kconfig
21 @@ -626,6 +626,14 @@ config SPI_NUC900
22 help
23 SPI driver for Nuvoton NUC900 series ARM SoCs
24
25 +config SPI_XWAY
26 + tristate "Lantiq SPI controller"
27 + depends on LANTIQ
28 + select SPI_BITBANG
29 + help
30 + This driver supports the Lantiq SoC SPI controller in master
31 + mode.
32 +
33 #
34 # Add new SPI master controllers in alphabetical order above this line
35 #
36 --- a/drivers/spi/Makefile
37 +++ b/drivers/spi/Makefile
38 @@ -90,3 +90,4 @@ obj-$(CONFIG_SPI_TXX9) += spi-txx9.o
39 obj-$(CONFIG_SPI_XCOMM) += spi-xcomm.o
40 obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
41 obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o
42 +obj-$(CONFIG_SPI_XWAY) += spi-xway.o
43 --- /dev/null
44 +++ b/drivers/spi/spi-xway.c
45 @@ -0,0 +1,1003 @@
46 +/*
47 + * Lantiq SoC SPI controller
48 + *
49 + * Copyright (C) 2011 Daniel Schwierzeck <daniel.schwierzeck@googlemail.com>
50 + * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
51 + *
52 + * This program is free software; you can distribute it and/or modify it
53 + * under the terms of the GNU General Public License (Version 2) as
54 + * published by the Free Software Foundation.
55 + */
56 +
57 +#include <linux/init.h>
58 +#include <linux/module.h>
59 +#include <linux/workqueue.h>
60 +#include <linux/platform_device.h>
61 +#include <linux/io.h>
62 +#include <linux/sched.h>
63 +#include <linux/delay.h>
64 +#include <linux/interrupt.h>
65 +#include <linux/completion.h>
66 +#include <linux/spinlock.h>
67 +#include <linux/err.h>
68 +#include <linux/clk.h>
69 +#include <linux/spi/spi.h>
70 +#include <linux/spi/spi_bitbang.h>
71 +#include <linux/of_irq.h>
72 +
73 +#include <lantiq_soc.h>
74 +
75 +#define LTQ_SPI_CLC 0x00 /* Clock control */
76 +#define LTQ_SPI_PISEL 0x04 /* Port input select */
77 +#define LTQ_SPI_ID 0x08 /* Identification */
78 +#define LTQ_SPI_CON 0x10 /* Control */
79 +#define LTQ_SPI_STAT 0x14 /* Status */
80 +#define LTQ_SPI_WHBSTATE 0x18 /* Write HW modified state */
81 +#define LTQ_SPI_TB 0x20 /* Transmit buffer */
82 +#define LTQ_SPI_RB 0x24 /* Receive buffer */
83 +#define LTQ_SPI_RXFCON 0x30 /* Receive FIFO control */
84 +#define LTQ_SPI_TXFCON 0x34 /* Transmit FIFO control */
85 +#define LTQ_SPI_FSTAT 0x38 /* FIFO status */
86 +#define LTQ_SPI_BRT 0x40 /* Baudrate timer */
87 +#define LTQ_SPI_BRSTAT 0x44 /* Baudrate timer status */
88 +#define LTQ_SPI_SFCON 0x60 /* Serial frame control */
89 +#define LTQ_SPI_SFSTAT 0x64 /* Serial frame status */
90 +#define LTQ_SPI_GPOCON 0x70 /* General purpose output control */
91 +#define LTQ_SPI_GPOSTAT 0x74 /* General purpose output status */
92 +#define LTQ_SPI_FGPO 0x78 /* Forced general purpose output */
93 +#define LTQ_SPI_RXREQ 0x80 /* Receive request */
94 +#define LTQ_SPI_RXCNT 0x84 /* Receive count */
95 +#define LTQ_SPI_DMACON 0xEC /* DMA control */
96 +#define LTQ_SPI_IRNEN 0xF4 /* Interrupt node enable */
97 +#define LTQ_SPI_IRNICR 0xF8 /* Interrupt node interrupt capture */
98 +#define LTQ_SPI_IRNCR 0xFC /* Interrupt node control */
99 +
100 +#define LTQ_SPI_CLC_SMC_SHIFT 16 /* Clock divider for sleep mode */
101 +#define LTQ_SPI_CLC_SMC_MASK 0xFF
102 +#define LTQ_SPI_CLC_RMC_SHIFT 8 /* Clock divider for normal run mode */
103 +#define LTQ_SPI_CLC_RMC_MASK 0xFF
104 +#define LTQ_SPI_CLC_DISS BIT(1) /* Disable status bit */
105 +#define LTQ_SPI_CLC_DISR BIT(0) /* Disable request bit */
106 +
107 +#define LTQ_SPI_ID_TXFS_SHIFT 24 /* Implemented TX FIFO size */
108 +#define LTQ_SPI_ID_TXFS_MASK 0x3F
109 +#define LTQ_SPI_ID_RXFS_SHIFT 16 /* Implemented RX FIFO size */
110 +#define LTQ_SPI_ID_RXFS_MASK 0x3F
111 +#define LTQ_SPI_ID_REV_MASK 0x1F /* Hardware revision number */
112 +#define LTQ_SPI_ID_CFG BIT(5) /* DMA interface support */
113 +
114 +#define LTQ_SPI_CON_BM_SHIFT 16 /* Data width selection */
115 +#define LTQ_SPI_CON_BM_MASK 0x1F
116 +#define LTQ_SPI_CON_EM BIT(24) /* Echo mode */
117 +#define LTQ_SPI_CON_IDLE BIT(23) /* Idle bit value */
118 +#define LTQ_SPI_CON_ENBV BIT(22) /* Enable byte valid control */
119 +#define LTQ_SPI_CON_RUEN BIT(12) /* Receive underflow error enable */
120 +#define LTQ_SPI_CON_TUEN BIT(11) /* Transmit underflow error enable */
121 +#define LTQ_SPI_CON_AEN BIT(10) /* Abort error enable */
122 +#define LTQ_SPI_CON_REN BIT(9) /* Receive overflow error enable */
123 +#define LTQ_SPI_CON_TEN BIT(8) /* Transmit overflow error enable */
124 +#define LTQ_SPI_CON_LB BIT(7) /* Loopback control */
125 +#define LTQ_SPI_CON_PO BIT(6) /* Clock polarity control */
126 +#define LTQ_SPI_CON_PH BIT(5) /* Clock phase control */
127 +#define LTQ_SPI_CON_HB BIT(4) /* Heading control */
128 +#define LTQ_SPI_CON_RXOFF BIT(1) /* Switch receiver off */
129 +#define LTQ_SPI_CON_TXOFF BIT(0) /* Switch transmitter off */
130 +
131 +#define LTQ_SPI_STAT_RXBV_MASK 0x7
132 +#define LTQ_SPI_STAT_RXBV_SHIFT 28
133 +#define LTQ_SPI_STAT_BSY BIT(13) /* Busy flag */
134 +#define LTQ_SPI_STAT_RUE BIT(12) /* Receive underflow error flag */
135 +#define LTQ_SPI_STAT_TUE BIT(11) /* Transmit underflow error flag */
136 +#define LTQ_SPI_STAT_AE BIT(10) /* Abort error flag */
137 +#define LTQ_SPI_STAT_RE BIT(9) /* Receive error flag */
138 +#define LTQ_SPI_STAT_TE BIT(8) /* Transmit error flag */
139 +#define LTQ_SPI_STAT_MS BIT(1) /* Master/slave select bit */
140 +#define LTQ_SPI_STAT_EN BIT(0) /* Enable bit */
141 +
142 +#define LTQ_SPI_WHBSTATE_SETTUE BIT(15) /* Set transmit underflow error flag */
143 +#define LTQ_SPI_WHBSTATE_SETAE BIT(14) /* Set abort error flag */
144 +#define LTQ_SPI_WHBSTATE_SETRE BIT(13) /* Set receive error flag */
145 +#define LTQ_SPI_WHBSTATE_SETTE BIT(12) /* Set transmit error flag */
146 +#define LTQ_SPI_WHBSTATE_CLRTUE BIT(11) /* Clear transmit underflow error
147 + flag */
148 +#define LTQ_SPI_WHBSTATE_CLRAE BIT(10) /* Clear abort error flag */
149 +#define LTQ_SPI_WHBSTATE_CLRRE BIT(9) /* Clear receive error flag */
150 +#define LTQ_SPI_WHBSTATE_CLRTE BIT(8) /* Clear transmit error flag */
151 +#define LTQ_SPI_WHBSTATE_SETME BIT(7) /* Set mode error flag */
152 +#define LTQ_SPI_WHBSTATE_CLRME BIT(6) /* Clear mode error flag */
153 +#define LTQ_SPI_WHBSTATE_SETRUE BIT(5) /* Set receive underflow error flag */
154 +#define LTQ_SPI_WHBSTATE_CLRRUE BIT(4) /* Clear receive underflow error flag */
155 +#define LTQ_SPI_WHBSTATE_SETMS BIT(3) /* Set master select bit */
156 +#define LTQ_SPI_WHBSTATE_CLRMS BIT(2) /* Clear master select bit */
157 +#define LTQ_SPI_WHBSTATE_SETEN BIT(1) /* Set enable bit (operational mode) */
158 +#define LTQ_SPI_WHBSTATE_CLREN	BIT(0)	/* Clear enable bit (config mode) */
159 +#define LTQ_SPI_WHBSTATE_CLR_ERRORS 0x0F50
160 +
161 +#define LTQ_SPI_RXFCON_RXFITL_SHIFT 8 /* FIFO interrupt trigger level */
162 +#define LTQ_SPI_RXFCON_RXFITL_MASK 0x3F
163 +#define LTQ_SPI_RXFCON_RXFLU BIT(1) /* FIFO flush */
164 +#define LTQ_SPI_RXFCON_RXFEN BIT(0) /* FIFO enable */
165 +
166 +#define LTQ_SPI_TXFCON_TXFITL_SHIFT 8 /* FIFO interrupt trigger level */
167 +#define LTQ_SPI_TXFCON_TXFITL_MASK 0x3F
168 +#define LTQ_SPI_TXFCON_TXFLU BIT(1) /* FIFO flush */
169 +#define LTQ_SPI_TXFCON_TXFEN BIT(0) /* FIFO enable */
170 +
171 +#define LTQ_SPI_FSTAT_RXFFL_MASK 0x3f
172 +#define LTQ_SPI_FSTAT_RXFFL_SHIFT 0
173 +#define LTQ_SPI_FSTAT_TXFFL_MASK 0x3f
174 +#define LTQ_SPI_FSTAT_TXFFL_SHIFT 8
175 +
176 +#define LTQ_SPI_GPOCON_ISCSBN_SHIFT 8
177 +#define LTQ_SPI_GPOCON_INVOUTN_SHIFT 0
178 +
179 +#define LTQ_SPI_FGPO_SETOUTN_SHIFT 8
180 +#define LTQ_SPI_FGPO_CLROUTN_SHIFT 0
181 +
182 +#define LTQ_SPI_RXREQ_RXCNT_MASK 0xFFFF /* Receive count value */
183 +#define LTQ_SPI_RXCNT_TODO_MASK	0xFFFF	/* Receive to-do value */
184 +
185 +#define LTQ_SPI_IRNEN_F BIT(3) /* Frame end interrupt request */
186 +#define LTQ_SPI_IRNEN_E BIT(2) /* Error end interrupt request */
187 +#define LTQ_SPI_IRNEN_T BIT(0) /* Transmit end interrupt request */
188 +#define LTQ_SPI_IRNEN_R BIT(1) /* Receive end interrupt request */
189 +#define LTQ_SPI_IRNEN_T_XWAY BIT(1) /* Transmit end interrupt request */
190 +#define LTQ_SPI_IRNEN_R_XWAY BIT(0) /* Receive end interrupt request */
191 +#define LTQ_SPI_IRNEN_ALL 0xF
192 +
193 +struct ltq_spi {
194 + struct spi_bitbang bitbang;
195 + struct completion done;
196 + spinlock_t lock;
197 +
198 + struct device *dev;
199 + void __iomem *base;
200 + struct clk *fpiclk;
201 + struct clk *spiclk;
202 +
203 + int status;
204 + int irq[3];
205 +
206 + const u8 *tx;
207 + u8 *rx;
208 + u32 tx_cnt;
209 + u32 rx_cnt;
210 + u32 len;
211 + struct spi_transfer *curr_transfer;
212 +
213 + u32 (*get_tx) (struct ltq_spi *);
214 +
215 + u16 txfs;
216 + u16 rxfs;
217 + unsigned dma_support:1;
218 + unsigned cfg_mode:1;
219 +
220 + u32 irnen_t;
221 + u32 irnen_r;
222 +};
223 +
224 +static inline struct ltq_spi *ltq_spi_to_hw(struct spi_device *spi)
225 +{
226 + return spi_master_get_devdata(spi->master);
227 +}
228 +
229 +static inline u32 ltq_spi_reg_read(struct ltq_spi *hw, u32 reg)
230 +{
231 + return ioread32be(hw->base + reg);
232 +}
233 +
234 +static inline void ltq_spi_reg_write(struct ltq_spi *hw, u32 val, u32 reg)
235 +{
236 + iowrite32be(val, hw->base + reg);
237 +}
238 +
239 +static inline void ltq_spi_reg_setbit(struct ltq_spi *hw, u32 bits, u32 reg)
240 +{
241 + u32 val;
242 +
243 + val = ltq_spi_reg_read(hw, reg);
244 + val |= bits;
245 + ltq_spi_reg_write(hw, val, reg);
246 +}
247 +
248 +static inline void ltq_spi_reg_clearbit(struct ltq_spi *hw, u32 bits, u32 reg)
249 +{
250 + u32 val;
251 +
252 + val = ltq_spi_reg_read(hw, reg);
253 + val &= ~bits;
254 + ltq_spi_reg_write(hw, val, reg);
255 +}
256 +
257 +static void ltq_spi_hw_enable(struct ltq_spi *hw)
258 +{
259 + u32 clc;
260 +
261 + /* Power-up module */
262 + clk_enable(hw->spiclk);
263 +
264 + /*
265 + * Set clock divider for run mode to 1 to
266 + * run at same frequency as FPI bus
267 + */
268 + clc = (1 << LTQ_SPI_CLC_RMC_SHIFT);
269 + ltq_spi_reg_write(hw, clc, LTQ_SPI_CLC);
270 +}
271 +
272 +static void ltq_spi_hw_disable(struct ltq_spi *hw)
273 +{
274 + /* Set clock divider to 0 and set module disable bit */
275 + ltq_spi_reg_write(hw, LTQ_SPI_CLC_DISS, LTQ_SPI_CLC);
276 +
277 + /* Power-down module */
278 + clk_disable(hw->spiclk);
279 +}
280 +
281 +static void ltq_spi_reset_fifos(struct ltq_spi *hw)
282 +{
283 + u32 val;
284 +
285 + /*
286 + * Enable and flush FIFOs. Set interrupt trigger level to
287 + * half of FIFO count implemented in hardware.
288 + */
289 + if (hw->txfs > 1) {
290 + val = hw->txfs << (LTQ_SPI_TXFCON_TXFITL_SHIFT - 1);
291 + val |= LTQ_SPI_TXFCON_TXFEN | LTQ_SPI_TXFCON_TXFLU;
292 + ltq_spi_reg_write(hw, val, LTQ_SPI_TXFCON);
293 + }
294 +
295 + if (hw->rxfs > 1) {
296 + val = hw->rxfs << (LTQ_SPI_RXFCON_RXFITL_SHIFT - 1);
297 + val |= LTQ_SPI_RXFCON_RXFEN | LTQ_SPI_RXFCON_RXFLU;
298 + ltq_spi_reg_write(hw, val, LTQ_SPI_RXFCON);
299 + }
300 +}
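A short illustration of the trigger-level arithmetic above; the 8-entry FIFO depth is an assumed example, the real depths are read from the ID register in probe:

	/*
	 * Sketch: with an 8-entry TX FIFO (hw->txfs == 8),
	 *   8 << (LTQ_SPI_TXFCON_TXFITL_SHIFT - 1) == 8 << 7 == 0x400,
	 * which is the same as writing 4 into the TXFITL field at bit 8,
	 * i.e. an interrupt trigger level of half the implemented FIFO depth.
	 */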
301 +
302 +static inline int ltq_spi_wait_ready(struct ltq_spi *hw)
303 +{
304 + u32 stat;
305 + unsigned long timeout;
306 +
307 + timeout = jiffies + msecs_to_jiffies(200);
308 +
309 + do {
310 + stat = ltq_spi_reg_read(hw, LTQ_SPI_STAT);
311 + if (!(stat & LTQ_SPI_STAT_BSY))
312 + return 0;
313 +
314 + cond_resched();
315 + } while (!time_after_eq(jiffies, timeout));
316 +
317 + dev_err(hw->dev, "SPI wait ready timed out stat: %x\n", stat);
318 +
319 + return -ETIMEDOUT;
320 +}
321 +
322 +static void ltq_spi_config_mode_set(struct ltq_spi *hw)
323 +{
324 + if (hw->cfg_mode)
325 + return;
326 +
327 + /*
328 + * Putting the SPI module in config mode is only safe if no
329 +	 * transfer is in progress as indicated by the busy flag STAT.BSY.
330 + */
331 + if (ltq_spi_wait_ready(hw)) {
332 + ltq_spi_reset_fifos(hw);
333 + hw->status = -ETIMEDOUT;
334 + }
335 + ltq_spi_reg_write(hw, LTQ_SPI_WHBSTATE_CLREN, LTQ_SPI_WHBSTATE);
336 +
337 + hw->cfg_mode = 1;
338 +}
339 +
340 +static void ltq_spi_run_mode_set(struct ltq_spi *hw)
341 +{
342 + if (!hw->cfg_mode)
343 + return;
344 +
345 + ltq_spi_reg_write(hw, LTQ_SPI_WHBSTATE_SETEN, LTQ_SPI_WHBSTATE);
346 +
347 + hw->cfg_mode = 0;
348 +}
349 +
350 +static u32 ltq_spi_tx_word_u8(struct ltq_spi *hw)
351 +{
352 + const u8 *tx = hw->tx;
353 + u32 data = *tx++;
354 +
355 + hw->tx_cnt++;
356 + hw->tx++;
357 +
358 + return data;
359 +}
360 +
361 +static u32 ltq_spi_tx_word_u16(struct ltq_spi *hw)
362 +{
363 + const u16 *tx = (u16 *) hw->tx;
364 + u32 data = *tx++;
365 +
366 + hw->tx_cnt += 2;
367 + hw->tx += 2;
368 +
369 + return data;
370 +}
371 +
372 +static u32 ltq_spi_tx_word_u32(struct ltq_spi *hw)
373 +{
374 + const u32 *tx = (u32 *) hw->tx;
375 + u32 data = *tx++;
376 +
377 + hw->tx_cnt += 4;
378 + hw->tx += 4;
379 +
380 + return data;
381 +}
382 +
383 +static void ltq_spi_bits_per_word_set(struct spi_device *spi)
384 +{
385 + struct ltq_spi *hw = ltq_spi_to_hw(spi);
386 + u32 bm;
387 + u8 bits_per_word = spi->bits_per_word;
388 +
389 + /*
390 + * Use either default value of SPI device or value
391 + * from current transfer.
392 + */
393 + if (hw->curr_transfer && hw->curr_transfer->bits_per_word)
394 + bits_per_word = hw->curr_transfer->bits_per_word;
395 +
396 + if (bits_per_word <= 8)
397 + hw->get_tx = ltq_spi_tx_word_u8;
398 + else if (bits_per_word <= 16)
399 + hw->get_tx = ltq_spi_tx_word_u16;
400 + else if (bits_per_word <= 32)
401 + hw->get_tx = ltq_spi_tx_word_u32;
402 +
403 + /* CON.BM value = bits_per_word - 1 */
404 + bm = (bits_per_word - 1) << LTQ_SPI_CON_BM_SHIFT;
405 +
406 + ltq_spi_reg_clearbit(hw, LTQ_SPI_CON_BM_MASK <<
407 + LTQ_SPI_CON_BM_SHIFT, LTQ_SPI_CON);
408 + ltq_spi_reg_setbit(hw, bm, LTQ_SPI_CON);
409 +}
410 +
411 +static void ltq_spi_speed_set(struct spi_device *spi)
412 +{
413 + struct ltq_spi *hw = ltq_spi_to_hw(spi);
414 + u32 br, max_speed_hz, spi_clk;
415 + u32 speed_hz = spi->max_speed_hz;
416 +
417 + /*
418 + * Use either default value of SPI device or value
419 + * from current transfer.
420 + */
421 + if (hw->curr_transfer && hw->curr_transfer->speed_hz)
422 + speed_hz = hw->curr_transfer->speed_hz;
423 +
424 + /*
425 + * SPI module clock is derived from FPI bus clock dependent on
426 +	 * divider value in CLC.RMC which is always set to 1.
427 + */
428 + spi_clk = clk_get_rate(hw->fpiclk);
429 +
430 + /*
431 + * Maximum SPI clock frequency in master mode is half of
432 + * SPI module clock frequency. Maximum reload value of
433 + * baudrate generator BR is 2^16.
434 + */
435 + max_speed_hz = spi_clk / 2;
436 + if (speed_hz >= max_speed_hz)
437 + br = 0;
438 + else
439 + br = (max_speed_hz / speed_hz) - 1;
440 +
441 + if (br > 0xFFFF)
442 + br = 0xFFFF;
443 +
444 + ltq_spi_reg_write(hw, br, LTQ_SPI_BRT);
445 +}
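A worked example of the reload-value formula above, assuming a hypothetical 100 MHz FPI clock (the real rate comes from clk_get_rate(hw->fpiclk)):

	u32 spi_clk = 100000000;                /* assumed FPI bus clock: 100 MHz */
	u32 max_speed_hz = spi_clk / 2;         /* 50 MHz ceiling in master mode */
	u32 speed_hz = 10000000;                /* requested transfer speed */
	u32 br = (max_speed_hz / speed_hz) - 1; /* BRT reload value: 4 */
	/* effective SCLK = max_speed_hz / (br + 1) = 10 MHz */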
446 +
447 +static void ltq_spi_clockmode_set(struct spi_device *spi)
448 +{
449 + struct ltq_spi *hw = ltq_spi_to_hw(spi);
450 + u32 con;
451 +
452 + con = ltq_spi_reg_read(hw, LTQ_SPI_CON);
453 +
454 + /*
455 + * SPI mode mapping in CON register:
456 + * Mode CPOL CPHA CON.PO CON.PH
457 + * 0 0 0 0 1
458 + * 1 0 1 0 0
459 + * 2 1 0 1 1
460 + * 3 1 1 1 0
461 + */
462 + if (spi->mode & SPI_CPHA)
463 + con &= ~LTQ_SPI_CON_PH;
464 + else
465 + con |= LTQ_SPI_CON_PH;
466 +
467 + if (spi->mode & SPI_CPOL)
468 + con |= LTQ_SPI_CON_PO;
469 + else
470 + con &= ~LTQ_SPI_CON_PO;
471 +
472 + /* Set heading control */
473 + if (spi->mode & SPI_LSB_FIRST)
474 + con &= ~LTQ_SPI_CON_HB;
475 + else
476 + con |= LTQ_SPI_CON_HB;
477 +
478 + ltq_spi_reg_write(hw, con, LTQ_SPI_CON);
479 +}
480 +
481 +static void ltq_spi_xmit_set(struct ltq_spi *hw, struct spi_transfer *t)
482 +{
483 + u32 con;
484 +
485 + con = ltq_spi_reg_read(hw, LTQ_SPI_CON);
486 +
487 + if (t) {
488 + if (t->tx_buf && t->rx_buf) {
489 + con &= ~(LTQ_SPI_CON_TXOFF | LTQ_SPI_CON_RXOFF);
490 + } else if (t->rx_buf) {
491 + con &= ~LTQ_SPI_CON_RXOFF;
492 + con |= LTQ_SPI_CON_TXOFF;
493 + } else if (t->tx_buf) {
494 + con &= ~LTQ_SPI_CON_TXOFF;
495 + con |= LTQ_SPI_CON_RXOFF;
496 + }
497 + } else
498 + con |= (LTQ_SPI_CON_TXOFF | LTQ_SPI_CON_RXOFF);
499 +
500 + ltq_spi_reg_write(hw, con, LTQ_SPI_CON);
501 +}
502 +
503 +static void ltq_spi_internal_cs_activate(struct spi_device *spi)
504 +{
505 + struct ltq_spi *hw = ltq_spi_to_hw(spi);
506 + u32 fgpo;
507 +
508 + fgpo = (1 << (spi->chip_select + LTQ_SPI_FGPO_CLROUTN_SHIFT));
509 + ltq_spi_reg_setbit(hw, fgpo, LTQ_SPI_FGPO);
510 +}
511 +
512 +static void ltq_spi_internal_cs_deactivate(struct spi_device *spi)
513 +{
514 + struct ltq_spi *hw = ltq_spi_to_hw(spi);
515 + u32 fgpo;
516 +
517 + fgpo = (1 << (spi->chip_select + LTQ_SPI_FGPO_SETOUTN_SHIFT));
518 + ltq_spi_reg_setbit(hw, fgpo, LTQ_SPI_FGPO);
519 +}
520 +
521 +static void ltq_spi_chipselect(struct spi_device *spi, int cs)
522 +{
523 + struct ltq_spi *hw = ltq_spi_to_hw(spi);
524 +
525 + if (ltq_spi_wait_ready(hw))
526 + dev_err(&spi->dev, "wait failed\n");
527 +
528 + switch (cs) {
529 + case BITBANG_CS_ACTIVE:
530 + ltq_spi_bits_per_word_set(spi);
531 + ltq_spi_speed_set(spi);
532 + ltq_spi_clockmode_set(spi);
533 + ltq_spi_run_mode_set(hw);
534 + ltq_spi_internal_cs_activate(spi);
535 + break;
536 +
537 + case BITBANG_CS_INACTIVE:
538 + ltq_spi_internal_cs_deactivate(spi);
539 + ltq_spi_config_mode_set(hw);
540 + break;
541 + }
542 +}
543 +
544 +static int ltq_spi_setup_transfer(struct spi_device *spi,
545 + struct spi_transfer *t)
546 +{
547 + struct ltq_spi *hw = ltq_spi_to_hw(spi);
548 + u8 bits_per_word = spi->bits_per_word;
549 +
550 + hw->curr_transfer = t;
551 +
552 + if (t && t->bits_per_word)
553 + bits_per_word = t->bits_per_word;
554 +
555 + if (bits_per_word > 32)
556 + return -EINVAL;
557 +
558 + return 0;
559 +}
560 +
561 +static int ltq_spi_setup(struct spi_device *spi)
562 +{
563 + struct ltq_spi *hw = ltq_spi_to_hw(spi);
564 + u32 gpocon, fgpo;
565 +
566 + /* Set default word length to 8 if not set */
567 + if (!spi->bits_per_word)
568 + spi->bits_per_word = 8;
569 +
570 + if (spi->bits_per_word > 32)
571 + return -EINVAL;
572 +
573 + /*
574 + * Up to six GPIOs can be connected to the SPI module
575 + * via GPIO alternate function to control the chip select lines.
576 + */
577 + gpocon = (1 << (spi->chip_select +
578 + LTQ_SPI_GPOCON_ISCSBN_SHIFT));
579 +
580 + if (spi->mode & SPI_CS_HIGH)
581 + gpocon |= (1 << spi->chip_select);
582 +
583 + fgpo = (1 << (spi->chip_select + LTQ_SPI_FGPO_SETOUTN_SHIFT));
584 +
585 + ltq_spi_reg_setbit(hw, gpocon, LTQ_SPI_GPOCON);
586 + ltq_spi_reg_setbit(hw, fgpo, LTQ_SPI_FGPO);
587 +
588 + return 0;
589 +}
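To make the chip-select plumbing concrete, this is how the GPOCON/FGPO writes in ltq_spi_setup() and the cs_activate/cs_deactivate helpers above work out for a hypothetical device on chip_select = 2:

	/*
	 * setup:      GPOCON |= BIT(2 + 8)   ISCSB2, hand CS2 over to the SPI module
	 *             FGPO   |= BIT(2 + 8)   SETOUT2, park CS2 at its inactive level
	 * activate:   FGPO   |= BIT(2 + 0)   CLROUT2, drive CS2 active
	 * deactivate: FGPO   |= BIT(2 + 8)   SETOUT2, release CS2 again
	 * With SPI_CS_HIGH the setup code additionally sets BIT(2) in GPOCON
	 * to invert the output polarity.
	 */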
590 +
591 +static void ltq_spi_cleanup(struct spi_device *spi)
592 +{
593 +
594 +}
595 +
596 +static void ltq_spi_txfifo_write(struct ltq_spi *hw)
597 +{
598 + u32 fstat, data;
599 + u16 fifo_space;
600 +
601 +	/* Determine how many FIFO entries are free for TX data */
602 + fstat = ltq_spi_reg_read(hw, LTQ_SPI_FSTAT);
603 + fifo_space = hw->txfs - ((fstat >> LTQ_SPI_FSTAT_TXFFL_SHIFT) &
604 + LTQ_SPI_FSTAT_TXFFL_MASK);
605 +
606 + if (!fifo_space)
607 + return;
608 +
609 + while (hw->tx_cnt < hw->len && fifo_space) {
610 + data = hw->get_tx(hw);
611 + ltq_spi_reg_write(hw, data, LTQ_SPI_TB);
612 + fifo_space--;
613 + }
614 +}
615 +
616 +static void ltq_spi_rxfifo_read(struct ltq_spi *hw)
617 +{
618 + u32 fstat, data, *rx32;
619 + u16 fifo_fill;
620 + u8 rxbv, shift, *rx8;
621 +
622 +	/* Determine how many FIFO entries are filled with RX data */
623 + fstat = ltq_spi_reg_read(hw, LTQ_SPI_FSTAT);
624 + fifo_fill = ((fstat >> LTQ_SPI_FSTAT_RXFFL_SHIFT)
625 + & LTQ_SPI_FSTAT_RXFFL_MASK);
626 +
627 + if (!fifo_fill)
628 + return;
629 +
630 +	/*
631 +	 * The 32-bit wide FIFO entries are always used in full, independent
632 +	 * of the bits_per_word value. Thus four bytes have to be read at
633 +	 * once per FIFO entry.
634 +	 */
635 + rx32 = (u32 *) hw->rx;
636 + while (hw->len - hw->rx_cnt >= 4 && fifo_fill) {
637 + *rx32++ = ltq_spi_reg_read(hw, LTQ_SPI_RB);
638 + hw->rx_cnt += 4;
639 + hw->rx += 4;
640 + fifo_fill--;
641 + }
642 +
643 + /*
644 + * If there are remaining bytes, read byte count from STAT.RXBV
645 + * register and read the data byte-wise.
646 + */
647 + while (fifo_fill && hw->rx_cnt < hw->len) {
648 + rxbv = (ltq_spi_reg_read(hw, LTQ_SPI_STAT) >>
649 + LTQ_SPI_STAT_RXBV_SHIFT) & LTQ_SPI_STAT_RXBV_MASK;
650 + data = ltq_spi_reg_read(hw, LTQ_SPI_RB);
651 +
652 + shift = (rxbv - 1) * 8;
653 + rx8 = hw->rx;
654 +
655 + while (rxbv) {
656 + *rx8++ = (data >> shift) & 0xFF;
657 + rxbv--;
658 + shift -= 8;
659 + hw->rx_cnt++;
660 + hw->rx++;
661 + }
662 +
663 + fifo_fill--;
664 + }
665 +}
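A sketch of the byte-wise tail handling above on an invented sample word:

	/*
	 * Suppose two bytes of a transfer remain: STAT.RXBV reads 2 and the
	 * RB register returns 0x0000AABB.  shift starts at (2 - 1) * 8 = 8, so
	 *   rx[0] = (0x0000AABB >> 8) & 0xFF = 0xAA
	 *   rx[1] = (0x0000AABB >> 0) & 0xFF = 0xBB
	 * i.e. the earlier byte of the stream sits in the higher-order valid
	 * byte of RB, which the decreasing shift in the loop above unpacks.
	 */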
666 +
667 +static void ltq_spi_rxreq_set(struct ltq_spi *hw)
668 +{
669 + u32 rxreq, rxreq_max, rxtodo;
670 + u32 fstat, fifo_fill;
671 +
672 + rxtodo = ltq_spi_reg_read(hw, LTQ_SPI_RXCNT) & LTQ_SPI_RXCNT_TODO_MASK;
673 +
674 + /*
675 + * Check if there is remaining data in the FIFO before starting a new
676 + * receive request. The controller might have processed some more data
677 + * since the last FIFO poll.
678 + */
679 + fstat = ltq_spi_reg_read(hw, LTQ_SPI_FSTAT);
680 + fifo_fill = ((fstat >> LTQ_SPI_FSTAT_RXFFL_SHIFT)
681 + & LTQ_SPI_FSTAT_RXFFL_MASK);
682 + if (fifo_fill)
683 + return;
684 +
685 + /*
686 +	 * the expected number of RX bytes into the RXREQ register.
687 +	 * To avoid receive overflows at high clock rates it is better to
688 +	 * request only as many bytes as fit into the RX FIFO. This value
689 +	 * depends on the FIFO size implemented in hardware.
690 + * depends on the FIFO size implemented in hardware.
691 + */
692 + rxreq = hw->len - hw->rx_cnt;
693 + rxreq_max = hw->rxfs << 2;
694 + rxreq = min(rxreq_max, rxreq);
695 +
696 + if (!rxtodo && rxreq)
697 + ltq_spi_reg_write(hw, rxreq, LTQ_SPI_RXREQ);
698 +}
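For instance, assuming an 8-entry RX FIFO (illustrative numbers only):

	/*
	 * With hw->rxfs == 8, rxreq_max = 8 << 2 = 32 bytes. A 100-byte
	 * RX-only transfer is therefore clocked in as chunks of at most
	 * 32 bytes; each further chunk is requested from the RX interrupt
	 * handler once the FIFO has been drained.
	 */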
699 +
700 +static inline void ltq_spi_complete(struct ltq_spi *hw)
701 +{
702 + complete(&hw->done);
703 +}
704 +
705 +static irqreturn_t ltq_spi_tx_irq(int irq, void *data)
706 +{
707 + struct ltq_spi *hw = data;
708 + unsigned long flags;
709 + int completed = 0;
710 +
711 + spin_lock_irqsave(&hw->lock, flags);
712 +
713 + if (hw->tx_cnt < hw->len)
714 + ltq_spi_txfifo_write(hw);
715 +
716 + if (hw->tx_cnt == hw->len)
717 + completed = 1;
718 +
719 + spin_unlock_irqrestore(&hw->lock, flags);
720 +
721 + if (completed)
722 + ltq_spi_complete(hw);
723 +
724 + return IRQ_HANDLED;
725 +}
726 +
727 +static irqreturn_t ltq_spi_rx_irq(int irq, void *data)
728 +{
729 + struct ltq_spi *hw = data;
730 + unsigned long flags;
731 + int completed = 0;
732 +
733 + spin_lock_irqsave(&hw->lock, flags);
734 +
735 + if (hw->rx_cnt < hw->len) {
736 + ltq_spi_rxfifo_read(hw);
737 +
738 + if (hw->tx && hw->tx_cnt < hw->len)
739 + ltq_spi_txfifo_write(hw);
740 + }
741 +
742 + if (hw->rx_cnt == hw->len)
743 + completed = 1;
744 + else if (!hw->tx)
745 + ltq_spi_rxreq_set(hw);
746 +
747 + spin_unlock_irqrestore(&hw->lock, flags);
748 +
749 + if (completed)
750 + ltq_spi_complete(hw);
751 +
752 + return IRQ_HANDLED;
753 +}
754 +
755 +static irqreturn_t ltq_spi_err_irq(int irq, void *data)
756 +{
757 + struct ltq_spi *hw = data;
758 + unsigned long flags;
759 +
760 + spin_lock_irqsave(&hw->lock, flags);
761 +
762 + /* Disable all interrupts */
763 + ltq_spi_reg_clearbit(hw, LTQ_SPI_IRNEN_ALL, LTQ_SPI_IRNEN);
764 +
765 + dev_err(hw->dev, "error %x\n", ltq_spi_reg_read(hw, LTQ_SPI_STAT));
766 +
767 + /* Clear all error flags */
768 + ltq_spi_reg_write(hw, LTQ_SPI_WHBSTATE_CLR_ERRORS, LTQ_SPI_WHBSTATE);
769 +
770 + /* Flush FIFOs */
771 + ltq_spi_reg_setbit(hw, LTQ_SPI_RXFCON_RXFLU, LTQ_SPI_RXFCON);
772 + ltq_spi_reg_setbit(hw, LTQ_SPI_TXFCON_TXFLU, LTQ_SPI_TXFCON);
773 +
774 + hw->status = -EIO;
775 + spin_unlock_irqrestore(&hw->lock, flags);
776 +
777 + ltq_spi_complete(hw);
778 +
779 + return IRQ_HANDLED;
780 +}
781 +
782 +static int ltq_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
783 +{
784 + struct ltq_spi *hw = ltq_spi_to_hw(spi);
785 + u32 irq_flags = 0;
786 +
787 + hw->tx = t->tx_buf;
788 + hw->rx = t->rx_buf;
789 + hw->len = t->len;
790 + hw->tx_cnt = 0;
791 + hw->rx_cnt = 0;
792 + hw->status = 0;
793 + init_completion(&hw->done);
794 +
795 + ltq_spi_xmit_set(hw, t);
796 +
797 + /* Enable error interrupts */
798 + ltq_spi_reg_setbit(hw, LTQ_SPI_IRNEN_E, LTQ_SPI_IRNEN);
799 +
800 + if (hw->tx) {
801 + /* Initially fill TX FIFO with as much data as possible */
802 + ltq_spi_txfifo_write(hw);
803 + irq_flags |= hw->irnen_t;
804 +
805 + /* Always enable RX interrupt in Full Duplex mode */
806 + if (hw->rx)
807 + irq_flags |= hw->irnen_r;
808 + } else if (hw->rx) {
809 + /* Start RX clock */
810 + ltq_spi_rxreq_set(hw);
811 +
812 + /* Enable RX interrupt to receive data from RX FIFOs */
813 + irq_flags |= hw->irnen_r;
814 + }
815 +
816 + /* Enable TX or RX interrupts */
817 + ltq_spi_reg_setbit(hw, irq_flags, LTQ_SPI_IRNEN);
818 + wait_for_completion(&hw->done);
819 +
820 + /* Disable all interrupts */
821 + ltq_spi_reg_clearbit(hw, LTQ_SPI_IRNEN_ALL, LTQ_SPI_IRNEN);
822 +
823 + /*
824 + * Return length of current transfer for bitbang utility code if
825 + * no errors occured during transmission.
826 +	 * no errors occurred during transmission.
827 + if (!hw->status)
828 + hw->status = hw->len;
829 +
830 + return hw->status;
831 +}
832 +
833 +static const struct ltq_spi_irq_map {
834 + char *name;
835 + irq_handler_t handler;
836 +} ltq_spi_irqs[] = {
837 + { "spi_rx", ltq_spi_rx_irq },
838 + { "spi_tx", ltq_spi_tx_irq },
839 + { "spi_err", ltq_spi_err_irq },
840 +};
841 +
842 +static int ltq_spi_probe(struct platform_device *pdev)
843 +{
844 + struct resource irqres[3];
845 + struct spi_master *master;
846 + struct resource *r;
847 + struct ltq_spi *hw;
848 + int ret, i;
849 + u32 data, id;
850 +
851 + if (of_irq_to_resource_table(pdev->dev.of_node, irqres, 3) != 3) {
852 + dev_err(&pdev->dev, "IRQ settings missing in device tree\n");
853 + return -EINVAL;
854 + }
855 +
856 + master = spi_alloc_master(&pdev->dev, sizeof(struct ltq_spi));
857 + if (!master) {
858 + dev_err(&pdev->dev, "spi_alloc_master\n");
859 + ret = -ENOMEM;
860 + goto err;
861 + }
862 +
863 + hw = spi_master_get_devdata(master);
864 +
865 + r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
866 + if (r == NULL) {
867 + dev_err(&pdev->dev, "platform_get_resource\n");
868 + ret = -ENOENT;
869 + goto err_master;
870 + }
871 +
872 + r = devm_request_mem_region(&pdev->dev, r->start, resource_size(r),
873 + pdev->name);
874 + if (!r) {
875 + dev_err(&pdev->dev, "failed to request memory region\n");
876 + ret = -ENXIO;
877 + goto err_master;
878 + }
879 +
880 + hw->base = devm_ioremap_nocache(&pdev->dev, r->start, resource_size(r));
881 + if (!hw->base) {
882 + dev_err(&pdev->dev, "failed to remap memory region\n");
883 + ret = -ENXIO;
884 + goto err_master;
885 + }
886 +
887 + memset(hw->irq, 0, sizeof(hw->irq));
888 + for (i = 0; i < ARRAY_SIZE(ltq_spi_irqs); i++) {
889 + hw->irq[i] = irqres[i].start;
890 + ret = request_irq(hw->irq[i], ltq_spi_irqs[i].handler,
891 + 0, ltq_spi_irqs[i].name, hw);
892 + if (ret) {
893 + dev_err(&pdev->dev, "failed to request %s irq (%d)\n",
894 + ltq_spi_irqs[i].name, hw->irq[i]);
895 + goto err_irq;
896 + }
897 + }
898 +
899 + hw->fpiclk = clk_get_fpi();
900 + if (IS_ERR(hw->fpiclk)) {
901 + dev_err(&pdev->dev, "failed to get fpi clock\n");
902 + ret = PTR_ERR(hw->fpiclk);
903 + goto err_clk;
904 + }
905 +
906 + hw->spiclk = clk_get(&pdev->dev, NULL);
907 + if (IS_ERR(hw->spiclk)) {
908 + dev_err(&pdev->dev, "failed to get spi clock gate\n");
909 + ret = PTR_ERR(hw->spiclk);
910 + goto err_clk;
911 + }
912 +
913 + hw->bitbang.master = spi_master_get(master);
914 + hw->bitbang.chipselect = ltq_spi_chipselect;
915 + hw->bitbang.setup_transfer = ltq_spi_setup_transfer;
916 + hw->bitbang.txrx_bufs = ltq_spi_txrx_bufs;
917 +
918 + if (of_machine_is_compatible("lantiq,ase")) {
919 + master->num_chipselect = 3;
920 +
921 + hw->irnen_t = LTQ_SPI_IRNEN_T_XWAY;
922 + hw->irnen_r = LTQ_SPI_IRNEN_R_XWAY;
923 + } else {
924 + master->num_chipselect = 6;
925 +
926 + hw->irnen_t = LTQ_SPI_IRNEN_T;
927 + hw->irnen_r = LTQ_SPI_IRNEN_R;
928 + }
929 +
930 + master->bus_num = pdev->id;
931 + master->setup = ltq_spi_setup;
932 + master->cleanup = ltq_spi_cleanup;
933 + master->dev.of_node = pdev->dev.of_node;
934 +
935 + hw->dev = &pdev->dev;
936 + init_completion(&hw->done);
937 + spin_lock_init(&hw->lock);
938 +
939 + ltq_spi_hw_enable(hw);
940 +
941 + /* Read module capabilities */
942 + id = ltq_spi_reg_read(hw, LTQ_SPI_ID);
943 + hw->txfs = (id >> LTQ_SPI_ID_TXFS_SHIFT) & LTQ_SPI_ID_TXFS_MASK;
944 + hw->rxfs = (id >> LTQ_SPI_ID_RXFS_SHIFT) & LTQ_SPI_ID_RXFS_MASK;
945 + hw->dma_support = (id & LTQ_SPI_ID_CFG) ? 1 : 0;
946 +
947 + ltq_spi_config_mode_set(hw);
948 +
949 + /* Enable error checking, disable TX/RX, set idle value high */
950 + data = LTQ_SPI_CON_RUEN | LTQ_SPI_CON_AEN |
951 + LTQ_SPI_CON_TEN | LTQ_SPI_CON_REN |
952 + LTQ_SPI_CON_TXOFF | LTQ_SPI_CON_RXOFF | LTQ_SPI_CON_IDLE;
953 + ltq_spi_reg_write(hw, data, LTQ_SPI_CON);
954 +
955 + /* Enable master mode and clear error flags */
956 + ltq_spi_reg_write(hw, LTQ_SPI_WHBSTATE_SETMS |
957 + LTQ_SPI_WHBSTATE_CLR_ERRORS, LTQ_SPI_WHBSTATE);
958 +
959 + /* Reset GPIO/CS registers */
960 + ltq_spi_reg_write(hw, 0x0, LTQ_SPI_GPOCON);
961 + ltq_spi_reg_write(hw, 0xFF00, LTQ_SPI_FGPO);
962 +
963 + /* Enable and flush FIFOs */
964 + ltq_spi_reset_fifos(hw);
965 +
966 + ret = spi_bitbang_start(&hw->bitbang);
967 + if (ret) {
968 + dev_err(&pdev->dev, "spi_bitbang_start failed\n");
969 + goto err_bitbang;
970 + }
971 +
972 + platform_set_drvdata(pdev, hw);
973 +
974 + pr_info("Lantiq SoC SPI controller rev %u (TXFS %u, RXFS %u, DMA %u)\n",
975 + id & LTQ_SPI_ID_REV_MASK, hw->txfs, hw->rxfs, hw->dma_support);
976 +
977 + return 0;
978 +
979 +err_bitbang:
980 +	ltq_spi_hw_disable(hw);
981 +
982 +err_clk:
983 +	if (!IS_ERR_OR_NULL(hw->fpiclk))
984 +		clk_put(hw->fpiclk);
985 +	if (!IS_ERR_OR_NULL(hw->spiclk))
986 +		clk_put(hw->spiclk);
987 +
988 +err_irq:
989 +	/* Free only the IRQs that were actually requested; if we get here
990 +	 * from a later error path, i equals ARRAY_SIZE(ltq_spi_irqs). */
991 +	while (--i >= 0)
992 +		free_irq(hw->irq[i], hw);
993 +
994 +err_master:
995 +	spi_master_put(master);
996 +
997 +err:
998 +	return ret;
999 +}
1000 +
1001 +static int ltq_spi_remove(struct platform_device *pdev)
1002 +{
1003 + struct ltq_spi *hw = platform_get_drvdata(pdev);
1004 + int i;
1005 +
1006 + spi_bitbang_stop(&hw->bitbang);
1007 +
1008 + platform_set_drvdata(pdev, NULL);
1009 +
1010 + ltq_spi_config_mode_set(hw);
1011 + ltq_spi_hw_disable(hw);
1012 +
1013 + for (i = 0; i < ARRAY_SIZE(hw->irq); i++)
1014 + if (0 < hw->irq[i])
1015 + free_irq(hw->irq[i], hw);
1016 +
1017 + if (hw->fpiclk)
1018 + clk_put(hw->fpiclk);
1019 + if (hw->spiclk)
1020 + clk_put(hw->spiclk);
1021 +
1022 + spi_master_put(hw->bitbang.master);
1023 +
1024 + return 0;
1025 +}
1026 +
1027 +static const struct of_device_id ltq_spi_match[] = {
1028 + { .compatible = "lantiq,spi-xway" },
1029 + {},
1030 +};
1031 +MODULE_DEVICE_TABLE(of, ltq_spi_match);
1032 +
1033 +static struct platform_driver ltq_spi_driver = {
1034 + .probe = ltq_spi_probe,
1035 + .remove = ltq_spi_remove,
1036 + .driver = {
1037 + .name = "spi-xway",
1038 + .owner = THIS_MODULE,
1039 + .of_match_table = ltq_spi_match,
1040 + },
1041 +};
1042 +
1043 +module_platform_driver(ltq_spi_driver);
1044 +
1045 +MODULE_DESCRIPTION("Lantiq SoC SPI controller driver");
1046 +MODULE_AUTHOR("Daniel Schwierzeck <daniel.schwierzeck@googlemail.com>");
1047 +MODULE_LICENSE("GPL");
1048 +MODULE_ALIAS("platform:spi-xway");