kernel: update linux 3.2 to 3.2.13 and refresh patches
[openwrt/staging/mkresin.git] / target/linux/lantiq/patches-3.2/0042-SPI-MIPS-lantiq-adds-spi-xway.patch
1 From b257baf20b44e97770a2654a07f196fcbcd46e92 Mon Sep 17 00:00:00 2001
2 From: John Crispin <blogic@openwrt.org>
3 Date: Mon, 10 Oct 2011 22:29:13 +0200
4 Subject: [PATCH 42/70] SPI: MIPS: lantiq: adds spi xway
5
6 ---
7 .../mips/include/asm/mach-lantiq/lantiq_platform.h | 9 +
8 .../mips/include/asm/mach-lantiq/xway/lantiq_irq.h | 2 +
9 drivers/spi/Kconfig | 8 +
10 drivers/spi/Makefile | 1 +
11 drivers/spi/spi-xway.c | 1068 ++++++++++++++++++++
12 5 files changed, 1088 insertions(+), 0 deletions(-)
13 create mode 100644 drivers/spi/spi-xway.c
14
15 --- a/arch/mips/include/asm/mach-lantiq/lantiq_platform.h
16 +++ b/arch/mips/include/asm/mach-lantiq/lantiq_platform.h
17 @@ -50,4 +50,13 @@ struct ltq_eth_data {
18 int mii_mode;
19 };
20
21 +
22 +struct ltq_spi_platform_data {
23 + u16 num_chipselect;
24 +};
25 +
26 +struct ltq_spi_controller_data {
27 + unsigned gpio;
28 +};
29 +
30 #endif
31 --- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
32 +++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
33 @@ -27,6 +27,8 @@
34
35 #define LTQ_SSC_TIR (INT_NUM_IM0_IRL0 + 15)
36 #define LTQ_SSC_RIR (INT_NUM_IM0_IRL0 + 14)
37 +#define LTQ_SSC_TIR_AR9 (INT_NUM_IM0_IRL0 + 14)
38 +#define LTQ_SSC_RIR_AR9 (INT_NUM_IM0_IRL0 + 15)
39 #define LTQ_SSC_EIR (INT_NUM_IM0_IRL0 + 16)
40
41 #define LTQ_MEI_DYING_GASP_INT (INT_NUM_IM1_IRL0 + 21)
42 --- a/drivers/spi/Kconfig
43 +++ b/drivers/spi/Kconfig
44 @@ -393,6 +393,14 @@ config SPI_NUC900
45 help
46 SPI driver for Nuvoton NUC900 series ARM SoCs
47
48 +config SPI_XWAY
49 + tristate "Lantiq XWAY SPI controller"
50 + depends on LANTIQ && SOC_TYPE_XWAY
51 + select SPI_BITBANG
52 + help
53 + This driver supports the Lantiq SoC SPI controller in master
54 + mode.
55 +
56 #
57 # Add new SPI master controllers in alphabetical order above this line
58 #
59 --- a/drivers/spi/Makefile
60 +++ b/drivers/spi/Makefile
61 @@ -60,4 +60,5 @@ obj-$(CONFIG_SPI_TLE62X0) += spi-tle62x
62 obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi-topcliff-pch.o
63 obj-$(CONFIG_SPI_TXX9) += spi-txx9.o
64 obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
65 +obj-$(CONFIG_SPI_XWAY) += spi-xway.o
66
67 --- /dev/null
68 +++ b/drivers/spi/spi-xway.c
69 @@ -0,0 +1,1068 @@
70 +/*
71 + * Lantiq SoC SPI controller
72 + *
73 + * Copyright (C) 2011 Daniel Schwierzeck <daniel.schwierzeck@googlemail.com>
74 + *
75 + * This program is free software; you can distribute it and/or modify it
76 + * under the terms of the GNU General Public License (Version 2) as
77 + * published by the Free Software Foundation.
78 + */
79 +
80 +#include <linux/init.h>
81 +#include <linux/module.h>
82 +#include <linux/workqueue.h>
83 +#include <linux/platform_device.h>
84 +#include <linux/io.h>
85 +#include <linux/sched.h>
86 +#include <linux/delay.h>
87 +#include <linux/interrupt.h>
88 +#include <linux/completion.h>
89 +#include <linux/spinlock.h>
90 +#include <linux/err.h>
91 +#include <linux/clk.h>
92 +#include <linux/gpio.h>
93 +#include <linux/spi/spi.h>
94 +#include <linux/spi/spi_bitbang.h>
95 +
96 +#include <lantiq_soc.h>
97 +#include <lantiq_platform.h>
98 +
99 +#define LTQ_SPI_CLC 0x00 /* Clock control */
100 +#define LTQ_SPI_PISEL 0x04 /* Port input select */
101 +#define LTQ_SPI_ID 0x08 /* Identification */
102 +#define LTQ_SPI_CON 0x10 /* Control */
103 +#define LTQ_SPI_STAT 0x14 /* Status */
104 +#define LTQ_SPI_WHBSTATE 0x18 /* Write HW modified state */
105 +#define LTQ_SPI_TB 0x20 /* Transmit buffer */
106 +#define LTQ_SPI_RB 0x24 /* Receive buffer */
107 +#define LTQ_SPI_RXFCON 0x30 /* Receive FIFO control */
108 +#define LTQ_SPI_TXFCON 0x34 /* Transmit FIFO control */
109 +#define LTQ_SPI_FSTAT 0x38 /* FIFO status */
110 +#define LTQ_SPI_BRT 0x40 /* Baudrate timer */
111 +#define LTQ_SPI_BRSTAT 0x44 /* Baudrate timer status */
112 +#define LTQ_SPI_SFCON 0x60 /* Serial frame control */
113 +#define LTQ_SPI_SFSTAT 0x64 /* Serial frame status */
114 +#define LTQ_SPI_GPOCON 0x70 /* General purpose output control */
115 +#define LTQ_SPI_GPOSTAT 0x74 /* General purpose output status */
116 +#define LTQ_SPI_FGPO 0x78 /* Forced general purpose output */
117 +#define LTQ_SPI_RXREQ 0x80 /* Receive request */
118 +#define LTQ_SPI_RXCNT 0x84 /* Receive count */
119 +#define LTQ_SPI_DMACON 0xEC /* DMA control */
120 +#define LTQ_SPI_IRNEN 0xF4 /* Interrupt node enable */
121 +#define LTQ_SPI_IRNICR 0xF8 /* Interrupt node interrupt capture */
122 +#define LTQ_SPI_IRNCR 0xFC /* Interrupt node control */
123 +
124 +#define LTQ_SPI_CLC_SMC_SHIFT 16 /* Clock divider for sleep mode */
125 +#define LTQ_SPI_CLC_SMC_MASK 0xFF
126 +#define LTQ_SPI_CLC_RMC_SHIFT 8 /* Clock divider for normal run mode */
127 +#define LTQ_SPI_CLC_RMC_MASK 0xFF
128 +#define LTQ_SPI_CLC_DISS BIT(1) /* Disable status bit */
129 +#define LTQ_SPI_CLC_DISR BIT(0) /* Disable request bit */
130 +
131 +#define LTQ_SPI_ID_TXFS_SHIFT 24 /* Implemented TX FIFO size */
132 +#define LTQ_SPI_ID_TXFS_MASK 0x3F
133 +#define LTQ_SPI_ID_RXFS_SHIFT 16 /* Implemented RX FIFO size */
134 +#define LTQ_SPI_ID_RXFS_MASK 0x3F
135 +#define LTQ_SPI_ID_REV_MASK 0x1F /* Hardware revision number */
136 +#define LTQ_SPI_ID_CFG BIT(5) /* DMA interface support */
137 +
138 +#define LTQ_SPI_CON_BM_SHIFT 16 /* Data width selection */
139 +#define LTQ_SPI_CON_BM_MASK 0x1F
140 +#define LTQ_SPI_CON_EM BIT(24) /* Echo mode */
141 +#define LTQ_SPI_CON_IDLE BIT(23) /* Idle bit value */
142 +#define LTQ_SPI_CON_ENBV BIT(22) /* Enable byte valid control */
143 +#define LTQ_SPI_CON_RUEN BIT(12) /* Receive underflow error enable */
144 +#define LTQ_SPI_CON_TUEN BIT(11) /* Transmit underflow error enable */
145 +#define LTQ_SPI_CON_AEN BIT(10) /* Abort error enable */
146 +#define LTQ_SPI_CON_REN BIT(9) /* Receive overflow error enable */
147 +#define LTQ_SPI_CON_TEN BIT(8) /* Transmit overflow error enable */
148 +#define LTQ_SPI_CON_LB BIT(7) /* Loopback control */
149 +#define LTQ_SPI_CON_PO BIT(6) /* Clock polarity control */
150 +#define LTQ_SPI_CON_PH BIT(5) /* Clock phase control */
151 +#define LTQ_SPI_CON_HB BIT(4) /* Heading control */
152 +#define LTQ_SPI_CON_RXOFF BIT(1) /* Switch receiver off */
153 +#define LTQ_SPI_CON_TXOFF BIT(0) /* Switch transmitter off */
154 +
155 +#define LTQ_SPI_STAT_RXBV_MASK 0x7
156 +#define LTQ_SPI_STAT_RXBV_SHIFT 28
157 +#define LTQ_SPI_STAT_BSY BIT(13) /* Busy flag */
158 +#define LTQ_SPI_STAT_RUE BIT(12) /* Receive underflow error flag */
159 +#define LTQ_SPI_STAT_TUE BIT(11) /* Transmit underflow error flag */
160 +#define LTQ_SPI_STAT_AE BIT(10) /* Abort error flag */
161 +#define LTQ_SPI_STAT_RE BIT(9) /* Receive error flag */
162 +#define LTQ_SPI_STAT_TE BIT(8) /* Transmit error flag */
163 +#define LTQ_SPI_STAT_MS BIT(1) /* Master/slave select bit */
164 +#define LTQ_SPI_STAT_EN BIT(0) /* Enable bit */
165 +
166 +#define LTQ_SPI_WHBSTATE_SETTUE BIT(15) /* Set transmit underflow error flag */
167 +#define LTQ_SPI_WHBSTATE_SETAE BIT(14) /* Set abort error flag */
168 +#define LTQ_SPI_WHBSTATE_SETRE BIT(13) /* Set receive error flag */
169 +#define LTQ_SPI_WHBSTATE_SETTE BIT(12) /* Set transmit error flag */
170 +#define LTQ_SPI_WHBSTATE_CLRTUE BIT(11) /* Clear transmit underflow error flag */
171 +#define LTQ_SPI_WHBSTATE_CLRAE BIT(10) /* Clear abort error flag */
172 +#define LTQ_SPI_WHBSTATE_CLRRE BIT(9) /* Clear receive error flag */
173 +#define LTQ_SPI_WHBSTATE_CLRTE BIT(8) /* Clear transmit error flag */
174 +#define LTQ_SPI_WHBSTATE_SETME BIT(7) /* Set mode error flag */
175 +#define LTQ_SPI_WHBSTATE_CLRME BIT(6) /* Clear mode error flag */
176 +#define LTQ_SPI_WHBSTATE_SETRUE BIT(5) /* Set receive underflow error flag */
177 +#define LTQ_SPI_WHBSTATE_CLRRUE BIT(4) /* Clear receive underflow error flag */
178 +#define LTQ_SPI_WHBSTATE_SETMS BIT(3) /* Set master select bit */
179 +#define LTQ_SPI_WHBSTATE_CLRMS BIT(2) /* Clear master select bit */
180 +#define LTQ_SPI_WHBSTATE_SETEN BIT(1) /* Set enable bit (operational mode) */
181 +#define LTQ_SPI_WHBSTATE_CLREN BIT(0) /* Clear enable bit (config mode) */
182 +#define LTQ_SPI_WHBSTATE_CLR_ERRORS 0x0F50
183 +
184 +#define LTQ_SPI_RXFCON_RXFITL_SHIFT 8 /* FIFO interrupt trigger level */
185 +#define LTQ_SPI_RXFCON_RXFITL_MASK 0x3F
186 +#define LTQ_SPI_RXFCON_RXFLU BIT(1) /* FIFO flush */
187 +#define LTQ_SPI_RXFCON_RXFEN BIT(0) /* FIFO enable */
188 +
189 +#define LTQ_SPI_TXFCON_TXFITL_SHIFT 8 /* FIFO interrupt trigger level */
190 +#define LTQ_SPI_TXFCON_TXFITL_MASK 0x3F
191 +#define LTQ_SPI_TXFCON_TXFLU BIT(1) /* FIFO flush */
192 +#define LTQ_SPI_TXFCON_TXFEN BIT(0) /* FIFO enable */
193 +
194 +#define LTQ_SPI_FSTAT_RXFFL_MASK 0x3f
195 +#define LTQ_SPI_FSTAT_RXFFL_SHIFT 0
196 +#define LTQ_SPI_FSTAT_TXFFL_MASK 0x3f
197 +#define LTQ_SPI_FSTAT_TXFFL_SHIFT 8
198 +
199 +#define LTQ_SPI_GPOCON_ISCSBN_SHIFT 8
200 +#define LTQ_SPI_GPOCON_INVOUTN_SHIFT 0
201 +
202 +#define LTQ_SPI_FGPO_SETOUTN_SHIFT 8
203 +#define LTQ_SPI_FGPO_CLROUTN_SHIFT 0
204 +
205 +#define LTQ_SPI_RXREQ_RXCNT_MASK 0xFFFF /* Receive count value */
206 +#define LTQ_SPI_RXCNT_TODO_MASK 0xFFFF /* Receive to-do value */
207 +
208 +#define LTQ_SPI_IRNEN_F BIT(3) /* Frame end interrupt request */
209 +#define LTQ_SPI_IRNEN_E BIT(2) /* Error interrupt request */
210 +#define LTQ_SPI_IRNEN_T BIT(1) /* Transmit end interrupt request */
211 +#define LTQ_SPI_IRNEN_R BIT(0) /* Receive end interrupt request */
212 +#define LTQ_SPI_IRNEN_ALL 0xF
213 +
214 +/* Hard-wired GPIOs used by SPI controller */
215 +#define LTQ_SPI_GPIO_DI 16
216 +#define LTQ_SPI_GPIO_DO 17
217 +#define LTQ_SPI_GPIO_CLK 18
218 +
219 +struct ltq_spi {
220 + struct spi_bitbang bitbang;
221 + struct completion done;
222 + spinlock_t lock;
223 +
224 + struct device *dev;
225 + void __iomem *base;
226 + struct clk *fpiclk;
227 + struct clk *spiclk;
228 +
229 + int status;
230 + int irq[3];
231 +
232 + const u8 *tx;
233 + u8 *rx;
234 + u32 tx_cnt;
235 + u32 rx_cnt;
236 + u32 len;
237 + struct spi_transfer *curr_transfer;
238 +
239 + u32 (*get_tx) (struct ltq_spi *);
240 +
241 + u16 txfs;
242 + u16 rxfs;
243 + unsigned dma_support:1;
244 + unsigned cfg_mode:1;
245 +
246 +};
247 +
248 +struct ltq_spi_controller_state {
249 + void (*cs_activate) (struct spi_device *);
250 + void (*cs_deactivate) (struct spi_device *);
251 +};
252 +
253 +struct ltq_spi_irq_map {
254 + char *name;
255 + irq_handler_t handler;
256 +};
257 +
258 +struct ltq_spi_cs_gpio_map {
259 + unsigned gpio;
260 + unsigned mux;
261 +};
262 +
263 +static inline struct ltq_spi *ltq_spi_to_hw(struct spi_device *spi)
264 +{
265 + return spi_master_get_devdata(spi->master);
266 +}
267 +
268 +static inline u32 ltq_spi_reg_read(struct ltq_spi *hw, u32 reg)
269 +{
270 + return ioread32be(hw->base + reg);
271 +}
272 +
273 +static inline void ltq_spi_reg_write(struct ltq_spi *hw, u32 val, u32 reg)
274 +{
275 + iowrite32be(val, hw->base + reg);
276 +}
277 +
278 +static inline void ltq_spi_reg_setbit(struct ltq_spi *hw, u32 bits, u32 reg)
279 +{
280 + u32 val;
281 +
282 + val = ltq_spi_reg_read(hw, reg);
283 + val |= bits;
284 + ltq_spi_reg_write(hw, val, reg);
285 +}
286 +
287 +static inline void ltq_spi_reg_clearbit(struct ltq_spi *hw, u32 bits, u32 reg)
288 +{
289 + u32 val;
290 +
291 + val = ltq_spi_reg_read(hw, reg);
292 + val &= ~bits;
293 + ltq_spi_reg_write(hw, val, reg);
294 +}
295 +
296 +static void ltq_spi_hw_enable(struct ltq_spi *hw)
297 +{
298 + u32 clc;
299 +
300 + /* Power-up module */
301 + clk_enable(hw->spiclk);
302 +
303 + /*
304 + * Set clock divider for run mode to 1 to
305 + * run at the same frequency as the FPI bus
306 + */
307 + clc = (1 << LTQ_SPI_CLC_RMC_SHIFT);
308 + ltq_spi_reg_write(hw, clc, LTQ_SPI_CLC);
309 +}
310 +
311 +static void ltq_spi_hw_disable(struct ltq_spi *hw)
312 +{
313 + /* Set clock divider to 0 and set module disable bit */
314 + ltq_spi_reg_write(hw, LTQ_SPI_CLC_DISS, LTQ_SPI_CLC);
315 +
316 + /* Power-down module */
317 + clk_disable(hw->spiclk);
318 +}
319 +
320 +static void ltq_spi_reset_fifos(struct ltq_spi *hw)
321 +{
322 + u32 val;
323 +
324 + /*
325 + * Enable and flush FIFOs. Set interrupt trigger level to
326 + * half of FIFO count implemented in hardware.
327 + */
328 + if (hw->txfs > 1) {
329 + val = hw->txfs << (LTQ_SPI_TXFCON_TXFITL_SHIFT - 1);
330 + val |= LTQ_SPI_TXFCON_TXFEN | LTQ_SPI_TXFCON_TXFLU;
331 + ltq_spi_reg_write(hw, val, LTQ_SPI_TXFCON);
332 + }
333 +
334 + if (hw->rxfs > 1) {
335 + val = hw->rxfs << (LTQ_SPI_RXFCON_RXFITL_SHIFT - 1);
336 + val |= LTQ_SPI_RXFCON_RXFEN | LTQ_SPI_RXFCON_RXFLU;
337 + ltq_spi_reg_write(hw, val, LTQ_SPI_RXFCON);
338 + }
339 +}
340 +
341 +static inline int ltq_spi_wait_ready(struct ltq_spi *hw)
342 +{
343 + u32 stat;
344 + unsigned long timeout;
345 +
346 + timeout = jiffies + msecs_to_jiffies(200);
347 +
348 + do {
349 + stat = ltq_spi_reg_read(hw, LTQ_SPI_STAT);
350 + if (!(stat & LTQ_SPI_STAT_BSY))
351 + return 0;
352 +
353 + cond_resched();
354 + } while (!time_after_eq(jiffies, timeout));
355 +
356 + dev_err(hw->dev, "SPI wait ready timed out\n");
357 +
358 + return -ETIMEDOUT;
359 +}
360 +
361 +static void ltq_spi_config_mode_set(struct ltq_spi *hw)
362 +{
363 + if (hw->cfg_mode)
364 + return;
365 +
366 + /*
367 + * Putting the SPI module in config mode is only safe if no
368 + * transfer is in progress, as indicated by the busy flag STAT.BSY.
369 + */
370 + if (ltq_spi_wait_ready(hw)) {
371 + ltq_spi_reset_fifos(hw);
372 + hw->status = -ETIMEDOUT;
373 + }
374 + ltq_spi_reg_write(hw, LTQ_SPI_WHBSTATE_CLREN, LTQ_SPI_WHBSTATE);
375 +
376 + hw->cfg_mode = 1;
377 +}
378 +
379 +static void ltq_spi_run_mode_set(struct ltq_spi *hw)
380 +{
381 + if (!hw->cfg_mode)
382 + return;
383 +
384 + ltq_spi_reg_write(hw, LTQ_SPI_WHBSTATE_SETEN, LTQ_SPI_WHBSTATE);
385 +
386 + hw->cfg_mode = 0;
387 +}
388 +
389 +static u32 ltq_spi_tx_word_u8(struct ltq_spi *hw)
390 +{
391 + const u8 *tx = hw->tx;
392 + u32 data = *tx++;
393 +
394 + hw->tx_cnt++;
395 + hw->tx++;
396 +
397 + return data;
398 +}
399 +
400 +static u32 ltq_spi_tx_word_u16(struct ltq_spi *hw)
401 +{
402 + const u16 *tx = (u16 *) hw->tx;
403 + u32 data = *tx++;
404 +
405 + hw->tx_cnt += 2;
406 + hw->tx += 2;
407 +
408 + return data;
409 +}
410 +
411 +static u32 ltq_spi_tx_word_u32(struct ltq_spi *hw)
412 +{
413 + const u32 *tx = (u32 *) hw->tx;
414 + u32 data = *tx++;
415 +
416 + hw->tx_cnt += 4;
417 + hw->tx += 4;
418 +
419 + return data;
420 +}
421 +
422 +static void ltq_spi_bits_per_word_set(struct spi_device *spi)
423 +{
424 + struct ltq_spi *hw = ltq_spi_to_hw(spi);
425 + u32 bm;
426 + u8 bits_per_word = spi->bits_per_word;
427 +
428 + /*
429 + * Use either default value of SPI device or value
430 + * from current transfer.
431 + */
432 + if (hw->curr_transfer && hw->curr_transfer->bits_per_word)
433 + bits_per_word = hw->curr_transfer->bits_per_word;
434 +
435 + if (bits_per_word <= 8)
436 + hw->get_tx = ltq_spi_tx_word_u8;
437 + else if (bits_per_word <= 16)
438 + hw->get_tx = ltq_spi_tx_word_u16;
439 + else if (bits_per_word <= 32)
440 + hw->get_tx = ltq_spi_tx_word_u32;
441 +
442 + /* CON.BM value = bits_per_word - 1 */
443 + bm = (bits_per_word - 1) << LTQ_SPI_CON_BM_SHIFT;
444 +
445 + ltq_spi_reg_clearbit(hw, LTQ_SPI_CON_BM_MASK <<
446 + LTQ_SPI_CON_BM_SHIFT, LTQ_SPI_CON);
447 + ltq_spi_reg_setbit(hw, bm, LTQ_SPI_CON);
448 +}
449 +
450 +static void ltq_spi_speed_set(struct spi_device *spi)
451 +{
452 + struct ltq_spi *hw = ltq_spi_to_hw(spi);
453 + u32 br, max_speed_hz, spi_clk;
454 + u32 speed_hz = spi->max_speed_hz;
455 +
456 + /*
457 + * Use either default value of SPI device or value
458 + * from current transfer.
459 + */
460 + if (hw->curr_transfer && hw->curr_transfer->speed_hz)
461 + speed_hz = hw->curr_transfer->speed_hz;
462 +
463 + /*
464 + * The SPI module clock is derived from the FPI bus clock, depending on
465 + * the divider value in CLC.RMC which is always set to 1.
466 + */
467 + spi_clk = clk_get_rate(hw->fpiclk);
468 +
469 + /*
470 + * Maximum SPI clock frequency in master mode is half of
471 + * SPI module clock frequency. Maximum reload value of
472 + * baudrate generator BR is 2^16.
473 + */
474 + max_speed_hz = spi_clk / 2;
475 + if (speed_hz >= max_speed_hz)
476 + br = 0;
477 + else
478 + br = (max_speed_hz / speed_hz) - 1;
479 +
480 + if (br > 0xFFFF)
481 + br = 0xFFFF;
482 +
483 + ltq_spi_reg_write(hw, br, LTQ_SPI_BRT);
484 +}
485 +
486 +static void ltq_spi_clockmode_set(struct spi_device *spi)
487 +{
488 + struct ltq_spi *hw = ltq_spi_to_hw(spi);
489 + u32 con;
490 +
491 + con = ltq_spi_reg_read(hw, LTQ_SPI_CON);
492 +
493 + /*
494 + * SPI mode mapping in CON register:
495 + * Mode CPOL CPHA CON.PO CON.PH
496 + * 0 0 0 0 1
497 + * 1 0 1 0 0
498 + * 2 1 0 1 1
499 + * 3 1 1 1 0
500 + */
501 + if (spi->mode & SPI_CPHA)
502 + con &= ~LTQ_SPI_CON_PH;
503 + else
504 + con |= LTQ_SPI_CON_PH;
505 +
506 + if (spi->mode & SPI_CPOL)
507 + con |= LTQ_SPI_CON_PO;
508 + else
509 + con &= ~LTQ_SPI_CON_PO;
510 +
511 + /* Set heading control */
512 + if (spi->mode & SPI_LSB_FIRST)
513 + con &= ~LTQ_SPI_CON_HB;
514 + else
515 + con |= LTQ_SPI_CON_HB;
516 +
517 + ltq_spi_reg_write(hw, con, LTQ_SPI_CON);
518 +}
519 +
520 +static void ltq_spi_xmit_set(struct ltq_spi *hw, struct spi_transfer *t)
521 +{
522 + u32 con;
523 +
524 + con = ltq_spi_reg_read(hw, LTQ_SPI_CON);
525 +
526 + if (t) {
527 + if (t->tx_buf && t->rx_buf) {
528 + con &= ~(LTQ_SPI_CON_TXOFF | LTQ_SPI_CON_RXOFF);
529 + } else if (t->rx_buf) {
530 + con &= ~LTQ_SPI_CON_RXOFF;
531 + con |= LTQ_SPI_CON_TXOFF;
532 + } else if (t->tx_buf) {
533 + con &= ~LTQ_SPI_CON_TXOFF;
534 + con |= LTQ_SPI_CON_RXOFF;
535 + }
536 + } else
537 + con |= (LTQ_SPI_CON_TXOFF | LTQ_SPI_CON_RXOFF);
538 +
539 + ltq_spi_reg_write(hw, con, LTQ_SPI_CON);
540 +}
541 +
542 +static void ltq_spi_gpio_cs_activate(struct spi_device *spi)
543 +{
544 + struct ltq_spi_controller_data *cdata = spi->controller_data;
545 + int val = spi->mode & SPI_CS_HIGH ? 1 : 0;
546 +
547 + gpio_set_value(cdata->gpio, val);
548 +}
549 +
550 +static void ltq_spi_gpio_cs_deactivate(struct spi_device *spi)
551 +{
552 + struct ltq_spi_controller_data *cdata = spi->controller_data;
553 + int val = spi->mode & SPI_CS_HIGH ? 0 : 1;
554 +
555 + gpio_set_value(cdata->gpio, val);
556 +}
557 +
558 +static void ltq_spi_internal_cs_activate(struct spi_device *spi)
559 +{
560 + struct ltq_spi *hw = ltq_spi_to_hw(spi);
561 + u32 fgpo;
562 +
563 + fgpo = (1 << (spi->chip_select + LTQ_SPI_FGPO_CLROUTN_SHIFT));
564 + ltq_spi_reg_setbit(hw, fgpo, LTQ_SPI_FGPO);
565 +}
566 +
567 +static void ltq_spi_internal_cs_deactivate(struct spi_device *spi)
568 +{
569 + struct ltq_spi *hw = ltq_spi_to_hw(spi);
570 + u32 fgpo;
571 +
572 + fgpo = (1 << (spi->chip_select + LTQ_SPI_FGPO_SETOUTN_SHIFT));
573 + ltq_spi_reg_setbit(hw, fgpo, LTQ_SPI_FGPO);
574 +}
575 +
576 +static void ltq_spi_chipselect(struct spi_device *spi, int cs)
577 +{
578 + struct ltq_spi *hw = ltq_spi_to_hw(spi);
579 + struct ltq_spi_controller_state *cstate = spi->controller_state;
580 +
581 + switch (cs) {
582 + case BITBANG_CS_ACTIVE:
583 + ltq_spi_bits_per_word_set(spi);
584 + ltq_spi_speed_set(spi);
585 + ltq_spi_clockmode_set(spi);
586 + ltq_spi_run_mode_set(hw);
587 +
588 + cstate->cs_activate(spi);
589 + break;
590 +
591 + case BITBANG_CS_INACTIVE:
592 + cstate->cs_deactivate(spi);
593 +
594 + ltq_spi_config_mode_set(hw);
595 +
596 + break;
597 + }
598 +}
599 +
600 +static int ltq_spi_setup_transfer(struct spi_device *spi,
601 + struct spi_transfer *t)
602 +{
603 + struct ltq_spi *hw = ltq_spi_to_hw(spi);
604 + u8 bits_per_word = spi->bits_per_word;
605 +
606 + hw->curr_transfer = t;
607 +
608 + if (t && t->bits_per_word)
609 + bits_per_word = t->bits_per_word;
610 +
611 + if (bits_per_word > 32)
612 + return -EINVAL;
613 +
614 + ltq_spi_config_mode_set(hw);
615 +
616 + return 0;
617 +}
618 +
619 +static const struct ltq_spi_cs_gpio_map ltq_spi_cs[] = {
620 + { 15, 2 },
621 + { 22, 2 },
622 + { 13, 1 },
623 + { 10, 1 },
624 + { 9, 1 },
625 + { 11, 3 },
626 +};
627 +
628 +static int ltq_spi_setup(struct spi_device *spi)
629 +{
630 + struct ltq_spi *hw = ltq_spi_to_hw(spi);
631 + struct ltq_spi_controller_data *cdata = spi->controller_data;
632 + struct ltq_spi_controller_state *cstate;
633 + u32 gpocon, fgpo;
634 + int ret;
635 +
636 + /* Set default word length to 8 if not set */
637 + if (!spi->bits_per_word)
638 + spi->bits_per_word = 8;
639 +
640 + if (spi->bits_per_word > 32)
641 + return -EINVAL;
642 +
643 + if (!spi->controller_state) {
644 + cstate = kzalloc(sizeof(struct ltq_spi_controller_state),
645 + GFP_KERNEL);
646 + if (!cstate)
647 + return -ENOMEM;
648 +
649 + spi->controller_state = cstate;
650 + } else
651 + return 0;
652 +
653 + /*
654 + * Up to six GPIOs can be connected to the SPI module
655 + * via GPIO alternate function to control the chip select lines.
656 + * For more flexibility in board layout this driver can also control
657 + * the CS lines via the GPIO API. If GPIOs are to be used, the board setup
658 + * code has to register the SPI device with a struct ltq_spi_controller_data
659 + * attached.
660 + */
661 + if (cdata && cdata->gpio) {
662 + ret = gpio_request(cdata->gpio, "spi-cs");
663 + if (ret)
664 + return -EBUSY;
665 +
666 + ret = spi->mode & SPI_CS_HIGH ? 0 : 1;
667 + gpio_direction_output(cdata->gpio, ret);
668 +
669 + cstate->cs_activate = ltq_spi_gpio_cs_activate;
670 + cstate->cs_deactivate = ltq_spi_gpio_cs_deactivate;
671 + } else {
672 + ret = ltq_gpio_request(&spi->dev, ltq_spi_cs[spi->chip_select].gpio,
673 + ltq_spi_cs[spi->chip_select].mux,
674 + 1, "spi-cs");
675 + if (ret)
676 + return -EBUSY;
677 +
678 + gpocon = (1 << (spi->chip_select +
679 + LTQ_SPI_GPOCON_ISCSBN_SHIFT));
680 +
681 + if (spi->mode & SPI_CS_HIGH)
682 + gpocon |= (1 << spi->chip_select);
683 +
684 + fgpo = (1 << (spi->chip_select + LTQ_SPI_FGPO_SETOUTN_SHIFT));
685 +
686 + ltq_spi_reg_setbit(hw, gpocon, LTQ_SPI_GPOCON);
687 + ltq_spi_reg_setbit(hw, fgpo, LTQ_SPI_FGPO);
688 +
689 + cstate->cs_activate = ltq_spi_internal_cs_activate;
690 + cstate->cs_deactivate = ltq_spi_internal_cs_deactivate;
691 + }
692 +
693 + return 0;
694 +}
695 +
696 +static void ltq_spi_cleanup(struct spi_device *spi)
697 +{
698 + struct ltq_spi_controller_data *cdata = spi->controller_data;
699 + struct ltq_spi_controller_state *cstate = spi->controller_state;
700 + unsigned gpio;
701 +
702 + if (cdata && cdata->gpio)
703 + gpio = cdata->gpio;
704 + else
705 + gpio = ltq_spi_cs[spi->chip_select].gpio;
706 +
707 + gpio_free(gpio);
708 + kfree(cstate);
709 +}
710 +
711 +static void ltq_spi_txfifo_write(struct ltq_spi *hw)
712 +{
713 + u32 fstat, data;
714 + u16 fifo_space;
715 +
716 + /* Determine how many FIFO entries are free for TX data */
717 + fstat = ltq_spi_reg_read(hw, LTQ_SPI_FSTAT);
718 + fifo_space = hw->txfs - ((fstat >> LTQ_SPI_FSTAT_TXFFL_SHIFT) &
719 + LTQ_SPI_FSTAT_TXFFL_MASK);
720 +
721 + if (!fifo_space)
722 + return;
723 +
724 + while (hw->tx_cnt < hw->len && fifo_space) {
725 + data = hw->get_tx(hw);
726 + ltq_spi_reg_write(hw, data, LTQ_SPI_TB);
727 + fifo_space--;
728 + }
729 +}
730 +
731 +static void ltq_spi_rxfifo_read(struct ltq_spi *hw)
732 +{
733 + u32 fstat, data, *rx32;
734 + u16 fifo_fill;
735 + u8 rxbv, shift, *rx8;
736 +
737 + /* Determine how many FIFO entries are filled with RX data */
738 + fstat = ltq_spi_reg_read(hw, LTQ_SPI_FSTAT);
739 + fifo_fill = ((fstat >> LTQ_SPI_FSTAT_RXFFL_SHIFT)
740 + & LTQ_SPI_FSTAT_RXFFL_MASK);
741 +
742 + if (!fifo_fill)
743 + return;
744 +
745 + /*
746 + * The 32 bit FIFO entries are always used completely, independent of
747 + * the bits_per_word value. Thus four bytes have to be read at once
748 + * per FIFO entry.
749 + */
750 + rx32 = (u32 *) hw->rx;
751 + while (hw->len - hw->rx_cnt >= 4 && fifo_fill) {
752 + *rx32++ = ltq_spi_reg_read(hw, LTQ_SPI_RB);
753 + hw->rx_cnt += 4;
754 + hw->rx += 4;
755 + fifo_fill--;
756 + }
757 +
758 + /*
759 + * If there are remaining bytes, read byte count from STAT.RXBV
760 + * register and read the data byte-wise.
761 + */
762 + while (fifo_fill && hw->rx_cnt < hw->len) {
763 + rxbv = (ltq_spi_reg_read(hw, LTQ_SPI_STAT) >>
764 + LTQ_SPI_STAT_RXBV_SHIFT) & LTQ_SPI_STAT_RXBV_MASK;
765 + data = ltq_spi_reg_read(hw, LTQ_SPI_RB);
766 +
767 + shift = (rxbv - 1) * 8;
768 + rx8 = hw->rx;
769 +
770 + while (rxbv) {
771 + *rx8++ = (data >> shift) & 0xFF;
772 + rxbv--;
773 + shift -= 8;
774 + hw->rx_cnt++;
775 + hw->rx++;
776 + }
777 +
778 + fifo_fill--;
779 + }
780 +}
781 +
782 +static void ltq_spi_rxreq_set(struct ltq_spi *hw)
783 +{
784 + u32 rxreq, rxreq_max, rxtodo;
785 +
786 + rxtodo = ltq_spi_reg_read(hw, LTQ_SPI_RXCNT) & LTQ_SPI_RXCNT_TODO_MASK;
787 +
788 + /*
789 + * In RX-only mode the serial clock is activated only after writing
790 + * the expected number of RX bytes into the RXREQ register.
791 + * To avoid receive overflows at high clocks it is better to request
792 + * only the number of bytes that fit into all FIFOs. This value
793 + * depends on the FIFO size implemented in hardware.
794 + */
795 + rxreq = hw->len - hw->rx_cnt;
796 + rxreq_max = hw->rxfs << 2;
797 + rxreq = min(rxreq_max, rxreq);
798 +
799 + if (!rxtodo && rxreq)
800 + ltq_spi_reg_write(hw, rxreq, LTQ_SPI_RXREQ);
801 +}
802 +
803 +static inline void ltq_spi_complete(struct ltq_spi *hw)
804 +{
805 + complete(&hw->done);
806 +}
807 +
808 +irqreturn_t ltq_spi_tx_irq(int irq, void *data)
809 +{
810 + struct ltq_spi *hw = data;
811 + unsigned long flags;
812 + int completed = 0;
813 +
814 + spin_lock_irqsave(&hw->lock, flags);
815 +
816 + if (hw->tx_cnt < hw->len)
817 + ltq_spi_txfifo_write(hw);
818 +
819 + if (hw->tx_cnt == hw->len)
820 + completed = 1;
821 +
822 + spin_unlock_irqrestore(&hw->lock, flags);
823 +
824 + if (completed)
825 + ltq_spi_complete(hw);
826 +
827 + return IRQ_HANDLED;
828 +}
829 +
830 +irqreturn_t ltq_spi_rx_irq(int irq, void *data)
831 +{
832 + struct ltq_spi *hw = data;
833 + unsigned long flags;
834 + int completed = 0;
835 +
836 + spin_lock_irqsave(&hw->lock, flags);
837 +
838 + if (hw->rx_cnt < hw->len) {
839 + ltq_spi_rxfifo_read(hw);
840 +
841 + if (hw->tx && hw->tx_cnt < hw->len)
842 + ltq_spi_txfifo_write(hw);
843 + }
844 +
845 + if (hw->rx_cnt == hw->len)
846 + completed = 1;
847 + else if (!hw->tx)
848 + ltq_spi_rxreq_set(hw);
849 +
850 + spin_unlock_irqrestore(&hw->lock, flags);
851 +
852 + if (completed)
853 + ltq_spi_complete(hw);
854 +
855 + return IRQ_HANDLED;
856 +}
857 +
858 +irqreturn_t ltq_spi_err_irq(int irq, void *data)
859 +{
860 + struct ltq_spi *hw = data;
861 + unsigned long flags;
862 +
863 + spin_lock_irqsave(&hw->lock, flags);
864 +
865 + /* Disable all interrupts */
866 + ltq_spi_reg_clearbit(hw, LTQ_SPI_IRNEN_ALL, LTQ_SPI_IRNEN);
867 +
868 + /* Clear all error flags */
869 + ltq_spi_reg_write(hw, LTQ_SPI_WHBSTATE_CLR_ERRORS, LTQ_SPI_WHBSTATE);
870 +
871 + /* Flush FIFOs */
872 + ltq_spi_reg_setbit(hw, LTQ_SPI_RXFCON_RXFLU, LTQ_SPI_RXFCON);
873 + ltq_spi_reg_setbit(hw, LTQ_SPI_TXFCON_TXFLU, LTQ_SPI_TXFCON);
874 +
875 + hw->status = -EIO;
876 + spin_unlock_irqrestore(&hw->lock, flags);
877 +
878 + ltq_spi_complete(hw);
879 +
880 + return IRQ_HANDLED;
881 +}
882 +
883 +static int ltq_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
884 +{
885 + struct ltq_spi *hw = ltq_spi_to_hw(spi);
886 + u32 irq_flags = 0;
887 +
888 + hw->tx = t->tx_buf;
889 + hw->rx = t->rx_buf;
890 + hw->len = t->len;
891 + hw->tx_cnt = 0;
892 + hw->rx_cnt = 0;
893 + hw->status = 0;
894 + INIT_COMPLETION(hw->done);
895 +
896 + ltq_spi_xmit_set(hw, t);
897 +
898 + /* Enable error interrupts */
899 + ltq_spi_reg_setbit(hw, LTQ_SPI_IRNEN_E, LTQ_SPI_IRNEN);
900 +
901 + if (hw->tx) {
902 + /* Initially fill TX FIFO with as much data as possible */
903 + ltq_spi_txfifo_write(hw);
904 + irq_flags |= LTQ_SPI_IRNEN_T;
905 +
906 + /* Always enable RX interrupt in Full Duplex mode */
907 + if (hw->rx)
908 + irq_flags |= LTQ_SPI_IRNEN_R;
909 + } else if (hw->rx) {
910 + /* Start RX clock */
911 + ltq_spi_rxreq_set(hw);
912 +
913 + /* Enable RX interrupt to receive data from RX FIFOs */
914 + irq_flags |= LTQ_SPI_IRNEN_R;
915 + }
916 +
917 + /* Enable TX or RX interrupts */
918 + ltq_spi_reg_setbit(hw, irq_flags, LTQ_SPI_IRNEN);
919 + wait_for_completion_interruptible(&hw->done);
920 +
921 + /* Disable all interrupts */
922 + ltq_spi_reg_clearbit(hw, LTQ_SPI_IRNEN_ALL, LTQ_SPI_IRNEN);
923 +
924 + /*
925 + * Return length of current transfer for bitbang utility code if
926 + * no errors occurred during transmission.
927 + */
928 + if (!hw->status)
929 + hw->status = hw->len;
930 +
931 + return hw->status;
932 +}
933 +
934 +static const struct ltq_spi_irq_map ltq_spi_irqs[] = {
935 + { "spi_tx", ltq_spi_tx_irq },
936 + { "spi_rx", ltq_spi_rx_irq },
937 + { "spi_err", ltq_spi_err_irq },
938 +};
939 +
940 +static int __init ltq_spi_probe(struct platform_device *pdev)
941 +{
942 + struct spi_master *master;
943 + struct resource *r;
944 + struct ltq_spi *hw;
945 + struct ltq_spi_platform_data *pdata = pdev->dev.platform_data;
946 + int ret, i;
947 + u32 data, id;
948 +
949 + master = spi_alloc_master(&pdev->dev, sizeof(struct ltq_spi));
950 + if (!master) {
951 + dev_err(&pdev->dev, "spi_alloc_master\n");
952 + ret = -ENOMEM;
953 + goto err;
954 + }
955 +
956 + hw = spi_master_get_devdata(master);
957 +
958 + r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
959 + if (r == NULL) {
960 + dev_err(&pdev->dev, "platform_get_resource\n");
961 + ret = -ENOENT;
962 + goto err_master;
963 + }
964 +
965 + r = devm_request_mem_region(&pdev->dev, r->start, resource_size(r),
966 + pdev->name);
967 + if (!r) {
968 + dev_err(&pdev->dev, "devm_request_mem_region\n");
969 + ret = -ENXIO;
970 + goto err_master;
971 + }
972 +
973 + hw->base = devm_ioremap_nocache(&pdev->dev, r->start, resource_size(r));
974 + if (!hw->base) {
975 + dev_err(&pdev->dev, "devm_ioremap_nocache\n");
976 + ret = -ENXIO;
977 + goto err_master;
978 + }
979 +
980 + hw->fpiclk = clk_get_fpi();
981 + if (IS_ERR(hw->fpiclk)) {
982 + dev_err(&pdev->dev, "clk_get\n");
983 + ret = PTR_ERR(hw->fpiclk);
984 + goto err_master;
985 + }
986 +
987 + hw->spiclk = clk_get(&pdev->dev, NULL);
988 + if (IS_ERR(hw->spiclk)) {
989 + dev_err(&pdev->dev, "clk_get\n");
990 + ret = PTR_ERR(hw->spiclk);
991 + goto err_master;
992 + }
993 +
994 + memset(hw->irq, 0, sizeof(hw->irq));
995 + for (i = 0; i < ARRAY_SIZE(ltq_spi_irqs); i++) {
996 + ret = platform_get_irq_byname(pdev, ltq_spi_irqs[i].name);
997 + if (0 > ret) {
998 + dev_err(&pdev->dev, "platform_get_irq_byname\n");
999 + goto err_irq;
1000 + }
1001 +
1002 + hw->irq[i] = ret;
1003 + ret = request_irq(hw->irq[i], ltq_spi_irqs[i].handler,
1004 + 0, ltq_spi_irqs[i].name, hw);
1005 + if (ret) {
1006 + dev_err(&pdev->dev, "request_irq\n");
1007 + goto err_irq;
1008 + }
1009 + }
1010 +
1011 + hw->bitbang.master = spi_master_get(master);
1012 + hw->bitbang.chipselect = ltq_spi_chipselect;
1013 + hw->bitbang.setup_transfer = ltq_spi_setup_transfer;
1014 + hw->bitbang.txrx_bufs = ltq_spi_txrx_bufs;
1015 +
1016 + master->bus_num = pdev->id;
1017 + master->num_chipselect = pdata->num_chipselect;
1018 + master->setup = ltq_spi_setup;
1019 + master->cleanup = ltq_spi_cleanup;
1020 +
1021 + hw->dev = &pdev->dev;
1022 + init_completion(&hw->done);
1023 + spin_lock_init(&hw->lock);
1024 +
1025 + /* Set GPIO alternate functions to SPI */
1026 + ltq_gpio_request(&pdev->dev, LTQ_SPI_GPIO_DI, 2, 0, "spi-di");
1027 + ltq_gpio_request(&pdev->dev, LTQ_SPI_GPIO_DO, 2, 1, "spi-do");
1028 + ltq_gpio_request(&pdev->dev, LTQ_SPI_GPIO_CLK, 2, 1, "spi-clk");
1029 +
1030 + ltq_spi_hw_enable(hw);
1031 +
1032 + /* Read module capabilities */
1033 + id = ltq_spi_reg_read(hw, LTQ_SPI_ID);
1034 + hw->txfs = (id >> LTQ_SPI_ID_TXFS_SHIFT) & LTQ_SPI_ID_TXFS_MASK;
1035 + hw->rxfs = (id >> LTQ_SPI_ID_RXFS_SHIFT) & LTQ_SPI_ID_RXFS_MASK;
1036 + hw->dma_support = (id & LTQ_SPI_ID_CFG) ? 1 : 0;
1037 +
1038 + ltq_spi_config_mode_set(hw);
1039 +
1040 + /* Enable error checking, disable TX/RX, set idle value high */
1041 + data = LTQ_SPI_CON_RUEN | LTQ_SPI_CON_AEN |
1042 + LTQ_SPI_CON_TEN | LTQ_SPI_CON_REN |
1043 + LTQ_SPI_CON_TXOFF | LTQ_SPI_CON_RXOFF | LTQ_SPI_CON_IDLE;
1044 + ltq_spi_reg_write(hw, data, LTQ_SPI_CON);
1045 +
1046 + /* Enable master mode and clear error flags */
1047 + ltq_spi_reg_write(hw, LTQ_SPI_WHBSTATE_SETMS |
1048 + LTQ_SPI_WHBSTATE_CLR_ERRORS, LTQ_SPI_WHBSTATE);
1049 +
1050 + /* Reset GPIO/CS registers */
1051 + ltq_spi_reg_write(hw, 0x0, LTQ_SPI_GPOCON);
1052 + ltq_spi_reg_write(hw, 0xFF00, LTQ_SPI_FGPO);
1053 +
1054 + /* Enable and flush FIFOs */
1055 + ltq_spi_reset_fifos(hw);
1056 +
1057 + ret = spi_bitbang_start(&hw->bitbang);
1058 + if (ret) {
1059 + dev_err(&pdev->dev, "spi_bitbang_start\n");
1060 + goto err_bitbang;
1061 + }
1062 +
1063 + platform_set_drvdata(pdev, hw);
1064 +
1065 + pr_info("Lantiq SoC SPI controller rev %u (TXFS %u, RXFS %u, DMA %u)\n",
1066 + id & LTQ_SPI_ID_REV_MASK, hw->txfs, hw->rxfs, hw->dma_support);
1067 +
1068 + return 0;
1069 +
1070 +err_bitbang:
1071 + ltq_spi_hw_disable(hw);
1072 +
1073 +err_irq:
1074 + clk_put(hw->fpiclk);
1075 +
1076 + for (; i > 0; i--)
1077 + free_irq(hw->irq[i - 1], hw);
1078 +
1079 +err_master:
1080 + spi_master_put(master);
1081 +
1082 +err:
1083 + return ret;
1084 +}
1085 +
1086 +static int __exit ltq_spi_remove(struct platform_device *pdev)
1087 +{
1088 + struct ltq_spi *hw = platform_get_drvdata(pdev);
1089 + int ret, i;
1090 +
1091 + ret = spi_bitbang_stop(&hw->bitbang);
1092 + if (ret)
1093 + return ret;
1094 +
1095 + platform_set_drvdata(pdev, NULL);
1096 +
1097 + ltq_spi_config_mode_set(hw);
1098 + ltq_spi_hw_disable(hw);
1099 +
1100 + for (i = 0; i < ARRAY_SIZE(hw->irq); i++)
1101 + if (0 < hw->irq[i])
1102 + free_irq(hw->irq[i], hw);
1103 +
1104 + gpio_free(LTQ_SPI_GPIO_DI);
1105 + gpio_free(LTQ_SPI_GPIO_DO);
1106 + gpio_free(LTQ_SPI_GPIO_CLK);
1107 +
1108 + clk_put(hw->fpiclk);
1109 + spi_master_put(hw->bitbang.master);
1110 +
1111 + return 0;
1112 +}
1113 +
1114 +static struct platform_driver ltq_spi_driver = {
1115 + .driver = {
1116 + .name = "ltq_spi",
1117 + .owner = THIS_MODULE,
1118 + },
1119 + .remove = __exit_p(ltq_spi_remove),
1120 +};
1121 +
1122 +static int __init ltq_spi_init(void)
1123 +{
1124 + return platform_driver_probe(&ltq_spi_driver, ltq_spi_probe);
1125 +}
1126 +module_init(ltq_spi_init);
1127 +
1128 +static void __exit ltq_spi_exit(void)
1129 +{
1130 + platform_driver_unregister(&ltq_spi_driver);
1131 +}
1132 +module_exit(ltq_spi_exit);
1133 +
1134 +MODULE_DESCRIPTION("Lantiq SoC SPI controller driver");
1135 +MODULE_AUTHOR("Daniel Schwierzeck <daniel.schwierzeck@googlemail.com>");
1136 +MODULE_LICENSE("GPL");
1137 +MODULE_ALIAS("platform:ltq_spi");
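
Reviewer note (not part of the patch): the two structs added to lantiq_platform.h, the "ltq_spi" driver name and the "spi_tx"/"spi_rx"/"spi_err" IRQ resource names looked up by the probe routine imply a board-support registration roughly like the sketch below. The register base, resource size, chip select count, GPIO number, the m25p80 slave and the header paths are illustrative assumptions, not values taken from the patch.

/*
 * Minimal board-support sketch for the spi-xway driver, assuming the
 * "ltq_spi" platform device name and the named IRQ resources from the
 * patch above. Base address, sizes, GPIO and slave details are placeholders.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

#include <lantiq_platform.h>	/* ltq_spi_platform_data, ltq_spi_controller_data */
#include <lantiq_irq.h>		/* LTQ_SSC_TIR/RIR/EIR; include path assumed */

static struct resource ltq_spi_resources[] = {
	{
		.start	= 0x1e100800,			/* SSC base, placeholder */
		.end	= 0x1e100800 + 0x100 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{ .name = "spi_tx",  .start = LTQ_SSC_TIR, .end = LTQ_SSC_TIR, .flags = IORESOURCE_IRQ },
	{ .name = "spi_rx",  .start = LTQ_SSC_RIR, .end = LTQ_SSC_RIR, .flags = IORESOURCE_IRQ },
	{ .name = "spi_err", .start = LTQ_SSC_EIR, .end = LTQ_SSC_EIR, .flags = IORESOURCE_IRQ },
};

static struct ltq_spi_platform_data ltq_spi_pdata = {
	.num_chipselect	= 3,
};

static struct platform_device ltq_spi_device = {
	.name		= "ltq_spi",
	.id		= 0,
	.resource	= ltq_spi_resources,
	.num_resources	= ARRAY_SIZE(ltq_spi_resources),
	.dev		= {
		.platform_data	= &ltq_spi_pdata,
	},
};

/* Slave whose chip select is driven via the GPIO API instead of SSC FGPO */
static struct ltq_spi_controller_data flash_cdata = {
	.gpio	= 15,					/* board specific */
};

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias		= "m25p80",	/* example flash slave */
		.bus_num		= 0,
		.chip_select		= 0,
		.max_speed_hz		= 10000000,
		.controller_data	= &flash_cdata,
	},
};

static int __init board_spi_init(void)
{
	spi_register_board_info(board_spi_devices,
				ARRAY_SIZE(board_spi_devices));
	return platform_device_register(&ltq_spi_device);
}
arch_initcall(board_spi_init);

With a registration along these lines, the probe routine above finds the three IRQs via platform_get_irq_byname(), and the controller_data pointer makes ltq_spi_setup() take the GPIO chip-select path instead of the internal FGPO chip selects.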