/*
 * Lantiq SoC SPI controller
 *
 * Copyright (C) 2011 Daniel Schwierzeck <daniel.schwierzeck@googlemail.com>
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>

#include <lantiq_soc.h>
#include <lantiq_platform.h>

#define LTQ_SPI_CLC		0x00	/* Clock control */
#define LTQ_SPI_PISEL		0x04	/* Port input select */
#define LTQ_SPI_ID		0x08	/* Identification */
#define LTQ_SPI_CON		0x10	/* Control */
#define LTQ_SPI_STAT		0x14	/* Status */
#define LTQ_SPI_WHBSTATE	0x18	/* Write HW modified state */
#define LTQ_SPI_TB		0x20	/* Transmit buffer */
#define LTQ_SPI_RB		0x24	/* Receive buffer */
#define LTQ_SPI_RXFCON		0x30	/* Receive FIFO control */
#define LTQ_SPI_TXFCON		0x34	/* Transmit FIFO control */
#define LTQ_SPI_FSTAT		0x38	/* FIFO status */
#define LTQ_SPI_BRT		0x40	/* Baudrate timer */
#define LTQ_SPI_BRSTAT		0x44	/* Baudrate timer status */
#define LTQ_SPI_SFCON		0x60	/* Serial frame control */
#define LTQ_SPI_SFSTAT		0x64	/* Serial frame status */
#define LTQ_SPI_GPOCON		0x70	/* General purpose output control */
#define LTQ_SPI_GPOSTAT		0x74	/* General purpose output status */
#define LTQ_SPI_FGPO		0x78	/* Forced general purpose output */
#define LTQ_SPI_RXREQ		0x80	/* Receive request */
#define LTQ_SPI_RXCNT		0x84	/* Receive count */
#define LTQ_SPI_DMACON		0xEC	/* DMA control */
#define LTQ_SPI_IRNEN		0xF4	/* Interrupt node enable */
#define LTQ_SPI_IRNICR		0xF8	/* Interrupt node interrupt capture */
#define LTQ_SPI_IRNCR		0xFC	/* Interrupt node control */

#define LTQ_SPI_CLC_SMC_SHIFT	16	/* Clock divider for sleep mode */
#define LTQ_SPI_CLC_SMC_MASK	0xFF
#define LTQ_SPI_CLC_RMC_SHIFT	8	/* Clock divider for normal run mode */
#define LTQ_SPI_CLC_RMC_MASK	0xFF
#define LTQ_SPI_CLC_DISS	BIT(1)	/* Disable status bit */
#define LTQ_SPI_CLC_DISR	BIT(0)	/* Disable request bit */

#define LTQ_SPI_ID_TXFS_SHIFT	24	/* Implemented TX FIFO size */
#define LTQ_SPI_ID_TXFS_MASK	0x3F
#define LTQ_SPI_ID_RXFS_SHIFT	16	/* Implemented RX FIFO size */
#define LTQ_SPI_ID_RXFS_MASK	0x3F
#define LTQ_SPI_ID_REV_MASK	0x1F	/* Hardware revision number */
#define LTQ_SPI_ID_CFG		BIT(5)	/* DMA interface support */

#define LTQ_SPI_CON_BM_SHIFT	16	/* Data width selection */
#define LTQ_SPI_CON_BM_MASK	0x1F
#define LTQ_SPI_CON_EM		BIT(24)	/* Echo mode */
#define LTQ_SPI_CON_IDLE	BIT(23)	/* Idle bit value */
#define LTQ_SPI_CON_ENBV	BIT(22)	/* Enable byte valid control */
#define LTQ_SPI_CON_RUEN	BIT(12)	/* Receive underflow error enable */
#define LTQ_SPI_CON_TUEN	BIT(11)	/* Transmit underflow error enable */
#define LTQ_SPI_CON_AEN		BIT(10)	/* Abort error enable */
#define LTQ_SPI_CON_REN		BIT(9)	/* Receive overflow error enable */
#define LTQ_SPI_CON_TEN		BIT(8)	/* Transmit overflow error enable */
#define LTQ_SPI_CON_LB		BIT(7)	/* Loopback control */
#define LTQ_SPI_CON_PO		BIT(6)	/* Clock polarity control */
#define LTQ_SPI_CON_PH		BIT(5)	/* Clock phase control */
#define LTQ_SPI_CON_HB		BIT(4)	/* Heading control */
#define LTQ_SPI_CON_RXOFF	BIT(1)	/* Switch receiver off */
#define LTQ_SPI_CON_TXOFF	BIT(0)	/* Switch transmitter off */

#define LTQ_SPI_STAT_RXBV_MASK	0x7
#define LTQ_SPI_STAT_RXBV_SHIFT	28
#define LTQ_SPI_STAT_BSY	BIT(13)	/* Busy flag */
#define LTQ_SPI_STAT_RUE	BIT(12)	/* Receive underflow error flag */
#define LTQ_SPI_STAT_TUE	BIT(11)	/* Transmit underflow error flag */
#define LTQ_SPI_STAT_AE		BIT(10)	/* Abort error flag */
#define LTQ_SPI_STAT_RE		BIT(9)	/* Receive error flag */
#define LTQ_SPI_STAT_TE		BIT(8)	/* Transmit error flag */
#define LTQ_SPI_STAT_MS		BIT(1)	/* Master/slave select bit */
#define LTQ_SPI_STAT_EN		BIT(0)	/* Enable bit */

#define LTQ_SPI_WHBSTATE_SETTUE	BIT(15)	/* Set transmit underflow error flag */
#define LTQ_SPI_WHBSTATE_SETAE	BIT(14)	/* Set abort error flag */
#define LTQ_SPI_WHBSTATE_SETRE	BIT(13)	/* Set receive error flag */
#define LTQ_SPI_WHBSTATE_SETTE	BIT(12)	/* Set transmit error flag */
#define LTQ_SPI_WHBSTATE_CLRTUE	BIT(11)	/* Clear transmit underflow error flag */
#define LTQ_SPI_WHBSTATE_CLRAE	BIT(10)	/* Clear abort error flag */
#define LTQ_SPI_WHBSTATE_CLRRE	BIT(9)	/* Clear receive error flag */
#define LTQ_SPI_WHBSTATE_CLRTE	BIT(8)	/* Clear transmit error flag */
#define LTQ_SPI_WHBSTATE_SETME	BIT(7)	/* Set mode error flag */
#define LTQ_SPI_WHBSTATE_CLRME	BIT(6)	/* Clear mode error flag */
#define LTQ_SPI_WHBSTATE_SETRUE	BIT(5)	/* Set receive underflow error flag */
#define LTQ_SPI_WHBSTATE_CLRRUE	BIT(4)	/* Clear receive underflow error flag */
#define LTQ_SPI_WHBSTATE_SETMS	BIT(3)	/* Set master select bit */
#define LTQ_SPI_WHBSTATE_CLRMS	BIT(2)	/* Clear master select bit */
#define LTQ_SPI_WHBSTATE_SETEN	BIT(1)	/* Set enable bit (operational mode) */
#define LTQ_SPI_WHBSTATE_CLREN	BIT(0)	/* Clear enable bit (config mode) */
#define LTQ_SPI_WHBSTATE_CLR_ERRORS	0x0F50

#define LTQ_SPI_RXFCON_RXFITL_SHIFT	8	/* FIFO interrupt trigger level */
#define LTQ_SPI_RXFCON_RXFITL_MASK	0x3F
#define LTQ_SPI_RXFCON_RXFLU		BIT(1)	/* FIFO flush */
#define LTQ_SPI_RXFCON_RXFEN		BIT(0)	/* FIFO enable */

#define LTQ_SPI_TXFCON_TXFITL_SHIFT	8	/* FIFO interrupt trigger level */
#define LTQ_SPI_TXFCON_TXFITL_MASK	0x3F
#define LTQ_SPI_TXFCON_TXFLU		BIT(1)	/* FIFO flush */
#define LTQ_SPI_TXFCON_TXFEN		BIT(0)	/* FIFO enable */

#define LTQ_SPI_FSTAT_RXFFL_MASK	0x3f
#define LTQ_SPI_FSTAT_RXFFL_SHIFT	0
#define LTQ_SPI_FSTAT_TXFFL_MASK	0x3f
#define LTQ_SPI_FSTAT_TXFFL_SHIFT	8

#define LTQ_SPI_GPOCON_ISCSBN_SHIFT	8
#define LTQ_SPI_GPOCON_INVOUTN_SHIFT	0

#define LTQ_SPI_FGPO_SETOUTN_SHIFT	8
#define LTQ_SPI_FGPO_CLROUTN_SHIFT	0

#define LTQ_SPI_RXREQ_RXCNT_MASK	0xFFFF	/* Receive count value */
#define LTQ_SPI_RXCNT_TODO_MASK		0xFFFF	/* Receive to-do value */

#define LTQ_SPI_IRNEN_F		BIT(3)	/* Frame end interrupt request */
#define LTQ_SPI_IRNEN_E		BIT(2)	/* Error end interrupt request */
#define LTQ_SPI_IRNEN_T		BIT(1)	/* Transmit end interrupt request */
#define LTQ_SPI_IRNEN_R		BIT(0)	/* Receive end interrupt request */
#define LTQ_SPI_IRNEN_ALL	0xF

/* Hard-wired GPIOs used by SPI controller */
#define LTQ_SPI_GPIO_DI		(ltq_is_ase() ? 8 : 16)
#define LTQ_SPI_GPIO_DO		(ltq_is_ase() ? 9 : 17)
#define LTQ_SPI_GPIO_CLK	(ltq_is_ase() ? 10 : 18)

struct ltq_spi {
	struct spi_bitbang	bitbang;
	struct completion	done;
	spinlock_t		lock;

	struct device		*dev;
	void __iomem		*base;
	struct clk		*fpiclk;
	struct clk		*spiclk;

	int			status;
	int			irq[3];

	const u8		*tx;
	u8			*rx;
	u32			tx_cnt;
	u32			rx_cnt;
	u32			len;
	struct spi_transfer	*curr_transfer;

	u32 (*get_tx) (struct ltq_spi *);

	u16			txfs;
	u16			rxfs;
	unsigned		dma_support:1;
	unsigned		cfg_mode:1;
};

struct ltq_spi_controller_state {
	void (*cs_activate) (struct spi_device *);
	void (*cs_deactivate) (struct spi_device *);
};

struct ltq_spi_irq_map {
	char		*name;
	irq_handler_t	handler;
};

struct ltq_spi_cs_gpio_map {
	unsigned	gpio;
	unsigned	mux;
};

static inline struct ltq_spi *ltq_spi_to_hw(struct spi_device *spi)
{
	return spi_master_get_devdata(spi->master);
}

static inline u32 ltq_spi_reg_read(struct ltq_spi *hw, u32 reg)
{
	return ioread32be(hw->base + reg);
}

static inline void ltq_spi_reg_write(struct ltq_spi *hw, u32 val, u32 reg)
{
	iowrite32be(val, hw->base + reg);
}

static inline void ltq_spi_reg_setbit(struct ltq_spi *hw, u32 bits, u32 reg)
{
	u32 val;

	val = ltq_spi_reg_read(hw, reg);
	val |= bits;
	ltq_spi_reg_write(hw, val, reg);
}

static inline void ltq_spi_reg_clearbit(struct ltq_spi *hw, u32 bits, u32 reg)
{
	u32 val;

	val = ltq_spi_reg_read(hw, reg);
	val &= ~bits;
	ltq_spi_reg_write(hw, val, reg);
}

static void ltq_spi_hw_enable(struct ltq_spi *hw)
{
	u32 clc;

	/* Power-up module */
	clk_enable(hw->spiclk);

	/*
	 * Set clock divider for run mode to 1 to
	 * run at same frequency as FPI bus
	 */
	clc = (1 << LTQ_SPI_CLC_RMC_SHIFT);
	ltq_spi_reg_write(hw, clc, LTQ_SPI_CLC);
}

static void ltq_spi_hw_disable(struct ltq_spi *hw)
{
	/* Set clock divider to 0 and set module disable bit */
	ltq_spi_reg_write(hw, LTQ_SPI_CLC_DISS, LTQ_SPI_CLC);

	/* Power-down module */
	clk_disable(hw->spiclk);
}

static void ltq_spi_reset_fifos(struct ltq_spi *hw)
{
	u32 val;

	/*
	 * Enable and flush FIFOs. Set interrupt trigger level to
	 * half of FIFO count implemented in hardware.
	 */
	if (hw->txfs > 1) {
		val = hw->txfs << (LTQ_SPI_TXFCON_TXFITL_SHIFT - 1);
		val |= LTQ_SPI_TXFCON_TXFEN | LTQ_SPI_TXFCON_TXFLU;
		ltq_spi_reg_write(hw, val, LTQ_SPI_TXFCON);
	}

	if (hw->rxfs > 1) {
		val = hw->rxfs << (LTQ_SPI_RXFCON_RXFITL_SHIFT - 1);
		val |= LTQ_SPI_RXFCON_RXFEN | LTQ_SPI_RXFCON_RXFLU;
		ltq_spi_reg_write(hw, val, LTQ_SPI_RXFCON);
	}
}

static inline int ltq_spi_wait_ready(struct ltq_spi *hw)
{
	u32 stat;
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(200);

	do {
		stat = ltq_spi_reg_read(hw, LTQ_SPI_STAT);
		if (!(stat & LTQ_SPI_STAT_BSY))
			return 0;

		cond_resched();
	} while (!time_after_eq(jiffies, timeout));

	dev_err(hw->dev, "SPI wait ready timed out stat: %x\n", stat);

	return -ETIMEDOUT;
}

static void ltq_spi_config_mode_set(struct ltq_spi *hw)
{
	if (hw->cfg_mode)
		return;

	/*
	 * Putting the SPI module in config mode is only safe if no
	 * transfer is in progress as indicated by busy flag STAT.BSY.
	 */
	if (ltq_spi_wait_ready(hw)) {
		ltq_spi_reset_fifos(hw);
		hw->status = -ETIMEDOUT;
	}
	ltq_spi_reg_write(hw, LTQ_SPI_WHBSTATE_CLREN, LTQ_SPI_WHBSTATE);

	hw->cfg_mode = 1;
}

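/* Leave config mode and re-enable the module so transfers can run. */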
static void ltq_spi_run_mode_set(struct ltq_spi *hw)
{
	if (!hw->cfg_mode)
		return;

	ltq_spi_reg_write(hw, LTQ_SPI_WHBSTATE_SETEN, LTQ_SPI_WHBSTATE);

	hw->cfg_mode = 0;
}

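/*
 * Helpers that fetch the next word from the TX buffer for 8, 16 and
 * 32 bit wide transfer words and advance the TX byte counters.
 */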
static u32 ltq_spi_tx_word_u8(struct ltq_spi *hw)
{
	const u8 *tx = hw->tx;
	u32 data = *tx++;

	hw->tx_cnt++;
	hw->tx++;

	return data;
}

static u32 ltq_spi_tx_word_u16(struct ltq_spi *hw)
{
	const u16 *tx = (u16 *) hw->tx;
	u32 data = *tx++;

	hw->tx_cnt += 2;
	hw->tx += 2;

	return data;
}

static u32 ltq_spi_tx_word_u32(struct ltq_spi *hw)
{
	const u32 *tx = (u32 *) hw->tx;
	u32 data = *tx++;

	hw->tx_cnt += 4;
	hw->tx += 4;

	return data;
}

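/* Select the matching TX helper and program the word size into CON.BM. */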
static void ltq_spi_bits_per_word_set(struct spi_device *spi)
{
	struct ltq_spi *hw = ltq_spi_to_hw(spi);
	u32 bm;
	u8 bits_per_word = spi->bits_per_word;

	/*
	 * Use either default value of SPI device or value
	 * from current transfer.
	 */
	if (hw->curr_transfer && hw->curr_transfer->bits_per_word)
		bits_per_word = hw->curr_transfer->bits_per_word;

	if (bits_per_word <= 8)
		hw->get_tx = ltq_spi_tx_word_u8;
	else if (bits_per_word <= 16)
		hw->get_tx = ltq_spi_tx_word_u16;
	else if (bits_per_word <= 32)
		hw->get_tx = ltq_spi_tx_word_u32;

	/* CON.BM value = bits_per_word - 1 */
	bm = (bits_per_word - 1) << LTQ_SPI_CON_BM_SHIFT;

	ltq_spi_reg_clearbit(hw, LTQ_SPI_CON_BM_MASK <<
			     LTQ_SPI_CON_BM_SHIFT, LTQ_SPI_CON);
	ltq_spi_reg_setbit(hw, bm, LTQ_SPI_CON);
}

static void ltq_spi_speed_set(struct spi_device *spi)
{
	struct ltq_spi *hw = ltq_spi_to_hw(spi);
	u32 br, max_speed_hz, spi_clk;
	u32 speed_hz = spi->max_speed_hz;

	/*
	 * Use either default value of SPI device or value
	 * from current transfer.
	 */
	if (hw->curr_transfer && hw->curr_transfer->speed_hz)
		speed_hz = hw->curr_transfer->speed_hz;

	/*
	 * SPI module clock is derived from FPI bus clock dependent on
	 * divider value in CLC.RMC which is always set to 1.
	 */
	spi_clk = clk_get_rate(hw->fpiclk);

	/*
	 * Maximum SPI clock frequency in master mode is half of
	 * SPI module clock frequency. Maximum reload value of
	 * baudrate generator BR is 2^16.
	 */
	max_speed_hz = spi_clk / 2;
	if (speed_hz >= max_speed_hz)
		br = 0;
	else
		br = (max_speed_hz / speed_hz) - 1;

	if (br > 0xFFFF)
		br = 0xFFFF;

	ltq_spi_reg_write(hw, br, LTQ_SPI_BRT);
}

static void ltq_spi_clockmode_set(struct spi_device *spi)
{
	struct ltq_spi *hw = ltq_spi_to_hw(spi);
	u32 con;

	con = ltq_spi_reg_read(hw, LTQ_SPI_CON);

	/*
	 * SPI mode mapping in CON register:
	 * Mode CPOL CPHA CON.PO CON.PH
	 *  0    0    0     0      1
	 *  1    0    1     0      0
	 *  2    1    0     1      1
	 *  3    1    1     1      0
	 */
	if (spi->mode & SPI_CPHA)
		con &= ~LTQ_SPI_CON_PH;
	else
		con |= LTQ_SPI_CON_PH;

	if (spi->mode & SPI_CPOL)
		con |= LTQ_SPI_CON_PO;
	else
		con &= ~LTQ_SPI_CON_PO;

	/* Set heading control */
	if (spi->mode & SPI_LSB_FIRST)
		con &= ~LTQ_SPI_CON_HB;
	else
		con |= LTQ_SPI_CON_HB;

	ltq_spi_reg_write(hw, con, LTQ_SPI_CON);
}

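/*
 * Enable the transmitter and/or receiver to match the buffers of the
 * current transfer; with no transfer pending both are switched off.
 */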
static void ltq_spi_xmit_set(struct ltq_spi *hw, struct spi_transfer *t)
{
	u32 con;

	con = ltq_spi_reg_read(hw, LTQ_SPI_CON);

	if (t) {
		if (t->tx_buf && t->rx_buf) {
			con &= ~(LTQ_SPI_CON_TXOFF | LTQ_SPI_CON_RXOFF);
		} else if (t->rx_buf) {
			con &= ~LTQ_SPI_CON_RXOFF;
			con |= LTQ_SPI_CON_TXOFF;
		} else if (t->tx_buf) {
			con &= ~LTQ_SPI_CON_TXOFF;
			con |= LTQ_SPI_CON_RXOFF;
		}
	} else
		con |= (LTQ_SPI_CON_TXOFF | LTQ_SPI_CON_RXOFF);

	ltq_spi_reg_write(hw, con, LTQ_SPI_CON);
}

static void ltq_spi_gpio_cs_activate(struct spi_device *spi)
{
	struct ltq_spi_controller_data *cdata = spi->controller_data;
	int val = spi->mode & SPI_CS_HIGH ? 1 : 0;

	gpio_set_value(cdata->gpio, val);
}

static void ltq_spi_gpio_cs_deactivate(struct spi_device *spi)
{
	struct ltq_spi_controller_data *cdata = spi->controller_data;
	int val = spi->mode & SPI_CS_HIGH ? 0 : 1;

	gpio_set_value(cdata->gpio, val);
}

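/*
 * The hard-wired chip select lines are driven through the FGPO register:
 * CLROUTn asserts the (by default active-low) line, SETOUTn releases it.
 */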
static void ltq_spi_internal_cs_activate(struct spi_device *spi)
{
	struct ltq_spi *hw = ltq_spi_to_hw(spi);
	u32 fgpo;

	fgpo = (1 << (spi->chip_select + LTQ_SPI_FGPO_CLROUTN_SHIFT));
	ltq_spi_reg_setbit(hw, fgpo, LTQ_SPI_FGPO);
}

static void ltq_spi_internal_cs_deactivate(struct spi_device *spi)
{
	struct ltq_spi *hw = ltq_spi_to_hw(spi);
	u32 fgpo;

	fgpo = (1 << (spi->chip_select + LTQ_SPI_FGPO_SETOUTN_SHIFT));
	ltq_spi_reg_setbit(hw, fgpo, LTQ_SPI_FGPO);
}

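/*
 * spi_bitbang chipselect callback: apply the device's word size, speed
 * and clock mode and leave config mode before asserting chip select;
 * return to config mode after chip select has been deasserted.
 */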
static void ltq_spi_chipselect(struct spi_device *spi, int cs)
{
	struct ltq_spi *hw = ltq_spi_to_hw(spi);
	struct ltq_spi_controller_state *cstate = spi->controller_state;

	switch (cs) {
	case BITBANG_CS_ACTIVE:
		ltq_spi_bits_per_word_set(spi);
		ltq_spi_speed_set(spi);
		ltq_spi_clockmode_set(spi);
		ltq_spi_run_mode_set(hw);

		cstate->cs_activate(spi);
		break;

	case BITBANG_CS_INACTIVE:
		cstate->cs_deactivate(spi);

		ltq_spi_config_mode_set(hw);

		break;
	}
}

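/*
 * Per-transfer setup hook: remember the current transfer, validate its
 * word size and put the controller into config mode so the new settings
 * can be applied safely.
 */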
static int ltq_spi_setup_transfer(struct spi_device *spi,
				  struct spi_transfer *t)
{
	struct ltq_spi *hw = ltq_spi_to_hw(spi);
	u8 bits_per_word = spi->bits_per_word;

	hw->curr_transfer = t;

	if (t && t->bits_per_word)
		bits_per_word = t->bits_per_word;

	if (bits_per_word > 32)
		return -EINVAL;

	ltq_spi_config_mode_set(hw);

	return 0;
}

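/*
 * GPIO number and alternate-function mux value of the hard-wired chip
 * select lines; the Amazon-SE (ASE) family uses a different pinout.
 */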
static const struct ltq_spi_cs_gpio_map ltq_spi_cs[] = {
	{ 15, 2 },
	{ 22, 2 },
	{ 13, 1 },
	{ 10, 1 },
	{ 9, 1 },
	{ 11, 3 },
};

static const struct ltq_spi_cs_gpio_map ltq_spi_cs_ase[] = {
	{ 7, 2 },
	{ 15, 1 },
	{ 14, 1 },
};

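/*
 * Master setup callback: validates the word size and claims either a
 * board-supplied chip select GPIO or one of the hard-wired CS pins.
 */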
static int ltq_spi_setup(struct spi_device *spi)
{
	struct ltq_spi *hw = ltq_spi_to_hw(spi);
	struct ltq_spi_controller_data *cdata = spi->controller_data;
	struct ltq_spi_controller_state *cstate;
	u32 gpocon, fgpo;
	int ret;

	/* Set default word length to 8 if not set */
	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	if (spi->bits_per_word > 32)
		return -EINVAL;

	if (!spi->controller_state) {
		cstate = kzalloc(sizeof(struct ltq_spi_controller_state),
				 GFP_KERNEL);
		if (!cstate)
			return -ENOMEM;

		spi->controller_state = cstate;
	} else
		return 0;

	/*
	 * Up to six GPIOs can be connected to the SPI module
	 * via GPIO alternate function to control the chip select lines.
	 * For more flexibility in board layout this driver can also control
	 * the CS lines via GPIO API. If GPIOs should be used, board setup code
	 * has to register the SPI device with struct ltq_spi_controller_data
	 * attached.
	 */
	if (cdata && cdata->gpio) {
		ret = gpio_request(cdata->gpio, "spi-cs");
		if (ret)
			return -EBUSY;

		ret = spi->mode & SPI_CS_HIGH ? 0 : 1;
		gpio_direction_output(cdata->gpio, ret);

		cstate->cs_activate = ltq_spi_gpio_cs_activate;
		cstate->cs_deactivate = ltq_spi_gpio_cs_deactivate;
	} else {
		const struct ltq_spi_cs_gpio_map *cs_map =
			ltq_is_ase() ? ltq_spi_cs_ase : ltq_spi_cs;

		ret = ltq_gpio_request(&spi->dev, cs_map[spi->chip_select].gpio,
				       cs_map[spi->chip_select].mux,
				       1, "spi-cs");
		if (ret)
			return -EBUSY;

		gpocon = (1 << (spi->chip_select +
				LTQ_SPI_GPOCON_ISCSBN_SHIFT));

		if (spi->mode & SPI_CS_HIGH)
			gpocon |= (1 << spi->chip_select);

		fgpo = (1 << (spi->chip_select + LTQ_SPI_FGPO_SETOUTN_SHIFT));

		ltq_spi_reg_setbit(hw, gpocon, LTQ_SPI_GPOCON);
		ltq_spi_reg_setbit(hw, fgpo, LTQ_SPI_FGPO);

		cstate->cs_activate = ltq_spi_internal_cs_activate;
		cstate->cs_deactivate = ltq_spi_internal_cs_deactivate;
	}

	return 0;
}

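/*
 * Release the chip select GPIO claimed in ltq_spi_setup() and free the
 * per-device controller state.
 */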
static void ltq_spi_cleanup(struct spi_device *spi)
{
	struct ltq_spi_controller_data *cdata = spi->controller_data;
	struct ltq_spi_controller_state *cstate = spi->controller_state;
	unsigned gpio;

	if (cdata && cdata->gpio)
		gpio = cdata->gpio;
	else
		gpio = ltq_is_ase() ? ltq_spi_cs_ase[spi->chip_select].gpio :
			ltq_spi_cs[spi->chip_select].gpio;

	gpio_free(gpio);
	kfree(cstate);
}

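/*
 * Fill the TX FIFO with as many words as fit, using the helper selected
 * in ltq_spi_bits_per_word_set().
 */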
static void ltq_spi_txfifo_write(struct ltq_spi *hw)
{
	u32 fstat, data;
	u16 fifo_space;

	/* Determine how many FIFO entries are free for TX data */
	fstat = ltq_spi_reg_read(hw, LTQ_SPI_FSTAT);
	fifo_space = hw->txfs - ((fstat >> LTQ_SPI_FSTAT_TXFFL_SHIFT) &
				 LTQ_SPI_FSTAT_TXFFL_MASK);

	if (!fifo_space)
		return;

	while (hw->tx_cnt < hw->len && fifo_space) {
		data = hw->get_tx(hw);
		ltq_spi_reg_write(hw, data, LTQ_SPI_TB);
		fifo_space--;
	}
}

static void ltq_spi_rxfifo_read(struct ltq_spi *hw)
{
	u32 fstat, data, *rx32;
	u16 fifo_fill;
	u8 rxbv, shift, *rx8;

	/* Determine how many FIFO entries are filled with RX data */
	fstat = ltq_spi_reg_read(hw, LTQ_SPI_FSTAT);
	fifo_fill = ((fstat >> LTQ_SPI_FSTAT_RXFFL_SHIFT)
			& LTQ_SPI_FSTAT_RXFFL_MASK);

	if (!fifo_fill)
		return;

	/*
	 * The 32 bit FIFO is always used completely, independent of the
	 * bits_per_word value. Thus four bytes have to be read at once
	 * per FIFO entry.
	 */
	rx32 = (u32 *) hw->rx;
	while (hw->len - hw->rx_cnt >= 4 && fifo_fill) {
		*rx32++ = ltq_spi_reg_read(hw, LTQ_SPI_RB);
		hw->rx_cnt += 4;
		hw->rx += 4;
		fifo_fill--;
	}

	/*
	 * If there are remaining bytes, read byte count from STAT.RXBV
	 * register and read the data byte-wise.
	 */
	while (fifo_fill && hw->rx_cnt < hw->len) {
		rxbv = (ltq_spi_reg_read(hw, LTQ_SPI_STAT) >>
			LTQ_SPI_STAT_RXBV_SHIFT) & LTQ_SPI_STAT_RXBV_MASK;
		data = ltq_spi_reg_read(hw, LTQ_SPI_RB);

		shift = (rxbv - 1) * 8;
		rx8 = hw->rx;

		while (rxbv) {
			*rx8++ = (data >> shift) & 0xFF;
			rxbv--;
			shift -= 8;
			hw->rx_cnt++;
			hw->rx++;
		}

		fifo_fill--;
	}
}

static void ltq_spi_rxreq_set(struct ltq_spi *hw)
{
	u32 rxreq, rxreq_max, rxtodo;

	rxtodo = ltq_spi_reg_read(hw, LTQ_SPI_RXCNT) & LTQ_SPI_RXCNT_TODO_MASK;

	/*
	 * In RX-only mode the serial clock is activated only after writing
	 * the expected amount of RX bytes into RXREQ register.
	 * To avoid receive overflows at high clocks it is better to request
	 * only the amount of bytes that fits into all FIFOs. This value
	 * depends on the FIFO size implemented in hardware.
	 */
	rxreq = hw->len - hw->rx_cnt;
	rxreq_max = hw->rxfs << 2;
	rxreq = min(rxreq_max, rxreq);

	if (!rxtodo && rxreq)
		ltq_spi_reg_write(hw, rxreq, LTQ_SPI_RXREQ);
}

static inline void ltq_spi_complete(struct ltq_spi *hw)
{
	complete(&hw->done);
}

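/*
 * TX interrupt: refill the TX FIFO and signal completion once all TX
 * data has been queued.
 */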
irqreturn_t ltq_spi_tx_irq(int irq, void *data)
{
	struct ltq_spi *hw = data;
	unsigned long flags;
	int completed = 0;

	spin_lock_irqsave(&hw->lock, flags);

	if (hw->tx_cnt < hw->len)
		ltq_spi_txfifo_write(hw);

	if (hw->tx_cnt == hw->len)
		completed = 1;

	spin_unlock_irqrestore(&hw->lock, flags);

	if (completed)
		ltq_spi_complete(hw);

	return IRQ_HANDLED;
}

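/*
 * RX interrupt: drain the RX FIFO, keep the TX FIFO filled in full-duplex
 * mode and either request more RX data or signal completion.
 */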
irqreturn_t ltq_spi_rx_irq(int irq, void *data)
{
	struct ltq_spi *hw = data;
	unsigned long flags;
	int completed = 0;

	spin_lock_irqsave(&hw->lock, flags);

	if (hw->rx_cnt < hw->len) {
		ltq_spi_rxfifo_read(hw);

		if (hw->tx && hw->tx_cnt < hw->len)
			ltq_spi_txfifo_write(hw);
	}

	if (hw->rx_cnt == hw->len)
		completed = 1;
	else if (!hw->tx)
		ltq_spi_rxreq_set(hw);

	spin_unlock_irqrestore(&hw->lock, flags);

	if (completed)
		ltq_spi_complete(hw);

	return IRQ_HANDLED;
}

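/*
 * Error interrupt: mask all interrupts, clear the error flags, flush both
 * FIFOs and fail the current transfer with -EIO.
 */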
irqreturn_t ltq_spi_err_irq(int irq, void *data)
{
	struct ltq_spi *hw = data;
	unsigned long flags;

	spin_lock_irqsave(&hw->lock, flags);

	/* Disable all interrupts */
	ltq_spi_reg_clearbit(hw, LTQ_SPI_IRNEN_ALL, LTQ_SPI_IRNEN);

	/* Clear all error flags */
	ltq_spi_reg_write(hw, LTQ_SPI_WHBSTATE_CLR_ERRORS, LTQ_SPI_WHBSTATE);

	/* Flush FIFOs */
	ltq_spi_reg_setbit(hw, LTQ_SPI_RXFCON_RXFLU, LTQ_SPI_RXFCON);
	ltq_spi_reg_setbit(hw, LTQ_SPI_TXFCON_TXFLU, LTQ_SPI_TXFCON);

	hw->status = -EIO;
	spin_unlock_irqrestore(&hw->lock, flags);

	ltq_spi_complete(hw);

	return IRQ_HANDLED;
}

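/*
 * spi_bitbang txrx_bufs callback: set up the transfer state, prime the
 * FIFOs, enable the required interrupts and wait for completion.
 */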
static int ltq_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
{
	struct ltq_spi *hw = ltq_spi_to_hw(spi);
	u32 irq_flags = 0;

	hw->tx = t->tx_buf;
	hw->rx = t->rx_buf;
	hw->len = t->len;
	hw->tx_cnt = 0;
	hw->rx_cnt = 0;
	hw->status = 0;
	INIT_COMPLETION(hw->done);

	ltq_spi_xmit_set(hw, t);

	/* Enable error interrupts */
	ltq_spi_reg_setbit(hw, LTQ_SPI_IRNEN_E, LTQ_SPI_IRNEN);

	if (hw->tx) {
		/* Initially fill TX FIFO with as much data as possible */
		ltq_spi_txfifo_write(hw);
		irq_flags |= LTQ_SPI_IRNEN_T;

		/* Always enable RX interrupt in Full Duplex mode */
		if (hw->rx)
			irq_flags |= LTQ_SPI_IRNEN_R;
	} else if (hw->rx) {
		/* Start RX clock */
		ltq_spi_rxreq_set(hw);

		/* Enable RX interrupt to receive data from RX FIFOs */
		irq_flags |= LTQ_SPI_IRNEN_R;
	}

	/* Enable TX or RX interrupts */
	ltq_spi_reg_setbit(hw, irq_flags, LTQ_SPI_IRNEN);
	wait_for_completion_interruptible(&hw->done);

	/* Disable all interrupts */
	ltq_spi_reg_clearbit(hw, LTQ_SPI_IRNEN_ALL, LTQ_SPI_IRNEN);

	/*
	 * Return length of current transfer for bitbang utility code if
	 * no errors occurred during transmission.
	 */
	if (!hw->status)
		hw->status = hw->len;

	return hw->status;
}

static const struct ltq_spi_irq_map ltq_spi_irqs[] = {
	{ "spi_tx", ltq_spi_tx_irq },
	{ "spi_rx", ltq_spi_rx_irq },
	{ "spi_err", ltq_spi_err_irq },
};

static int __devinit
ltq_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct resource *r;
	struct ltq_spi *hw;
	struct ltq_spi_platform_data *pdata = pdev->dev.platform_data;
	int ret, i;
	u32 data, id;

	master = spi_alloc_master(&pdev->dev, sizeof(struct ltq_spi));
	if (!master) {
		dev_err(&pdev->dev, "spi_alloc_master\n");
		ret = -ENOMEM;
		goto err;
	}

	hw = spi_master_get_devdata(master);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		dev_err(&pdev->dev, "platform_get_resource\n");
		ret = -ENOENT;
		goto err_master;
	}

	r = devm_request_mem_region(&pdev->dev, r->start, resource_size(r),
				    pdev->name);
	if (!r) {
		dev_err(&pdev->dev, "devm_request_mem_region\n");
		ret = -ENXIO;
		goto err_master;
	}

	hw->base = devm_ioremap_nocache(&pdev->dev, r->start, resource_size(r));
	if (!hw->base) {
		dev_err(&pdev->dev, "devm_ioremap_nocache\n");
		ret = -ENXIO;
		goto err_master;
	}

	hw->fpiclk = clk_get_fpi();
	if (IS_ERR(hw->fpiclk)) {
		dev_err(&pdev->dev, "fpi clk\n");
		ret = PTR_ERR(hw->fpiclk);
		goto err_master;
	}

	hw->spiclk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(hw->spiclk)) {
		dev_err(&pdev->dev, "spi clk\n");
		ret = PTR_ERR(hw->spiclk);
		goto err_master;
	}

	memset(hw->irq, 0, sizeof(hw->irq));
	for (i = 0; i < ARRAY_SIZE(ltq_spi_irqs); i++) {
		ret = platform_get_irq_byname(pdev, ltq_spi_irqs[i].name);
		if (0 > ret) {
			dev_err(&pdev->dev, "platform_get_irq_byname\n");
			goto err_irq;
		}

		hw->irq[i] = ret;
		ret = request_irq(hw->irq[i], ltq_spi_irqs[i].handler,
				  0, ltq_spi_irqs[i].name, hw);
		if (ret) {
			dev_err(&pdev->dev, "request_irq\n");
			goto err_irq;
		}
	}

	hw->bitbang.master = spi_master_get(master);
	hw->bitbang.chipselect = ltq_spi_chipselect;
	hw->bitbang.setup_transfer = ltq_spi_setup_transfer;
	hw->bitbang.txrx_bufs = ltq_spi_txrx_bufs;

	master->bus_num = pdev->id;
	master->num_chipselect = pdata->num_chipselect;
	master->setup = ltq_spi_setup;
	master->cleanup = ltq_spi_cleanup;

	hw->dev = &pdev->dev;
	init_completion(&hw->done);
	spin_lock_init(&hw->lock);

	/* Set GPIO alternate functions to SPI */
	ltq_gpio_request(&pdev->dev, LTQ_SPI_GPIO_DI, 2, 0, "spi-di");
	ltq_gpio_request(&pdev->dev, LTQ_SPI_GPIO_DO, 2, 1, "spi-do");
	ltq_gpio_request(&pdev->dev, LTQ_SPI_GPIO_CLK, 2, 1, "spi-clk");

	ltq_spi_hw_enable(hw);

	/* Read module capabilities */
	id = ltq_spi_reg_read(hw, LTQ_SPI_ID);
	hw->txfs = (id >> LTQ_SPI_ID_TXFS_SHIFT) & LTQ_SPI_ID_TXFS_MASK;
	hw->rxfs = (id >> LTQ_SPI_ID_RXFS_SHIFT) & LTQ_SPI_ID_RXFS_MASK;
	hw->dma_support = (id & LTQ_SPI_ID_CFG) ? 1 : 0;

	ltq_spi_config_mode_set(hw);

	/* Enable error checking, disable TX/RX, set idle value high */
	data = LTQ_SPI_CON_RUEN | LTQ_SPI_CON_AEN |
	       LTQ_SPI_CON_TEN | LTQ_SPI_CON_REN |
	       LTQ_SPI_CON_TXOFF | LTQ_SPI_CON_RXOFF | LTQ_SPI_CON_IDLE;
	ltq_spi_reg_write(hw, data, LTQ_SPI_CON);

	/* Enable master mode and clear error flags */
	ltq_spi_reg_write(hw, LTQ_SPI_WHBSTATE_SETMS |
			  LTQ_SPI_WHBSTATE_CLR_ERRORS, LTQ_SPI_WHBSTATE);

	/* Reset GPIO/CS registers */
	ltq_spi_reg_write(hw, 0x0, LTQ_SPI_GPOCON);
	ltq_spi_reg_write(hw, 0xFF00, LTQ_SPI_FGPO);

	/* Enable and flush FIFOs */
	ltq_spi_reset_fifos(hw);

	ret = spi_bitbang_start(&hw->bitbang);
	if (ret) {
		dev_err(&pdev->dev, "spi_bitbang_start\n");
		goto err_bitbang;
	}

	platform_set_drvdata(pdev, hw);

	pr_info("Lantiq SoC SPI controller rev %u (TXFS %u, RXFS %u, DMA %u)\n",
		id & LTQ_SPI_ID_REV_MASK, hw->txfs, hw->rxfs, hw->dma_support);

	return 0;

err_bitbang:
	ltq_spi_hw_disable(hw);

err_irq:
	clk_put(hw->fpiclk);

	/* Free only the interrupts that were actually requested */
	for (; i > 0; i--)
		free_irq(hw->irq[i - 1], hw);

err_master:
	spi_master_put(master);

err:
	return ret;
}

static int __devexit
ltq_spi_remove(struct platform_device *pdev)
{
	struct ltq_spi *hw = platform_get_drvdata(pdev);
	int ret, i;

	ret = spi_bitbang_stop(&hw->bitbang);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, NULL);

	ltq_spi_config_mode_set(hw);
	ltq_spi_hw_disable(hw);

	for (i = 0; i < ARRAY_SIZE(hw->irq); i++)
		if (0 < hw->irq[i])
			free_irq(hw->irq[i], hw);

	gpio_free(LTQ_SPI_GPIO_DI);
	gpio_free(LTQ_SPI_GPIO_DO);
	gpio_free(LTQ_SPI_GPIO_CLK);

	clk_put(hw->fpiclk);
	spi_master_put(hw->bitbang.master);

	return 0;
}

static struct platform_driver ltq_spi_driver = {
	.probe = ltq_spi_probe,
	.remove = __devexit_p(ltq_spi_remove),
	.driver = {
		.name = "ltq_spi",
		.owner = THIS_MODULE,
	},
};

module_platform_driver(ltq_spi_driver);

MODULE_DESCRIPTION("Lantiq SoC SPI controller driver");
MODULE_AUTHOR("Daniel Schwierzeck <daniel.schwierzeck@googlemail.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ltq-spi");