ae25c20b3a727d2426da0f4227ff8de408181fa4
[openwrt/svn-archive/archive.git] / target / linux / lantiq / files-3.3 / drivers / spi / spi_svip.c
1 /************************************************************************
2 *
3 * Copyright (c) 2008
4 * Infineon Technologies AG
5 * St. Martin Strasse 53; 81669 Muenchen; Germany
6 *
7 * Inspired by Atmel AT32/AT91 SPI Controller driver
8 * Copyright (c) 2006 Atmel Corporation
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 *
15 ************************************************************************/
16 #include <linux/kernel.h>
17 #include <linux/init.h>
18 #include <linux/module.h>
19 #include <linux/delay.h>
20 #include <linux/interrupt.h>
21 #include <linux/slab.h>
22 #include <linux/platform_device.h>
23 #include <linux/spi/spi.h>
24
25 #include <asm/io.h>
26
27 #include <status_reg.h>
28 #include <base_reg.h>
29 #include <ssc_reg.h>
30 #include <sys0_reg.h>
31 #include <sys1_reg.h>
32
#define SFRAME_SIZE 512 /* bytes: max payload of one serial frame (SFCON.DLEN) */
#define FIFO_HEADROOM 2 /* words: FIFO threshold slack */

#define SVIP_SSC_RFIFO_WORDS 8 /* RX/TX FIFO depth in 32-bit words */

/* Currently programmed duplex mode (mirrors MCON RXOFF/TXOFF bits). */
enum svip_ssc_dir {
	SSC_RXTX,
	SSC_RX,
	SSC_TX,
	SSC_UNDEF	/* forces reprogramming on next transfer */
};
44
/*
 * Per-controller driver state.  The transfer engine is FIFO/IRQ driven:
 * spi_messages are queued on 'queue' and advanced one spi_transfer at a
 * time from the tx/rx/frame interrupt handlers.
 * (NOTE(review): an earlier header comment here mentioned DMA; this
 * driver programs the FIFOs directly via PIO.)
 */
struct svip_ssc_device {
	struct svip_reg_ssc *regs;		/* memory-mapped SSC register block */
	enum svip_ssc_dir bus_dir;		/* duplex mode currently in MCON */
	struct spi_device *stay;		/* device whose CS stays asserted between messages */

	u8 stopping;				/* set during remove(); refuse new work */
	struct list_head queue;			/* pending spi_message list */
	struct spi_transfer *current_transfer;	/* transfer in flight, NULL when idle */
	int remaining_bytes;			/* bytes outstanding in current transfer */
	int rx_bytes;				/* bytes received (or, RX-only: requested) so far */
	int tx_bytes;				/* bytes pushed to the TX FIFO so far */

	char intname[4][16];			/* irq names: tx, rx, err, frm */

	spinlock_t lock;			/* protects queue and transfer state */
};
66
/* Forward declaration: setup is reused when switching devices mid-queue. */
static int svip_ssc_setup(struct spi_device *spi);

/* Base clock (FBS0) rate in Hz, provided by platform code. */
extern unsigned int ltq_get_fbs0_hz(void);
70
/* Assert the GPIO chip select for 'spi' (low WHBGPOSTAT byte clears the OUTN line). */
static void cs_activate(struct svip_ssc_device *ssc_dev, struct spi_device *spi)
{
	ssc_dev->regs->whbgpostat = 0x0001 << spi->chip_select; /* activate the chip select */
}
75
/* Release the GPIO chip select for 'spi' (high WHBGPOSTAT byte sets the OUTN line). */
static void cs_deactivate(struct svip_ssc_device *ssc_dev, struct spi_device *spi)
{
	ssc_dev->regs->whbgpostat = 0x0100 << spi->chip_select; /* deactivate the chip select */
}
80
/*
 * Estimate the Byte Valid (BV) count for the FPI access covering byte
 * position 'byte_pos' of a 'bytelen'-sized packet.
 *
 * "Normally" returns 4 (full 32-bit word).  For the unaligned tail of
 * the packet:
 *   - a 3-byte remainder must be split into a 16-bit plus an 8-bit
 *     transfer (returns 2, then 1);
 *   - a 2- or 1-byte remainder is a single 16-bit or 8-bit transfer.
 *
 * (Was "static int inline"; reordered to the idiomatic "static inline int".)
 */
static inline int _estimate_bv(int byte_pos, int bytelen)
{
	int remainder = bytelen % 4;

	/* still inside the word-aligned body of the packet */
	if (byte_pos < (bytelen - remainder))
		return 4;

	if (remainder == 3)
	{
		/* split the 3 tail bytes into a 2-byte then a 1-byte access */
		if (byte_pos == (bytelen - remainder))
			return 2;
		else
			return 1;
	}
	return remainder;
}
103
/*
 * Program the controller for the next chunk of the current (or next)
 * spi_transfer.  Called with ssc_dev->lock held and SPI irqs blocked.
 *
 * On entry to a new transfer this resets/flushes the FIFOs, selects the
 * duplex mode (MCON RXOFF/TXOFF), programs serial framing (SFCON) and
 * the interrupt enable mask.  It then feeds the TX FIFO and/or drains
 * the RX FIFO; for RX-only transfers it issues RXREQ clock requests.
 */
static void svip_ssc_next_xfer(struct spi_master *master,
			       struct spi_message *msg)
{
	struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
	struct spi_transfer *xfer;
	unsigned char *buf_ptr;

	xfer = ssc_dev->current_transfer;
	if (!xfer || ssc_dev->remaining_bytes == 0) {
		/* advance to the next transfer, or the first of the message */
		if (xfer)
			xfer = list_entry(xfer->transfer_list.next,
					  struct spi_transfer, transfer_list);
		else
			xfer = list_entry(msg->transfers.next,
					  struct spi_transfer, transfer_list);
		ssc_dev->remaining_bytes = xfer->len;
		ssc_dev->rx_bytes = 0;
		ssc_dev->tx_bytes = 0;
		ssc_dev->current_transfer = xfer;
		ssc_dev->regs->sfcon = 0; /* reset Serial Framing */

		/* enable and flush RX/TX FIFO */
		ssc_dev->regs->rxfcon =
			SSC_RXFCON_RXFITL_VAL(SVIP_SSC_RFIFO_WORDS-FIFO_HEADROOM) |
			SSC_RXFCON_RXFLU | /* Receive FIFO Flush */
			SSC_RXFCON_RXFEN; /* Receive FIFO Enable */

		ssc_dev->regs->txfcon =
			SSC_TXFCON_TXFITL_VAL(FIFO_HEADROOM) |
			SSC_TXFCON_TXFLU | /* Transmit FIFO Flush */
			SSC_TXFCON_TXFEN; /* Transmit FIFO Enable */

		asm("sync"); /* make FIFO setup visible before mode change */

		/* select mode RXTX, RX or TX */
		if (xfer->rx_buf && xfer->tx_buf) /* RX and TX */
		{
			if (ssc_dev->bus_dir != SSC_RXTX)
			{
				ssc_dev->regs->mcon &= ~(SSC_MCON_RXOFF | SSC_MCON_TXOFF);
				ssc_dev->bus_dir = SSC_RXTX;
				ssc_dev->regs->irnen = SSC_IRNEN_T | SSC_IRNEN_F | SSC_IRNEN_E;
			}
			/* frame covers the transfer tail, capped at SFRAME_SIZE bytes */
			ssc_dev->regs->sfcon =
				SSC_SFCON_PLEN_VAL(0) |
				SSC_SFCON_DLEN_VAL(((xfer->len-1)%SFRAME_SIZE)*8+7) |
				SSC_SFCON_STOP |
				SSC_SFCON_ICLK_VAL(2) |
				SSC_SFCON_IDAT_VAL(2) |
				SSC_SFCON_IAEN |
				SSC_SFCON_SFEN;

		}
		else if (xfer->rx_buf) /* RX only */
		{
			if (ssc_dev->bus_dir != SSC_RX)
			{
				ssc_dev->regs->mcon =
					(ssc_dev->regs->mcon | SSC_MCON_TXOFF) & ~SSC_MCON_RXOFF;

				ssc_dev->bus_dir = SSC_RX;

				ssc_dev->regs->irnen = SSC_IRNEN_R | SSC_IRNEN_E;
			}
			/* Initiate clock generation for Rx-Only Transfer. In case of RX-only transfer,
			 * rx_bytes represents the number of already requested bytes.
			 */
			ssc_dev->rx_bytes = min(xfer->len, (unsigned)(SVIP_SSC_RFIFO_WORDS*4));
			ssc_dev->regs->rxreq = ssc_dev->rx_bytes;
		}
		else /* TX only */
		{
			if (ssc_dev->bus_dir != SSC_TX)
			{
				ssc_dev->regs->mcon =
					(ssc_dev->regs->mcon | SSC_MCON_RXOFF) & ~SSC_MCON_TXOFF;

				ssc_dev->bus_dir = SSC_TX;

				ssc_dev->regs->irnen =
					SSC_IRNEN_T | SSC_IRNEN_F | SSC_IRNEN_E;
			}
			ssc_dev->regs->sfcon =
				SSC_SFCON_PLEN_VAL(0) |
				SSC_SFCON_DLEN_VAL(((xfer->len-1)%SFRAME_SIZE)*8+7) |
				SSC_SFCON_STOP |
				SSC_SFCON_ICLK_VAL(2) |
				SSC_SFCON_IDAT_VAL(2) |
				SSC_SFCON_IAEN |
				SSC_SFCON_SFEN;
		}
	}

	if (xfer->tx_buf)
	{
		int outstanding;
		int i;
		int fstat = ssc_dev->regs->fstat;
		int txffl = SSC_FSTAT_TXFFL_GET(fstat);
		int rxffl = SSC_FSTAT_RXFFL_GET(fstat);

		/* words in flight: TX fill level (+ RX fill and busy word for RXTX) */
		outstanding = txffl;

		if (xfer->rx_buf)
		{
			outstanding += rxffl;
			if (SSC_STATE_BSY_GET(ssc_dev->regs->state))
				outstanding++;

			/* drain RX FIFO into rx_buf, MSB-first within each word */
			while (rxffl) /* is 0 in TX-Only mode */
			{
				unsigned int rb;
				int rxbv = _estimate_bv(ssc_dev->rx_bytes, xfer->len);
				rb = ssc_dev->regs->rb;
				for (i=0; i<rxbv; i++)
				{
					((unsigned char*)xfer->rx_buf)[ssc_dev->rx_bytes] =
						(rb >> ((rxbv-i-1)*8)) & 0xFF;

					ssc_dev->rx_bytes++;
				}
				rxffl--;
				outstanding--;
			}
			ssc_dev->remaining_bytes = xfer->len - ssc_dev->rx_bytes;
		}

		/* for last Tx cycle set TxFifo threshold to 0 */
		if ((xfer->len - ssc_dev->tx_bytes) <=
		    (4*(SVIP_SSC_RFIFO_WORDS-1-outstanding)))
		{
			ssc_dev->regs->txfcon = SSC_TXFCON_TXFITL_VAL(0) |
				SSC_TXFCON_TXFEN;
		}

		/* refill TX FIFO, always leaving one word of slack */
		while ((ssc_dev->tx_bytes < xfer->len) &&
		       (outstanding < (SVIP_SSC_RFIFO_WORDS-1)))
		{
			unsigned int tb = 0;
			int txbv = _estimate_bv(ssc_dev->tx_bytes, xfer->len);

			for (i=0; i<txbv; i++)
			{
				tb |= ((unsigned char*)xfer->tx_buf)[ssc_dev->tx_bytes] <<
					((txbv-i-1)*8);

				ssc_dev->tx_bytes++;
			}
			/* partial words need a matching-width FPI write to the TB register */
			switch(txbv)
			{
#ifdef __BIG_ENDIAN
			case 1:
				*((unsigned char *)(&(ssc_dev->regs->tb))+3) = tb & 0xFF;
				break;
			case 2:
				*((unsigned short *)(&(ssc_dev->regs->tb))+1) = tb & 0xFFFF;
				break;
#else /* __LITTLE_ENDIAN */
			case 1:
				*((unsigned char *)(&(ssc_dev->regs->tb))) = tb & 0xFF;
				break;
			case 2:
				*((unsigned short *)(&(ssc_dev->regs->tb))) = tb & 0xFFFF;
				break;
#endif
			default:
				ssc_dev->regs->tb = tb;
			}
			outstanding++;
		}
	}
	else /* xfer->tx_buf == NULL -> RX only! */
	{
		int j;
		int rxffl = SSC_FSTAT_RXFFL_GET(ssc_dev->regs->fstat);
		int rxbv = 0;
		unsigned int rbuf;

		buf_ptr = (unsigned char*)xfer->rx_buf +
			(xfer->len - ssc_dev->remaining_bytes);

		/* copy whole words; a short tail word is copied bytewise */
		for (j = 0; j < rxffl; j++)
		{
			rxbv = SSC_STATE_RXBV_GET(ssc_dev->regs->state);
			rbuf = ssc_dev->regs->rb;

			if (rxbv == 4)
			{
				*((unsigned int*)buf_ptr+j) = ntohl(rbuf);
			}
			else
			{
				int b;
#ifdef __BIG_ENDIAN
				for (b = 0; b < rxbv; b++)
				{
					buf_ptr[4*j+b] = ((unsigned char*)(&rbuf))[4-rxbv+b];
				}
#else /* __LITTLE_ENDIAN */
				for (b = 0; b < rxbv; b++)
				{
					buf_ptr[4*j+b] = ((unsigned char*)(&rbuf))[rxbv-1-b];
				}
#endif
			}
			ssc_dev->remaining_bytes -= rxbv;
		}
		/* request the next clock burst once the controller is idle */
		if ((ssc_dev->rx_bytes < xfer->len) &&
		    !SSC_STATE_BSY_GET(ssc_dev->regs->state))
		{
			int rxreq = min(xfer->len - ssc_dev->rx_bytes,
					(unsigned)(SVIP_SSC_RFIFO_WORDS*4));

			ssc_dev->rx_bytes += rxreq;
			ssc_dev->regs->rxreq = rxreq;
		}

		if (ssc_dev->remaining_bytes < 0)
		{
			/* should not happen; clamp so completion logic can proceed */
			printk("ssc_dev->remaining_bytes = %d! xfer->len = %d, "
			       "rxffl=%d, rxbv=%d\n", ssc_dev->remaining_bytes, xfer->len,
			       rxffl, rxbv);

			ssc_dev->remaining_bytes = 0;
		}
	}
}
335
336 /*
337 * Submit next message.
338 * lock is held
339 */
340 static void svip_ssc_next_message(struct spi_master *master)
341 {
342 struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
343 struct spi_message *msg;
344 struct spi_device *spi;
345
346 BUG_ON(ssc_dev->current_transfer);
347
348 msg = list_entry(ssc_dev->queue.next, struct spi_message, queue);
349 spi = msg->spi;
350
351 dev_dbg(master->dev.parent, "start message %p on %p\n", msg, spi);
352
353 /* select chip if it's not still active */
354 if (ssc_dev->stay) {
355 if (ssc_dev->stay != spi) {
356 cs_deactivate(ssc_dev, ssc_dev->stay);
357 svip_ssc_setup(spi);
358 cs_activate(ssc_dev, spi);
359 }
360 ssc_dev->stay = NULL;
361 }
362 else {
363 svip_ssc_setup(spi);
364 cs_activate(ssc_dev, spi);
365 }
366
367 svip_ssc_next_xfer(master, msg);
368 }
369
/*
 * Report completion of 'msg' with 'status' and start the next queued
 * message, if any.  Called with ssc_dev->lock held; the lock is dropped
 * around the caller-supplied completion callback.
 *
 * @stay: non-zero keeps the chip select asserted for a follow-up message
 *        to the same device (ignored when status indicates an error).
 */
static void
svip_ssc_msg_done(struct spi_master *master, struct svip_ssc_device *ssc_dev,
		  struct spi_message *msg, int status, int stay)
{
	if (!stay || status < 0)
		cs_deactivate(ssc_dev, msg->spi);
	else
		ssc_dev->stay = msg->spi;

	list_del(&msg->queue);
	msg->status = status;

	dev_dbg(master->dev.parent,
		"xfer complete: %u bytes transferred\n",
		msg->actual_length);

	/* the completion callback may submit new messages — don't hold the lock */
	spin_unlock(&ssc_dev->lock);
	msg->complete(msg->context);
	spin_lock(&ssc_dev->lock);

	ssc_dev->current_transfer = NULL;

	/* continue if needed */
	if (list_empty(&ssc_dev->queue) || ssc_dev->stopping)
		; /* TODO: disable hardware */
	else
		svip_ssc_next_message(master);
}
402
403 static irqreturn_t svip_ssc_eir_handler(int irq, void *dev_id)
404 {
405 struct platform_device *pdev = (struct platform_device*)dev_id;
406 struct spi_master *master = platform_get_drvdata(pdev);
407 struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
408
409 dev_err (&pdev->dev, "ERROR: errirq. STATE = 0x%0lx\n",
410 ssc_dev->regs->state);
411 return IRQ_HANDLED;
412 }
413
/*
 * RX interrupt handler: drain received data via svip_ssc_next_xfer() and,
 * once the current transfer is finished, either complete the message or
 * kick off the next transfer of the message.
 */
static irqreturn_t svip_ssc_rir_handler(int irq, void *dev_id)
{
	struct platform_device *pdev = (struct platform_device*)dev_id;
	struct spi_master *master = platform_get_drvdata(pdev);
	struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
	struct spi_message *msg;
	struct spi_transfer *xfer;

	xfer = ssc_dev->current_transfer;
	msg = list_entry(ssc_dev->queue.next, struct spi_message, queue);

	/* Tx and Rx Interrupts are fairly unpredictable. Just leave interrupt
	 * handler for spurious Interrupts!
	 */
	if (!xfer) {
		dev_dbg(master->dev.parent,
			"%s(%d): xfer = NULL\n", __FUNCTION__, irq);
		goto out;
	}
	if ( !(xfer->rx_buf) ) {
		dev_dbg(master->dev.parent,
			"%s(%d): xfer->rx_buf = NULL\n", __FUNCTION__, irq);
		goto out;
	}
	if (ssc_dev->remaining_bytes > 0)
	{
		/*
		 * Keep going, we still have data to send in
		 * the current transfer.
		 */
		svip_ssc_next_xfer(master, msg);
	}

	/* next_xfer above may have driven remaining_bytes down to zero */
	if (ssc_dev->remaining_bytes == 0)
	{
		msg->actual_length += xfer->len;

		/* last transfer of the message? */
		if (msg->transfers.prev == &xfer->transfer_list) {
			/* report completed message */
			svip_ssc_msg_done(master, ssc_dev, msg, 0,
					  xfer->cs_change);
		}
		else {
			if (xfer->cs_change) {
				cs_deactivate(ssc_dev, msg->spi);
				udelay(1); /* not nice in interrupt context */
				cs_activate(ssc_dev, msg->spi);
			}

			/* Not done yet. Submit the next transfer. */
			svip_ssc_next_xfer(master, msg);
		}
	}
out:
	return IRQ_HANDLED;
}
470
/*
 * TX interrupt handler: keep the TX FIFO fed while the current transfer
 * still has data.  Transfer/message completion is driven by the RX and
 * frame interrupts, not here.
 */
static irqreturn_t svip_ssc_tir_handler(int irq, void *dev_id)
{
	struct platform_device *pdev = (struct platform_device*)dev_id;
	struct spi_master *master = platform_get_drvdata(pdev);
	struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
	struct spi_message *msg;
	struct spi_transfer *xfer;
	int tx_remain;

	xfer = ssc_dev->current_transfer;
	msg = list_entry(ssc_dev->queue.next, struct spi_message, queue);

	/* Tx and Rx Interrupts are fairly unpredictable. Just leave interrupt
	 * handler for spurious Interrupts!
	 */
	if (!xfer) {
		dev_dbg(master->dev.parent,
			"%s(%d): xfer = NULL\n", __FUNCTION__, irq);
		goto out;
	}
	if ( !(xfer->tx_buf) ) {
		dev_dbg(master->dev.parent,
			"%s(%d): xfer->tx_buf = NULL\n", __FUNCTION__, irq);
		goto out;
	}

	if (ssc_dev->remaining_bytes > 0)
	{
		tx_remain = xfer->len - ssc_dev->tx_bytes;
		if ( tx_remain == 0 )
		{
			/* all bytes already pushed; wait for rx/frame irq to finish up */
			dev_dbg(master->dev.parent,
				"%s(%d): tx_remain = 0\n", __FUNCTION__, irq);
		}
		else
			/*
			 * Keep going, we still have data to send in
			 * the current transfer.
			 */
			svip_ssc_next_xfer(master, msg);
	}
out:
	return IRQ_HANDLED;
}
515
/*
 * Frame interrupt handler: raised when a serial frame (up to SFRAME_SIZE
 * bytes, see SFCON) completes.  Either finishes the transfer/message or
 * programs SFCON for the next full-size frame.
 */
static irqreturn_t svip_ssc_fir_handler(int irq, void *dev_id)
{
	struct platform_device *pdev = (struct platform_device*)dev_id;
	struct spi_master *master = platform_get_drvdata(pdev);
	struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
	struct spi_message *msg;
	struct spi_transfer *xfer;

	xfer = ssc_dev->current_transfer;
	msg = list_entry(ssc_dev->queue.next, struct spi_message, queue);

	/* Tx and Rx Interrupts are fairly unpredictable. Just leave interrupt
	 * handler for spurious Interrupts!
	 */
	if (!xfer) {
		dev_dbg(master->dev.parent,
			"%s(%d): xfer = NULL\n", __FUNCTION__, irq);
		goto out;
	}
	if ( !(xfer->tx_buf) ) {
		dev_dbg(master->dev.parent,
			"%s(%d): xfer->tx_buf = NULL\n", __FUNCTION__, irq);
		goto out;
	}

	if (ssc_dev->remaining_bytes > 0)
	{
		int tx_remain = xfer->len - ssc_dev->tx_bytes;

		if (tx_remain == 0)
		{
			/* Frame interrupt gets raised _before_ last Rx interrupt */
			if (xfer->rx_buf)
			{
				svip_ssc_next_xfer(master, msg);
				if (ssc_dev->remaining_bytes)
					printk("expected RXTX transfer to be complete!\n");
			}
			ssc_dev->remaining_bytes = 0;
		}
		else
		{
			/* more data pending: open the next full-size serial frame */
			ssc_dev->regs->sfcon = SSC_SFCON_PLEN_VAL(0) |
				SSC_SFCON_DLEN_VAL(SFRAME_SIZE*8-1) |
				SSC_SFCON_STOP |
				SSC_SFCON_ICLK_VAL(2) |
				SSC_SFCON_IDAT_VAL(2) |
				SSC_SFCON_IAEN |
				SSC_SFCON_SFEN;
		}
	}

	if (ssc_dev->remaining_bytes == 0)
	{
		msg->actual_length += xfer->len;

		/* last transfer of the message? */
		if (msg->transfers.prev == &xfer->transfer_list) {
			/* report completed message */
			svip_ssc_msg_done(master, ssc_dev, msg, 0,
					  xfer->cs_change);
		}
		else {
			if (xfer->cs_change) {
				cs_deactivate(ssc_dev, msg->spi);
				udelay(1); /* not nice in interrupt context */
				cs_activate(ssc_dev, msg->spi);
			}

			/* Not done yet. Submit the next transfer. */
			svip_ssc_next_xfer(master, msg);
		}
	}

out:
	return IRQ_HANDLED;
}
592
/* the spi->mode bits understood by this driver: */
#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP)
595
/*
 * Configure the controller for 'spi': baud rate, SPI mode bits and
 * master-mode control register.  Also called internally when switching
 * between devices in the message queue (see svip_ssc_next_message()).
 *
 * Returns 0 on success, -EINVAL for unsupported parameters, or
 * -ESHUTDOWN while the driver is being removed.
 */
static int svip_ssc_setup(struct spi_device *spi)
{
	struct spi_master *master = spi->master;
	struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
	unsigned int bits = spi->bits_per_word;
	unsigned int br, sck_hz = spi->max_speed_hz;
	unsigned long flags;

	if (ssc_dev->stopping)
		return -ESHUTDOWN;

	if (spi->chip_select >= master->num_chipselect) {
		dev_dbg(&spi->dev,
			"setup: invalid chipselect %u (%u defined)\n",
			spi->chip_select, master->num_chipselect);
		return -EINVAL;
	}

	/* only 8-bit words are supported */
	if (bits == 0)
		bits = 8;
	if (bits != 8) {
		dev_dbg(&spi->dev,
			"setup: invalid bits_per_word %u (expect 8)\n",
			bits);
		return -EINVAL;
	}

	if (spi->mode & ~MODEBITS) {
		dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
			spi->mode & ~MODEBITS);
		return -EINVAL;
	}

	/* Disable SSC */
	ssc_dev->regs->whbstate = SSC_WHBSTATE_CLREN;

	/* fall back to a slow, safe clock if none was requested */
	if (sck_hz == 0)
		sck_hz = 10000;

	/* br = ceil(fFBS0 / (2*sck_hz)) - 1, so sck_hz is never exceeded */
	br = ltq_get_fbs0_hz()/(2 *sck_hz);
	if (ltq_get_fbs0_hz()%(2 *sck_hz) == 0)
		br = br -1;
	ssc_dev->regs->br = br;

	/* set Control Register */
	ssc_dev->regs->mcon = SSC_MCON_ENBV |
		SSC_MCON_RUEN |
		SSC_MCON_TUEN |
		SSC_MCON_AEN |
		SSC_MCON_REN |
		SSC_MCON_TEN |
		(spi->mode & SPI_CPOL ? SSC_MCON_PO : 0) | /* Clock Polarity */
		(spi->mode & SPI_CPHA ? 0 : SSC_MCON_PH) | /* Tx on trailing edge */
		(spi->mode & SPI_LOOP ? SSC_MCON_LB : 0) | /* Loopback */
		(spi->mode & SPI_LSB_FIRST ? 0 : SSC_MCON_HB); /* MSB first */
	ssc_dev->bus_dir = SSC_UNDEF; /* force duplex reprogramming on next xfer */

	/* Enable SSC */
	ssc_dev->regs->whbstate = SSC_WHBSTATE_SETEN;
	asm("sync");

	spin_lock_irqsave(&ssc_dev->lock, flags);
	if (ssc_dev->stay == spi)
		ssc_dev->stay = NULL;
	cs_deactivate(ssc_dev, spi);
	spin_unlock_irqrestore(&ssc_dev->lock, flags);

	dev_dbg(&spi->dev,
		"setup: %u Hz bpw %u mode 0x%02x cs %u\n",
		sck_hz, bits, spi->mode, spi->chip_select);

	return 0;
}
669
670 static int svip_ssc_transfer(struct spi_device *spi, struct spi_message *msg)
671 {
672 struct spi_master *master = spi->master;
673 struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
674 struct spi_transfer *xfer;
675 unsigned long flags;
676
677 dev_dbg(&spi->dev, "new message %p submitted\n", msg);
678
679 if (unlikely(list_empty(&msg->transfers)
680 || !spi->max_speed_hz)) {
681 return -EINVAL;
682 }
683
684 if (ssc_dev->stopping)
685 return -ESHUTDOWN;
686
687 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
688 if (!(xfer->tx_buf || xfer->rx_buf) || (xfer->len == 0)) {
689 dev_dbg(&spi->dev, "missing rx or tx buf\n");
690 return -EINVAL;
691 }
692
693 /* FIXME implement these protocol options!! */
694 if (xfer->bits_per_word || xfer->speed_hz) {
695 dev_dbg(&spi->dev, "no protocol options yet\n");
696 return -ENOPROTOOPT;
697 }
698
699 #ifdef VERBOSE
700 dev_dbg(spi->dev,
701 " xfer %p: len %u tx %p/%08x rx %p/%08x\n",
702 xfer, xfer->len,
703 xfer->tx_buf, xfer->tx_dma,
704 xfer->rx_buf, xfer->rx_dma);
705 #endif
706 }
707
708 msg->status = -EINPROGRESS;
709 msg->actual_length = 0;
710
711 spin_lock_irqsave(&ssc_dev->lock, flags);
712 list_add_tail(&msg->queue, &ssc_dev->queue);
713 if (!ssc_dev->current_transfer)
714 {
715 /* start transmission machine, if not started yet */
716 svip_ssc_next_message(master);
717 }
718 spin_unlock_irqrestore(&ssc_dev->lock, flags);
719
720 return 0;
721 }
722
/*
 * Per-device teardown: release the chip select if this device still
 * holds it.
 * NOTE(review): nothing in this file ever assigns spi->controller_state,
 * so the early-return guard below makes this a no-op in practice —
 * verify whether the guard is intended.
 */
static void svip_ssc_cleanup(struct spi_device *spi)
{
	struct svip_ssc_device *ssc_dev = spi_master_get_devdata(spi->master);
	unsigned long flags;

	if (!spi->controller_state)
		return;

	spin_lock_irqsave(&ssc_dev->lock, flags);
	if (ssc_dev->stay == spi) {
		ssc_dev->stay = NULL;
		cs_deactivate(ssc_dev, spi);
	}
	spin_unlock_irqrestore(&ssc_dev->lock, flags);
}
738
739 /*-------------------------------------------------------------------------*/
740
/*
 * Probe: allocate the SPI master, map the register block, hook up the
 * four SSC interrupt lines (tx/rx/err/frm), bring the hardware into a
 * known idle state and register the master.
 *
 * NOTE(review): on the error paths 'ret' may still be -ENOMEM even when
 * the failure was a missing resource or irq, and devm_free_irq() is
 * called for all four names even if some were never requested — verify;
 * devm would release requested irqs on failure anyway.
 */
static int __init svip_ssc_probe(struct platform_device *pdev)
{
	int ret;
	struct spi_master *master;
	struct svip_ssc_device *ssc_dev;
	struct resource *res_regs;
	int irq;

	ret = -ENOMEM;

	/* set up the SPI core, then the controller-specific driver state */
	master = spi_alloc_master(&pdev->dev, sizeof (*ssc_dev));
	if (!master)
	{
		dev_err (&pdev->dev, "ERROR: no memory for master spi\n");
		goto errout;
	}

	ssc_dev = spi_master_get_devdata(master);
	platform_set_drvdata(pdev, master);

	master->bus_num = pdev->id;
	master->num_chipselect = 8; /* one per GPO line, see gpocon setup below */
	master->mode_bits = MODEBITS;
	master->setup = svip_ssc_setup;
	master->transfer = svip_ssc_transfer;
	master->cleanup = svip_ssc_cleanup;

	spin_lock_init(&ssc_dev->lock);
	INIT_LIST_HEAD(&ssc_dev->queue);

	/* retrieve register configuration */
	res_regs = platform_get_resource_byname (pdev, IORESOURCE_MEM, "regs");
	if (NULL == res_regs)
	{
		dev_err (&pdev->dev, "ERROR: missed 'regs' resource\n");
		goto spierr;
	}

	/* uncached (KSEG1) mapping of the SSC register block */
	ssc_dev->regs = (struct svip_reg_ssc*)KSEG1ADDR(res_regs->start);

	/* hook up the four SSC interrupt lines */
	irq = platform_get_irq_byname (pdev, "tx");
	if (irq < 0)
		goto irqerr;
	sprintf(ssc_dev->intname[0], "%s_tx", pdev->name);
	ret = devm_request_irq(&pdev->dev, irq, svip_ssc_tir_handler,
			       IRQF_DISABLED, ssc_dev->intname[0], pdev);
	if (ret != 0)
		goto irqerr;

	irq = platform_get_irq_byname (pdev, "rx");
	if (irq < 0)
		goto irqerr;
	sprintf(ssc_dev->intname[1], "%s_rx", pdev->name);
	ret = devm_request_irq(&pdev->dev, irq, svip_ssc_rir_handler,
			       IRQF_DISABLED, ssc_dev->intname[1], pdev);
	if (ret != 0)
		goto irqerr;

	irq = platform_get_irq_byname (pdev, "err");
	if (irq < 0)
		goto irqerr;
	sprintf(ssc_dev->intname[2], "%s_err", pdev->name);
	ret = devm_request_irq(&pdev->dev, irq, svip_ssc_eir_handler,
			       IRQF_DISABLED, ssc_dev->intname[2], pdev);
	if (ret != 0)
		goto irqerr;

	irq = platform_get_irq_byname (pdev, "frm");
	if (irq < 0)
		goto irqerr;
	sprintf(ssc_dev->intname[3], "%s_frm", pdev->name);
	ret = devm_request_irq(&pdev->dev, irq, svip_ssc_fir_handler,
			       IRQF_DISABLED, ssc_dev->intname[3], pdev);
	if (ret != 0)
		goto irqerr;

	/*
	 * Initialize the Hardware
	 */

	/* Clear enable bit, i.e. put SSC into configuration mode */
	ssc_dev->regs->whbstate = SSC_WHBSTATE_CLREN;
	/* enable SSC core to run at fpi clock */
	ssc_dev->regs->clc = SSC_CLC_RMC_VAL(1);
	asm("sync");

	/* GPIO CS */
	ssc_dev->regs->gpocon = SSC_GPOCON_ISCSBN_VAL(0xFF);
	ssc_dev->regs->whbgpostat = SSC_WHBGPOSTAT_SETOUTN_VAL(0xFF); /* CS to high */

	/* Set Master mode */
	ssc_dev->regs->whbstate = SSC_WHBSTATE_SETMS;

	/* enable and flush RX/TX FIFO */
	ssc_dev->regs->rxfcon = SSC_RXFCON_RXFITL_VAL(SVIP_SSC_RFIFO_WORDS-FIFO_HEADROOM) |
		SSC_RXFCON_RXFLU | /* Receive FIFO Flush */
		SSC_RXFCON_RXFEN; /* Receive FIFO Enable */

	ssc_dev->regs->txfcon = SSC_TXFCON_TXFITL_VAL(FIFO_HEADROOM) |
		SSC_TXFCON_TXFLU | /* Transmit FIFO Flush */
		SSC_TXFCON_TXFEN; /* Transmit FIFO Enable */
	asm("sync");

	/* enable IRQ */
	ssc_dev->regs->irnen = SSC_IRNEN_E;

	dev_info(&pdev->dev, "controller at 0x%08lx (irq %d)\n",
		 (unsigned long)ssc_dev->regs, platform_get_irq_byname (pdev, "rx"));

	ret = spi_register_master(master);
	if (ret)
		goto out_reset_hw;

	return 0;

out_reset_hw:

irqerr:
	devm_free_irq (&pdev->dev, platform_get_irq_byname (pdev, "tx"), pdev);
	devm_free_irq (&pdev->dev, platform_get_irq_byname (pdev, "rx"), pdev);
	devm_free_irq (&pdev->dev, platform_get_irq_byname (pdev, "err"), pdev);
	devm_free_irq (&pdev->dev, platform_get_irq_byname (pdev, "frm"), pdev);

spierr:

	spi_master_put(master);

errout:
	return ret;
}
872
/*
 * Remove: block further submissions, fail all still-queued messages with
 * -ESHUTDOWN, release the interrupts and unregister the master.
 *
 * NOTE(review): the queue is walked without holding the lock and with
 * list_for_each_entry() while completion callbacks run; if a callback
 * frees its message the iteration touches freed memory — verify callers
 * or switch to a safe-iteration + list_del pattern.
 */
static int __exit svip_ssc_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
	struct spi_message *msg;

	/* reset the hardware and block queue progress */
	spin_lock_irq(&ssc_dev->lock);
	ssc_dev->stopping = 1;
	/* TODO: shutdown hardware */
	spin_unlock_irq(&ssc_dev->lock);

	/* Terminate remaining queued transfers */
	list_for_each_entry(msg, &ssc_dev->queue, queue) {
		/* REVISIT unmapping the dma is a NOP on ARM and AVR32
		 * but we shouldn't depend on that...
		 */
		msg->status = -ESHUTDOWN;
		msg->complete(msg->context);
	}

	devm_free_irq (&pdev->dev, platform_get_irq_byname (pdev, "tx"), pdev);
	devm_free_irq (&pdev->dev, platform_get_irq_byname (pdev, "rx"), pdev);
	devm_free_irq (&pdev->dev, platform_get_irq_byname (pdev, "err"), pdev);
	devm_free_irq (&pdev->dev, platform_get_irq_byname (pdev, "frm"), pdev);

	spi_unregister_master(master);
	platform_set_drvdata(pdev, NULL);
	spi_master_put(master);
	return 0;
}
904
#ifdef CONFIG_PM
/*
 * Power management.
 *
 * Fixed: the original code called clk_disable()/clk_enable() on
 * ssc_dev->clk, but struct svip_ssc_device has no 'clk' member, so this
 * file never compiled with CONFIG_PM enabled.  Gate the SSC core via
 * the WHBSTATE enable bit instead, matching the enable/disable idiom
 * already used by setup() and probe().
 */
static int svip_ssc_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);

	/* Clear enable bit: put the SSC into configuration (idle) mode */
	ssc_dev->regs->whbstate = SSC_WHBSTATE_CLREN;
	asm("sync");

	return 0;
}

static int svip_ssc_resume(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);

	/* Re-enable the SSC core */
	ssc_dev->regs->whbstate = SSC_WHBSTATE_SETEN;
	asm("sync");

	return 0;
}
#endif
924
/*
 * NOTE(review): .probe points at an __init function while the driver is
 * registered via platform_driver_register(); if a device binds after
 * init memory is discarded this would fault.  Consider
 * platform_driver_probe() or dropping __init — verify on this platform.
 */
static struct platform_driver svip_ssc_driver = {
	.driver = {
		.name = "ifx_ssc",
		.owner = THIS_MODULE,
	},
	.probe = svip_ssc_probe,
#ifdef CONFIG_PM
	.suspend = svip_ssc_suspend,
	.resume = svip_ssc_resume,
#endif
	.remove = __exit_p(svip_ssc_remove)
};
937
/* Module entry point: register the platform driver. */
int __init svip_ssc_init(void)
{
	return platform_driver_register(&svip_ssc_driver);
}

/* Module exit point: unregister the platform driver. */
void __exit svip_ssc_exit(void)
{
	platform_driver_unregister(&svip_ssc_driver);
}
947
948 module_init(svip_ssc_init);
949 module_exit(svip_ssc_exit);
950
951 MODULE_ALIAS("platform:ifx_ssc");
952 MODULE_DESCRIPTION("Lantiq SSC Controller driver");
953 MODULE_AUTHOR("Andreas Schmidt <andreas.schmidt@infineon.com>");
954 MODULE_AUTHOR("Jevgenijs Grigorjevs <Jevgenijs.Grigorjevs@lantiq.com>");
955 MODULE_LICENSE("GPL");