1 /************************************************************************
4 * Infineon Technologies AG
5 * St. Martin Strasse 53; 81669 Muenchen; Germany
7 * Inspired by Atmel AT32/AT91 SPI Controller driver
8 * Copyright (c) 2006 Atmel Corporation
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
15 ************************************************************************/
16 #include <linux/kernel.h>
17 #include <linux/init.h>
18 #include <linux/module.h>
19 #include <linux/delay.h>
20 #include <linux/interrupt.h>
21 #include <linux/slab.h>
22 #include <linux/platform_device.h>
23 #include <linux/spi/spi.h>
27 #include <status_reg.h>
/* Serial-frame chunk size and FIFO tuning constants for the SVIP SSC. */
33 #define SFRAME_SIZE 512 /* bytes */
34 #define FIFO_HEADROOM 2 /* words */
36 #define SVIP_SSC_RFIFO_WORDS 8
46 * The core SPI transfer engine just talks to a register bank to set up
47 * DMA transfers; transfer queue progress is driven by IRQs. The clock
48 * framework provides the base clock, subdivided for each spi_device.
/* Per-controller driver state.
 * NOTE(review): this extraction is garbled -- original source line
 * numbers are embedded in the text and several members used later in
 * this file (remaining_bytes, rx_bytes, tx_bytes, lock, stopping,
 * intname[], clk) are missing from this view.  Code left byte-identical;
 * only comments added. */
50 struct svip_ssc_device
{
/* memory-mapped SSC register bank (KSEG1 address, set in probe) */
51 struct svip_reg_ssc
*regs
;
/* transfer direction currently programmed into MCON (RX/TX/RXTX/UNDEF) */
52 enum svip_ssc_dir bus_dir
;
/* device whose chip select stays asserted between messages, or NULL */
53 struct spi_device
*stay
;
/* pending spi_message queue, protected by the (not visible here) lock */
56 struct list_head queue
;
/* transfer currently being shifted out/in; NULL when engine is idle */
57 struct spi_transfer
*current_transfer
;
/* forward declaration: per-device setup, defined below */
67 static int svip_ssc_setup(struct spi_device
*spi
);
/* FBS0/FPI base clock rate, provided by the platform clock code */
69 extern unsigned int ltq_get_fbs0_hz(void);
71 static void cs_activate(struct svip_ssc_device
*ssc_dev
, struct spi_device
*spi
)
73 ssc_dev
->regs
->whbgpostat
= 0x0001 << spi
->chip_select
; /* activate the chip select */
76 static void cs_deactivate(struct svip_ssc_device
*ssc_dev
, struct spi_device
*spi
)
78 ssc_dev
->regs
->whbgpostat
= 0x0100 << spi
->chip_select
; /* deactivate the chip select */
82 * "Normally" returns Byte Valid = 4.
83 * If the unaligned remainder of the packet is 3 bytes, these have to be
84 * transferred as a combination of a 16-bit and a 8-bit FPI transfer. For
85 * 2 or 1 remaining bytes a single 16-bit or 8-bit transfer will do.
/*
 * _estimate_bv() - Byte Valid count for the next FPI access.
 *
 * "Normally" returns Byte Valid = 4 (a full 32-bit FPI transfer).
 * If the unaligned remainder of the packet is 3 bytes, these have to
 * be transferred as a combination of a 16-bit and an 8-bit FPI
 * transfer.  For 2 or 1 remaining bytes a single 16-bit or 8-bit
 * transfer will do.
 *
 * @byte_pos: current byte offset within the packet
 * @bytelen:  total packet length in bytes
 *
 * NOTE(review): the return statements were elided in this garbled
 * extraction; they are reconstructed here to match the documented
 * contract above -- confirm against the original driver source.
 * Also fixed the nonstandard `static int inline` specifier order.
 */
static inline int _estimate_bv(int byte_pos, int bytelen)
{
	int remainder = bytelen % 4;

	/* still inside the 32-bit aligned body of the packet */
	if (byte_pos < (bytelen - remainder))
		return 4;

	/* first byte of the unaligned tail: 3 -> 2 (+1 later), 2 -> 2, 1 -> 1 */
	if (byte_pos == (bytelen - remainder))
		return (remainder == 3) ? 2 : remainder;

	/* inside a 3-byte tail after the 16-bit access: one final byte */
	return 1;
}
105 * Submit next transfer.
106 * lock is held, spi irq is blocked
/*
 * svip_ssc_next_xfer() - program the SSC for the next spi_transfer of
 * @msg and pump the FIFOs.  Called with ssc_dev->lock held and SSC
 * interrupts blocked.
 *
 * NOTE(review): this extraction is garbled -- original source line
 * numbers are embedded in the text and many lines (braces, some
 * declarations such as 'outstanding', 'tb', 'rb', 'rbuf', 'i', 'j',
 * 'b', the tails of the SFCON flag lists and several #if __BIG_ENDIAN
 * guards) are missing from this view.  Code left byte-identical;
 * only comments added.
 */
108 static void svip_ssc_next_xfer(struct spi_master
*master
,
109 struct spi_message
*msg
)
111 struct svip_ssc_device
*ssc_dev
= spi_master_get_devdata(master
);
112 struct spi_transfer
*xfer
;
113 unsigned char *buf_ptr
;
/* Pick up the transfer in progress; when none is active (or the
 * previous one drained), advance to the next list entry and reset the
 * per-transfer byte counters, serial framing and FIFOs. */
115 xfer
= ssc_dev
->current_transfer
;
116 if (!xfer
|| ssc_dev
->remaining_bytes
== 0) {
118 xfer
= list_entry(xfer
->transfer_list
.next
,
119 struct spi_transfer
, transfer_list
);
121 xfer
= list_entry(msg
->transfers
.next
,
122 struct spi_transfer
, transfer_list
);
123 ssc_dev
->remaining_bytes
= xfer
->len
;
124 ssc_dev
->rx_bytes
= 0;
125 ssc_dev
->tx_bytes
= 0;
126 ssc_dev
->current_transfer
= xfer
;
127 ssc_dev
->regs
->sfcon
= 0; /* reset Serial Framing */
129 /* enable and flush RX/TX FIFO */
130 ssc_dev
->regs
->rxfcon
=
131 SSC_RXFCON_RXFITL_VAL(SVIP_SSC_RFIFO_WORDS
-FIFO_HEADROOM
) |
132 SSC_RXFCON_RXFLU
| /* Receive FIFO Flush */
133 SSC_RXFCON_RXFEN
; /* Receive FIFO Enable */
135 ssc_dev
->regs
->txfcon
=
136 SSC_TXFCON_TXFITL_VAL(FIFO_HEADROOM
) |
137 SSC_TXFCON_TXFLU
| /* Transmit FIFO Flush */
138 SSC_TXFCON_TXFEN
; /* Transmit FIFO Enable */
142 /* select mode RXTX, RX or TX */
143 if (xfer
->rx_buf
&& xfer
->tx_buf
) /* RX and TX */
/* Full duplex: clear both OFF bits, arm Tx/Frame/Error IRQs, and set
 * serial framing for the (len-1)%SFRAME_SIZE tail chunk. */
145 if (ssc_dev
->bus_dir
!= SSC_RXTX
)
147 ssc_dev
->regs
->mcon
&= ~(SSC_MCON_RXOFF
| SSC_MCON_TXOFF
);
148 ssc_dev
->bus_dir
= SSC_RXTX
;
149 ssc_dev
->regs
->irnen
= SSC_IRNEN_T
| SSC_IRNEN_F
| SSC_IRNEN_E
;
151 ssc_dev
->regs
->sfcon
=
152 SSC_SFCON_PLEN_VAL(0) |
153 SSC_SFCON_DLEN_VAL(((xfer
->len
-1)%SFRAME_SIZE
)*8+7) |
155 SSC_SFCON_ICLK_VAL(2) |
156 SSC_SFCON_IDAT_VAL(2) |
161 else if (xfer
->rx_buf
) /* RX only */
/* Half-duplex receive: Tx off, Rx on, only Rx/Error IRQs armed. */
163 if (ssc_dev
->bus_dir
!= SSC_RX
)
165 ssc_dev
->regs
->mcon
=
166 (ssc_dev
->regs
->mcon
| SSC_MCON_TXOFF
) & ~SSC_MCON_RXOFF
;
168 ssc_dev
->bus_dir
= SSC_RX
;
170 ssc_dev
->regs
->irnen
= SSC_IRNEN_R
| SSC_IRNEN_E
;
172 /* Initiate clock generation for Rx-Only Transfer. In case of RX-only transfer,
173 * rx_bytes represents the number of already requested bytes.
175 ssc_dev
->rx_bytes
= min(xfer
->len
, (unsigned)(SVIP_SSC_RFIFO_WORDS
*4));
176 ssc_dev
->regs
->rxreq
= ssc_dev
->rx_bytes
;
/* Half-duplex transmit: Rx off, Tx on, Tx/Frame/Error IRQs armed. */
180 if (ssc_dev
->bus_dir
!= SSC_TX
)
182 ssc_dev
->regs
->mcon
=
183 (ssc_dev
->regs
->mcon
| SSC_MCON_RXOFF
) & ~SSC_MCON_TXOFF
;
185 ssc_dev
->bus_dir
= SSC_TX
;
187 ssc_dev
->regs
->irnen
=
188 SSC_IRNEN_T
| SSC_IRNEN_F
| SSC_IRNEN_E
;
190 ssc_dev
->regs
->sfcon
=
191 SSC_SFCON_PLEN_VAL(0) |
192 SSC_SFCON_DLEN_VAL(((xfer
->len
-1)%SFRAME_SIZE
)*8+7) |
194 SSC_SFCON_ICLK_VAL(2) |
195 SSC_SFCON_IDAT_VAL(2) |
/* Tx (and full-duplex) path: read the FIFO fill levels to work out
 * how many words may still be pushed without overrunning Rx. */
205 int fstat
= ssc_dev
->regs
->fstat
;
206 int txffl
= SSC_FSTAT_TXFFL_GET(fstat
);
207 int rxffl
= SSC_FSTAT_RXFFL_GET(fstat
);
213 outstanding
+= rxffl
;
214 if (SSC_STATE_BSY_GET(ssc_dev
->regs
->state
))
/* Drain the Rx FIFO word by word; _estimate_bv() says how many byte
 * lanes of each 32-bit word are valid at the current position. */
217 while (rxffl
) /* is 0 in TX-Only mode */
220 int rxbv
= _estimate_bv(ssc_dev
->rx_bytes
, xfer
->len
);
221 rb
= ssc_dev
->regs
->rb
;
222 for (i
=0; i
<rxbv
; i
++)
224 ((unsigned char*)xfer
->rx_buf
)[ssc_dev
->rx_bytes
] =
225 (rb
>> ((rxbv
-i
-1)*8)) & 0xFF;
232 ssc_dev
->remaining_bytes
= xfer
->len
- ssc_dev
->rx_bytes
;
235 /* for last Tx cycle set TxFifo threshold to 0 */
236 if ((xfer
->len
- ssc_dev
->tx_bytes
) <=
237 (4*(SVIP_SSC_RFIFO_WORDS
-1-outstanding
)))
239 ssc_dev
->regs
->txfcon
= SSC_TXFCON_TXFITL_VAL(0) |
/* Refill the Tx FIFO while keeping one word of headroom so the Rx
 * FIFO cannot overflow on the echoed data. */
243 while ((ssc_dev
->tx_bytes
< xfer
->len
) &&
244 (outstanding
< (SVIP_SSC_RFIFO_WORDS
-1)))
247 int txbv
= _estimate_bv(ssc_dev
->tx_bytes
, xfer
->len
);
249 for (i
=0; i
<txbv
; i
++)
251 tb
|= ((unsigned char*)xfer
->tx_buf
)[ssc_dev
->tx_bytes
] <<
/* Sub-word FPI writes: select the byte/halfword lane of the TB
 * register according to CPU endianness. */
260 *((unsigned char *)(&(ssc_dev
->regs
->tb
))+3) = tb
& 0xFF;
263 *((unsigned short *)(&(ssc_dev
->regs
->tb
))+1) = tb
& 0xFFFF;
265 #else /* __LITTLE_ENDIAN */
267 *((unsigned char *)(&(ssc_dev
->regs
->tb
))) = tb
& 0xFF;
270 *((unsigned short *)(&(ssc_dev
->regs
->tb
))) = tb
& 0xFFFF;
274 ssc_dev
->regs
->tb
= tb
;
279 else /* xfer->tx_buf == NULL -> RX only! */
/* Rx-only path: copy whole FIFO words into the caller's buffer,
 * using RXBV to handle a partially filled last word. */
282 int rxffl
= SSC_FSTAT_RXFFL_GET(ssc_dev
->regs
->fstat
);
286 buf_ptr
= (unsigned char*)xfer
->rx_buf
+
287 (xfer
->len
- ssc_dev
->remaining_bytes
);
289 for (j
= 0; j
< rxffl
; j
++)
291 rxbv
= SSC_STATE_RXBV_GET(ssc_dev
->regs
->state
);
292 rbuf
= ssc_dev
->regs
->rb
;
296 *((unsigned int*)buf_ptr
+j
) = ntohl(rbuf
);
302 for (b
= 0; b
< rxbv
; b
++)
304 buf_ptr
[4*j
+b
] = ((unsigned char*)(&rbuf
))[4-rxbv
+b
];
306 #else /* __LITTLE_ENDIAN */
307 for (b
= 0; b
< rxbv
; b
++)
309 buf_ptr
[4*j
+b
] = ((unsigned char*)(&rbuf
))[rxbv
-1-b
];
313 ssc_dev
->remaining_bytes
-= rxbv
;
/* Request the next chunk of Rx clock cycles once the core is idle. */
315 if ((ssc_dev
->rx_bytes
< xfer
->len
) &&
316 !SSC_STATE_BSY_GET(ssc_dev
->regs
->state
))
318 int rxreq
= min(xfer
->len
- ssc_dev
->rx_bytes
,
319 (unsigned)(SVIP_SSC_RFIFO_WORDS
*4));
321 ssc_dev
->rx_bytes
+= rxreq
;
322 ssc_dev
->regs
->rxreq
= rxreq
;
/* Defensive: a negative remaining_bytes indicates a counting bug --
 * log it and clamp to zero so the state machine can terminate. */
325 if (ssc_dev
->remaining_bytes
< 0)
327 printk("ssc_dev->remaining_bytes = %d! xfer->len = %d, "
328 "rxffl=%d, rxbv=%d\n", ssc_dev
->remaining_bytes
, xfer
->len
,
331 ssc_dev
->remaining_bytes
= 0;
337 * Submit next message.
340 static void svip_ssc_next_message(struct spi_master
*master
)
342 struct svip_ssc_device
*ssc_dev
= spi_master_get_devdata(master
);
343 struct spi_message
*msg
;
344 struct spi_device
*spi
;
346 BUG_ON(ssc_dev
->current_transfer
);
348 msg
= list_entry(ssc_dev
->queue
.next
, struct spi_message
, queue
);
351 dev_dbg(master
->dev
.parent
, "start message %p on %p\n", msg
, spi
);
353 /* select chip if it's not still active */
355 if (ssc_dev
->stay
!= spi
) {
356 cs_deactivate(ssc_dev
, ssc_dev
->stay
);
358 cs_activate(ssc_dev
, spi
);
360 ssc_dev
->stay
= NULL
;
364 cs_activate(ssc_dev
, spi
);
367 svip_ssc_next_xfer(master
, msg
);
371 * Report message completion.
375 svip_ssc_msg_done(struct spi_master
*master
, struct svip_ssc_device
*ssc_dev
,
376 struct spi_message
*msg
, int status
, int stay
)
378 if (!stay
|| status
< 0)
379 cs_deactivate(ssc_dev
, msg
->spi
);
381 ssc_dev
->stay
= msg
->spi
;
383 list_del(&msg
->queue
);
384 msg
->status
= status
;
386 dev_dbg(master
->dev
.parent
,
387 "xfer complete: %u bytes transferred\n",
390 spin_unlock(&ssc_dev
->lock
);
391 msg
->complete(msg
->context
);
392 spin_lock(&ssc_dev
->lock
);
394 ssc_dev
->current_transfer
= NULL
;
396 /* continue if needed */
397 if (list_empty(&ssc_dev
->queue
) || ssc_dev
->stopping
)
398 ; /* TODO: disable hardware */
400 svip_ssc_next_message(master
);
403 static irqreturn_t
svip_ssc_eir_handler(int irq
, void *dev_id
)
405 struct platform_device
*pdev
= (struct platform_device
*)dev_id
;
406 struct spi_master
*master
= platform_get_drvdata(pdev
);
407 struct svip_ssc_device
*ssc_dev
= spi_master_get_devdata(master
);
409 dev_err (&pdev
->dev
, "ERROR: errirq. STATE = 0x%0lx\n",
410 ssc_dev
->regs
->state
);
/*
 * svip_ssc_rir_handler() - receive interrupt: drain the Rx FIFO via
 * svip_ssc_next_xfer() and complete the message once all its transfers
 * are done.  (Garbled extraction: braces, early returns and the
 * IRQ_HANDLED returns are missing from this view; code left
 * byte-identical, comments only added.)
 */
414 static irqreturn_t
svip_ssc_rir_handler(int irq
, void *dev_id
)
416 struct platform_device
*pdev
= (struct platform_device
*)dev_id
;
417 struct spi_master
*master
= platform_get_drvdata(pdev
);
418 struct svip_ssc_device
*ssc_dev
= spi_master_get_devdata(master
);
419 struct spi_message
*msg
;
420 struct spi_transfer
*xfer
;
422 xfer
= ssc_dev
->current_transfer
;
423 msg
= list_entry(ssc_dev
->queue
.next
, struct spi_message
, queue
);
425 /* Tx and Rx Interrupts are fairly unpredictable. Just leave interrupt
426 * handler for spurious Interrupts!
429 dev_dbg(master
->dev
.parent
,
430 "%s(%d): xfer = NULL\n", __FUNCTION__
, irq
);
/* an Rx interrupt without an rx_buf is likewise treated as spurious */
433 if ( !(xfer
->rx_buf
) ) {
434 dev_dbg(master
->dev
.parent
,
435 "%s(%d): xfer->rx_buf = NULL\n", __FUNCTION__
, irq
);
438 if (ssc_dev
->remaining_bytes
> 0)
441 * Keep going, we still have data to send in
442 * the current transfer.
444 svip_ssc_next_xfer(master
, msg
);
/* transfer drained: account its length, then either finish the
 * message or submit the next transfer (honouring cs_change) */
447 if (ssc_dev
->remaining_bytes
== 0)
449 msg
->actual_length
+= xfer
->len
;
451 if (msg
->transfers
.prev
== &xfer
->transfer_list
) {
452 /* report completed message */
453 svip_ssc_msg_done(master
, ssc_dev
, msg
, 0,
457 if (xfer
->cs_change
) {
458 cs_deactivate(ssc_dev
, msg
->spi
);
459 udelay(1); /* not nice in interrupt context */
460 cs_activate(ssc_dev
, msg
->spi
);
463 /* Not done yet. Submit the next transfer. */
464 svip_ssc_next_xfer(master
, msg
);
/*
 * svip_ssc_tir_handler() - transmit interrupt: refill the Tx FIFO for
 * the transfer in progress.  Completion itself is reported from the
 * Rx/frame interrupt paths.  (Garbled extraction: braces, early
 * returns and IRQ_HANDLED returns missing from this view; code left
 * byte-identical, comments only added.)
 */
471 static irqreturn_t
svip_ssc_tir_handler(int irq
, void *dev_id
)
473 struct platform_device
*pdev
= (struct platform_device
*)dev_id
;
474 struct spi_master
*master
= platform_get_drvdata(pdev
);
475 struct svip_ssc_device
*ssc_dev
= spi_master_get_devdata(master
);
476 struct spi_message
*msg
;
477 struct spi_transfer
*xfer
;
480 xfer
= ssc_dev
->current_transfer
;
481 msg
= list_entry(ssc_dev
->queue
.next
, struct spi_message
, queue
);
483 /* Tx and Rx Interrupts are fairly unpredictable. Just leave interrupt
484 * handler for spurious Interrupts!
487 dev_dbg(master
->dev
.parent
,
488 "%s(%d): xfer = NULL\n", __FUNCTION__
, irq
);
/* a Tx interrupt without a tx_buf is treated as spurious */
491 if ( !(xfer
->tx_buf
) ) {
492 dev_dbg(master
->dev
.parent
,
493 "%s(%d): xfer->tx_buf = NULL\n", __FUNCTION__
, irq
);
/* only refill while this transfer still has bytes outstanding */
497 if (ssc_dev
->remaining_bytes
> 0)
499 tx_remain
= xfer
->len
- ssc_dev
->tx_bytes
;
500 if ( tx_remain
== 0 )
502 dev_dbg(master
->dev
.parent
,
503 "%s(%d): tx_remain = 0\n", __FUNCTION__
, irq
);
507 * Keep going, we still have data to send in
508 * the current transfer.
510 svip_ssc_next_xfer(master
, msg
);
/*
 * svip_ssc_fir_handler() - serial-frame interrupt: marks the end of a
 * SFRAME_SIZE frame on the Tx path; completes the transfer/message or
 * re-arms framing for the next chunk.  (Garbled extraction: braces,
 * branch structure, parts of the SFCON flag list and the returns are
 * missing from this view; code left byte-identical, comments only
 * added.)
 */
516 static irqreturn_t
svip_ssc_fir_handler(int irq
, void *dev_id
)
518 struct platform_device
*pdev
= (struct platform_device
*)dev_id
;
519 struct spi_master
*master
= platform_get_drvdata(pdev
);
520 struct svip_ssc_device
*ssc_dev
= spi_master_get_devdata(master
);
521 struct spi_message
*msg
;
522 struct spi_transfer
*xfer
;
524 xfer
= ssc_dev
->current_transfer
;
525 msg
= list_entry(ssc_dev
->queue
.next
, struct spi_message
, queue
);
527 /* Tx and Rx Interrupts are fairly unpredictable. Just leave interrupt
528 * handler for spurious Interrupts!
531 dev_dbg(master
->dev
.parent
,
532 "%s(%d): xfer = NULL\n", __FUNCTION__
, irq
);
535 if ( !(xfer
->tx_buf
) ) {
536 dev_dbg(master
->dev
.parent
,
537 "%s(%d): xfer->tx_buf = NULL\n", __FUNCTION__
, irq
);
541 if (ssc_dev
->remaining_bytes
> 0)
543 int tx_remain
= xfer
->len
- ssc_dev
->tx_bytes
;
547 /* Frame interrupt gets raised _before_ last Rx interrupt */
550 svip_ssc_next_xfer(master
, msg
);
551 if (ssc_dev
->remaining_bytes
)
552 printk("expected RXTX transfer to be complete!\n");
554 ssc_dev
->remaining_bytes
= 0;
/* re-arm serial framing for a further full SFRAME_SIZE frame */
558 ssc_dev
->regs
->sfcon
= SSC_SFCON_PLEN_VAL(0) |
559 SSC_SFCON_DLEN_VAL(SFRAME_SIZE
*8-1) |
561 SSC_SFCON_ICLK_VAL(2) |
562 SSC_SFCON_IDAT_VAL(2) |
/* same completion logic as the Rx interrupt path */
568 if (ssc_dev
->remaining_bytes
== 0)
570 msg
->actual_length
+= xfer
->len
;
572 if (msg
->transfers
.prev
== &xfer
->transfer_list
) {
573 /* report completed message */
574 svip_ssc_msg_done(master
, ssc_dev
, msg
, 0,
578 if (xfer
->cs_change
) {
579 cs_deactivate(ssc_dev
, msg
->spi
);
580 udelay(1); /* not nice in interrupt context */
581 cs_activate(ssc_dev
, msg
->spi
);
584 /* Not done yet. Submit the next transfer. */
585 svip_ssc_next_xfer(master
, msg
);
593 /* the spi->mode bits understood by this driver: */
/* (svip_ssc_setup() rejects any mode bits outside this mask) */
594 #define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP)
/*
 * svip_ssc_setup() - spi_master->setup hook: validate chip select,
 * word size and mode bits, then program baud rate (derived from the
 * FBS0 base clock) and the MCON mode register for @spi.  (Garbled
 * extraction: error returns, the bits_per_word check, the 'flags'
 * declaration and part of the MCON value are missing from this view;
 * code left byte-identical, comments only added.)
 */
596 static int svip_ssc_setup(struct spi_device
*spi
)
598 struct spi_master
*master
= spi
->master
;
599 struct svip_ssc_device
*ssc_dev
= spi_master_get_devdata(master
);
600 unsigned int bits
= spi
->bits_per_word
;
601 unsigned int br
, sck_hz
= spi
->max_speed_hz
;
/* refuse new work while the driver is being removed */
604 if (ssc_dev
->stopping
)
607 if (spi
->chip_select
>= master
->num_chipselect
) {
609 "setup: invalid chipselect %u (%u defined)\n",
610 spi
->chip_select
, master
->num_chipselect
);
618 "setup: invalid bits_per_word %u (expect 8)\n",
623 if (spi
->mode
& ~MODEBITS
) {
624 dev_dbg(&spi
->dev
, "setup: unsupported mode bits %x\n",
625 spi
->mode
& ~MODEBITS
);
/* put the SSC into configuration mode before touching BR/MCON */
630 ssc_dev
->regs
->whbstate
= SSC_WHBSTATE_CLREN
;
/* baud-rate divider from the FBS0 clock; NOTE(review): the body of
 * the even-division branch is missing here -- presumably it adjusts
 * br by one, confirm against the original source */
635 br
= ltq_get_fbs0_hz()/(2 *sck_hz
);
636 if (ltq_get_fbs0_hz()%(2 *sck_hz
) == 0)
638 ssc_dev
->regs
->br
= br
;
640 /* set Control Register */
641 ssc_dev
->regs
->mcon
= SSC_MCON_ENBV
|
647 (spi
->mode
& SPI_CPOL
? SSC_MCON_PO
: 0) | /* Clock Polarity */
648 (spi
->mode
& SPI_CPHA
? 0 : SSC_MCON_PH
) | /* Tx on trailing edge */
649 (spi
->mode
& SPI_LOOP
? SSC_MCON_LB
: 0) | /* Loopback */
650 (spi
->mode
& SPI_LSB_FIRST
? 0 : SSC_MCON_HB
); /* MSB first */
/* force direction re-programming on the next transfer */
651 ssc_dev
->bus_dir
= SSC_UNDEF
;
/* leave configuration mode */
654 ssc_dev
->regs
->whbstate
= SSC_WHBSTATE_SETEN
;
/* drop any sticky chip select owned by this device */
657 spin_lock_irqsave(&ssc_dev
->lock
, flags
);
658 if (ssc_dev
->stay
== spi
)
659 ssc_dev
->stay
= NULL
;
660 cs_deactivate(ssc_dev
, spi
);
661 spin_unlock_irqrestore(&ssc_dev
->lock
, flags
);
664 "setup: %u Hz bpw %u mode 0x%02x cs %u\n",
665 sck_hz
, bits
, spi
->mode
, spi
->chip_select
);
/*
 * svip_ssc_transfer() - spi_master->transfer hook: validate @msg and
 * append it to the queue; kicks the transfer engine when it is idle.
 * (Garbled extraction: error returns, the 'flags' declaration and the
 * final return are missing from this view; code left byte-identical,
 * comments only added.)
 */
670 static int svip_ssc_transfer(struct spi_device
*spi
, struct spi_message
*msg
)
672 struct spi_master
*master
= spi
->master
;
673 struct svip_ssc_device
*ssc_dev
= spi_master_get_devdata(master
);
674 struct spi_transfer
*xfer
;
677 dev_dbg(&spi
->dev
, "new message %p submitted\n", msg
);
/* reject empty messages and devices without a clock rate */
679 if (unlikely(list_empty(&msg
->transfers
)
680 || !spi
->max_speed_hz
)) {
684 if (ssc_dev
->stopping
)
/* each transfer needs a buffer and a length; per-transfer speed or
 * word-size overrides are not implemented */
687 list_for_each_entry(xfer
, &msg
->transfers
, transfer_list
) {
688 if (!(xfer
->tx_buf
|| xfer
->rx_buf
) || (xfer
->len
== 0)) {
689 dev_dbg(&spi
->dev
, "missing rx or tx buf\n");
693 /* FIXME implement these protocol options!! */
694 if (xfer
->bits_per_word
|| xfer
->speed_hz
) {
695 dev_dbg(&spi
->dev
, "no protocol options yet\n");
701 " xfer %p: len %u tx %p/%08x rx %p/%08x\n",
703 xfer
->tx_buf
, xfer
->tx_dma
,
704 xfer
->rx_buf
, xfer
->rx_dma
);
708 msg
->status
= -EINPROGRESS
;
709 msg
->actual_length
= 0;
/* enqueue under the lock; start the engine only if it is idle */
711 spin_lock_irqsave(&ssc_dev
->lock
, flags
);
712 list_add_tail(&msg
->queue
, &ssc_dev
->queue
);
713 if (!ssc_dev
->current_transfer
)
715 /* start transmission machine, if not started yet */
716 svip_ssc_next_message(master
);
718 spin_unlock_irqrestore(&ssc_dev
->lock
, flags
);
723 static void svip_ssc_cleanup(struct spi_device
*spi
)
725 struct svip_ssc_device
*ssc_dev
= spi_master_get_devdata(spi
->master
);
728 if (!spi
->controller_state
)
731 spin_lock_irqsave(&ssc_dev
->lock
, flags
);
732 if (ssc_dev
->stay
== spi
) {
733 ssc_dev
->stay
= NULL
;
734 cs_deactivate(ssc_dev
, spi
);
736 spin_unlock_irqrestore(&ssc_dev
->lock
, flags
);
739 /*-------------------------------------------------------------------------*/
/*
 * svip_ssc_probe() - allocate the spi_master, map the "regs" resource
 * (uncached KSEG1), request the four named IRQs (tx/rx/err/frm), put
 * the SSC into a sane master-mode default state and register the
 * master.  (Garbled extraction: error paths/labels and the 'irq' and
 * 'ret' declarations are missing from this view; code left
 * byte-identical, comments only added/typo-fixed.)
 */
741 static int __init
svip_ssc_probe(struct platform_device
*pdev
)
744 struct spi_master
*master
;
745 struct svip_ssc_device
*ssc_dev
;
746 struct resource
*res_regs
;
751 /* setup spi core then atmel-specific driver state */
752 master
= spi_alloc_master(&pdev
->dev
, sizeof (*ssc_dev
));
755 dev_err (&pdev
->dev
, "ERROR: no memory for master spi\n");
759 ssc_dev
= spi_master_get_devdata(master
);
760 platform_set_drvdata(pdev
, master
);
/* legacy (pre-message-queue) spi_master hooks */
762 master
->bus_num
= pdev
->id
;
763 master
->num_chipselect
= 8;
764 master
->mode_bits
= MODEBITS
;
765 master
->setup
= svip_ssc_setup
;
766 master
->transfer
= svip_ssc_transfer
;
767 master
->cleanup
= svip_ssc_cleanup
;
769 spin_lock_init(&ssc_dev
->lock
);
770 INIT_LIST_HEAD(&ssc_dev
->queue
);
772 /* retrieve register configuration */
773 res_regs
= platform_get_resource_byname (pdev
, IORESOURCE_MEM
, "regs");
774 if (NULL
== res_regs
)
776 dev_err (&pdev
->dev
, "ERROR: missed 'regs' resource\n");
/* uncached KSEG1 mapping of the register block (MIPS) */
780 ssc_dev
->regs
= (struct svip_reg_ssc
*)KSEG1ADDR(res_regs
->start
);
/* four named IRQs: transmit, receive, error and frame */
782 irq
= platform_get_irq_byname (pdev
, "tx");
785 sprintf(ssc_dev
->intname
[0], "%s_tx", pdev
->name
);
786 ret
= devm_request_irq(&pdev
->dev
, irq
, svip_ssc_tir_handler
,
787 IRQF_DISABLED
, ssc_dev
->intname
[0], pdev
);
791 irq
= platform_get_irq_byname (pdev
, "rx");
794 sprintf(ssc_dev
->intname
[1], "%s_rx", pdev
->name
);
795 ret
= devm_request_irq(&pdev
->dev
, irq
, svip_ssc_rir_handler
,
796 IRQF_DISABLED
, ssc_dev
->intname
[1], pdev
);
800 irq
= platform_get_irq_byname (pdev
, "err");
803 sprintf(ssc_dev
->intname
[2], "%s_err", pdev
->name
);
804 ret
= devm_request_irq(&pdev
->dev
, irq
, svip_ssc_eir_handler
,
805 IRQF_DISABLED
, ssc_dev
->intname
[2], pdev
);
809 irq
= platform_get_irq_byname (pdev
, "frm");
812 sprintf(ssc_dev
->intname
[3], "%s_frm", pdev
->name
);
813 ret
= devm_request_irq(&pdev
->dev
, irq
, svip_ssc_fir_handler
,
814 IRQF_DISABLED
, ssc_dev
->intname
[3], pdev
);
819 * Initialize the Hardware
822 /* Clear enable bit, i.e. put SSC into configuration mode */
823 ssc_dev
->regs
->whbstate
= SSC_WHBSTATE_CLREN
;
824 /* enable SSC core to run at fpi clock */
825 ssc_dev
->regs
->clc
= SSC_CLC_RMC_VAL(1);
/* NOTE(review): ISCSBN presumably hands the CS outputs to the SSC --
 * confirm against the SSC register manual */
829 ssc_dev
->regs
->gpocon
= SSC_GPOCON_ISCSBN_VAL(0xFF);
830 ssc_dev
->regs
->whbgpostat
= SSC_WHBGPOSTAT_SETOUTN_VAL(0xFF); /* CS to high */
832 /* Set Master mode */
833 ssc_dev
->regs
->whbstate
= SSC_WHBSTATE_SETMS
;
835 /* enable and flush RX/TX FIFO */
836 ssc_dev
->regs
->rxfcon
= SSC_RXFCON_RXFITL_VAL(SVIP_SSC_RFIFO_WORDS
-FIFO_HEADROOM
) |
837 SSC_RXFCON_RXFLU
| /* Receive FIFO Flush */
838 SSC_RXFCON_RXFEN
; /* Receive FIFO Enable */
840 ssc_dev
->regs
->txfcon
= SSC_TXFCON_TXFITL_VAL(FIFO_HEADROOM
) |
841 SSC_TXFCON_TXFLU
| /* Transmit FIFO Flush */
842 SSC_TXFCON_TXFEN
; /* Transmit FIFO Enable */
/* only the error interrupt stays armed while idle */
846 ssc_dev
->regs
->irnen
= SSC_IRNEN_E
;
848 dev_info(&pdev
->dev
, "controller at 0x%08lx (irq %d)\n",
849 (unsigned long)ssc_dev
->regs
, platform_get_irq_byname (pdev
, "rx"));
851 ret
= spi_register_master(master
);
/* error unwind: release all four IRQs and drop the master reference */
860 devm_free_irq (&pdev
->dev
, platform_get_irq_byname (pdev
, "tx"), pdev
);
861 devm_free_irq (&pdev
->dev
, platform_get_irq_byname (pdev
, "rx"), pdev
);
862 devm_free_irq (&pdev
->dev
, platform_get_irq_byname (pdev
, "err"), pdev
);
863 devm_free_irq (&pdev
->dev
, platform_get_irq_byname (pdev
, "frm"), pdev
);
867 spi_master_put(master
);
/*
 * svip_ssc_remove() - driver tear-down: stop the queue, fail all
 * pending messages with -ESHUTDOWN, release the IRQs and unregister
 * the master.  (Garbled extraction: braces and the final return are
 * missing from this view; code left byte-identical, comments only
 * added.)
 */
873 static int __exit
svip_ssc_remove(struct platform_device
*pdev
)
875 struct spi_master
*master
= platform_get_drvdata(pdev
);
876 struct svip_ssc_device
*ssc_dev
= spi_master_get_devdata(master
);
877 struct spi_message
*msg
;
879 /* reset the hardware and block queue progress */
880 spin_lock_irq(&ssc_dev
->lock
);
881 ssc_dev
->stopping
= 1;
882 /* TODO: shutdown hardware */
883 spin_unlock_irq(&ssc_dev
->lock
);
885 /* Terminate remaining queued transfers */
886 list_for_each_entry(msg
, &ssc_dev
->queue
, queue
) {
887 /* REVISIT unmapping the dma is a NOP on ARM and AVR32
888 * but we shouldn't depend on that...
890 msg
->status
= -ESHUTDOWN
;
891 msg
->complete(msg
->context
);
/* NOTE(review): the completion callback may free @msg while
 * list_for_each_entry() is still walking the list --
 * list_for_each_entry_safe() would be safer here; confirm. */
894 devm_free_irq (&pdev
->dev
, platform_get_irq_byname (pdev
, "tx"), pdev
);
895 devm_free_irq (&pdev
->dev
, platform_get_irq_byname (pdev
, "rx"), pdev
);
896 devm_free_irq (&pdev
->dev
, platform_get_irq_byname (pdev
, "err"), pdev
);
897 devm_free_irq (&pdev
->dev
, platform_get_irq_byname (pdev
, "frm"), pdev
);
899 spi_unregister_master(master
);
900 platform_set_drvdata(pdev
, NULL
);
901 spi_master_put(master
);
906 static int svip_ssc_suspend(struct platform_device
*pdev
, pm_message_t mesg
)
908 struct spi_master
*master
= platform_get_drvdata(pdev
);
909 struct svip_ssc_device
*ssc_dev
= spi_master_get_devdata(master
);
911 clk_disable(ssc_dev
->clk
);
915 static int svip_ssc_resume(struct platform_device
*pdev
)
917 struct spi_master
*master
= platform_get_drvdata(pdev
);
918 struct svip_ssc_device
*ssc_dev
= spi_master_get_devdata(master
);
920 clk_enable(ssc_dev
->clk
);
/* Platform driver glue.
 * NOTE(review): the '.driver = { .name = ... }' member lines (original
 * lines 926-927, 929, 931, 934, 936-937) are missing from this garbled
 * view; code left byte-identical, comments only added. */
925 static struct platform_driver svip_ssc_driver
= {
928 .owner
= THIS_MODULE
,
930 .probe
= svip_ssc_probe
,
932 .suspend
= svip_ssc_suspend
,
933 .resume
= svip_ssc_resume
,
935 .remove
= __exit_p(svip_ssc_remove
)
938 int __init
svip_ssc_init(void)
940 return platform_driver_register(&svip_ssc_driver
);
943 void __exit
svip_ssc_exit(void)
945 platform_driver_unregister(&svip_ssc_driver
);
/* Module entry points and metadata. */
948 module_init(svip_ssc_init
);
949 module_exit(svip_ssc_exit
);
951 MODULE_ALIAS("platform:ifx_ssc");
952 MODULE_DESCRIPTION("Lantiq SSC Controller driver");
953 MODULE_AUTHOR("Andreas Schmidt <andreas.schmidt@infineon.com>");
954 MODULE_AUTHOR("Jevgenijs Grigorjevs <Jevgenijs.Grigorjevs@lantiq.com>");
955 MODULE_LICENSE("GPL");