/*
 * Cavium CNS3xxx Gigabit driver for Linux
 *
 * Copyright 2011 Gateworks Corporation
 *	Chris Lang <clang@gateworks.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <mach/irqs.h>
#include <mach/platform.h>
#define DRV_NAME "cns3xxx_eth"

#define TX_DESC_RESERVE 20

#define RX_POOL_ALLOC_SIZE (sizeof(struct rx_desc) * RX_DESCS)
#define TX_POOL_ALLOC_SIZE (sizeof(struct tx_desc) * TX_DESCS)
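/*
 * RX buffers are plain kmalloc() segments later handed to build_skb(): the
 * first SKB_HEAD_ALIGN bytes are kept back as skb headroom, and RX_SEGMENT_MRU
 * is the largest frame the switch is allowed to DMA into the remainder.
 */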
#define RX_BUFFER_ALIGN 64
#define RX_BUFFER_ALIGN_MASK (~(RX_BUFFER_ALIGN - 1))

#define SKB_HEAD_ALIGN (((PAGE_SIZE - NET_SKB_PAD) % RX_BUFFER_ALIGN) + NET_SKB_PAD + NET_IP_ALIGN)
#define RX_SEGMENT_ALLOC_SIZE 2048
#define RX_SEGMENT_BUFSIZE (SKB_WITH_OVERHEAD(RX_SEGMENT_ALLOC_SIZE))
#define RX_SEGMENT_MRU (((RX_SEGMENT_BUFSIZE - SKB_HEAD_ALIGN) & RX_BUFFER_ALIGN_MASK) - NET_IP_ALIGN)

#define NAPI_WEIGHT 64
#define MDIO_CMD_COMPLETE 0x00008000
#define MDIO_WRITE_COMMAND 0x00002000
#define MDIO_READ_COMMAND 0x00004000
#define MDIO_REG_OFFSET 8
#define MDIO_VALUE_OFFSET 16
/* Descriptor Defines */
#define END_OF_RING 0x40000000
#define FIRST_SEGMENT 0x20000000
#define LAST_SEGMENT 0x10000000
#define FORCE_ROUTE 0x04000000
#define IP_CHECKSUM 0x00040000
#define UDP_CHECKSUM 0x00020000
#define TCP_CHECKSUM 0x00010000
/* Port Config Defines */
#define PORT_BP_ENABLE 0x00020000
#define PORT_DISABLE 0x00040000
#define PORT_LEARN_DIS 0x00080000
#define PORT_BLOCK_STATE 0x00100000
#define PORT_BLOCK_MODE 0x00200000

#define PROMISC_OFFSET 29

/* Global Config Defines */
#define UNKNOWN_VLAN_TO_CPU 0x02000000
#define ACCEPT_CRC_PACKET 0x00200000
#define CRC_STRIPPING 0x00100000

/* VLAN Config Defines */
#define NIC_MODE 0x00008000
#define VLAN_UNAWARE 0x00000001

/* DMA AUTO Poll Defines */
#define TS_POLL_EN 0x00000020
#define TS_SUSPEND 0x00000010
#define FS_POLL_EN 0x00000002
#define FS_SUSPEND 0x00000001

/* DMA Ring Control Defines */
#define QUEUE_THRESHOLD 0x000000f0
#define CLR_FS_STATE 0x80000000

/* Interrupt Status Defines */
#define MAC0_STATUS_CHANGE 0x00004000
#define MAC1_STATUS_CHANGE 0x00008000
#define MAC2_STATUS_CHANGE 0x00010000
#define MAC0_RX_ERROR 0x00100000
#define MAC1_RX_ERROR 0x00200000
#define MAC2_RX_ERROR 0x00400000
        u32 sdp; /* segment data pointer */

        u32 sdl:16; /* segment data length */

        u32 rsv_1:3; /* reserve */

        u32 fp:1; /* force priority */

        u8 alignment[16]; /* for 32 byte */

        u32 sdp; /* segment data pointer */

        u32 sdl:16; /* segment data length */

        u8 alignment[16]; /* for 32 byte alignment */
        u32 mac_pri_ctrl[5], __res;

        u32 fc_input_thrs, __res1[2];
        u32 mac_glob_cfg_ext, __res2[2];

        u32 dma_auto_poll_cfg;
        u32 delay_intr_cfg, __res3;

        u32 ts_desc_base_addr0, __res4;

        u32 fs_desc_base_addr0, __res5;

        u32 ts_desc_base_addr1, __res6;

        u32 fs_desc_base_addr1;

        u32 mac_counter0[13];
        struct tx_desc *desc;
        dma_addr_t phys_addr;
        struct tx_desc *cur_addr;
        struct sk_buff *buff_tab[TX_DESCS];
        unsigned int phys_tab[TX_DESCS];

        struct rx_desc *desc;
        dma_addr_t phys_addr;
        struct rx_desc *cur_addr;
        void *buff_tab[RX_DESCS];
        unsigned int phys_tab[RX_DESCS];
        struct resource *mem_res;
        struct switch_regs __iomem *regs;
        struct napi_struct napi;
        struct cns3xxx_plat_info *plat;
        struct _tx_ring tx_ring;
        struct _rx_ring rx_ring;
        struct sk_buff *frag_first;
        struct sk_buff *frag_last;

        struct net_device *netdev;
        struct phy_device *phydev;
        int id; /* logical port ID */
static spinlock_t mdio_lock;
static DEFINE_SPINLOCK(tx_lock);
static struct switch_regs __iomem *mdio_regs; /* mdio command and status only */
struct mii_bus *mdio_bus;
static int ports_open;
static struct port *switch_port_tab[4];
static struct dma_pool *rx_dma_pool;
static struct dma_pool *tx_dma_pool;
struct net_device *napi_dev;
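/*
 * Drive a single MDIO read or write through the switch's phy_control register,
 * polling for MDIO_CMD_COMPLETE and giving up after 5000 iterations.
 */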
static int cns3xxx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,

        temp = __raw_readl(&mdio_regs->phy_control);
        temp |= MDIO_CMD_COMPLETE;
        __raw_writel(temp, &mdio_regs->phy_control);

        temp = (cmd << MDIO_VALUE_OFFSET);
        temp |= MDIO_WRITE_COMMAND;

        temp = MDIO_READ_COMMAND;

        temp |= ((location & 0x1f) << MDIO_REG_OFFSET);
        temp |= (phy_id & 0x1f);

        __raw_writel(temp, &mdio_regs->phy_control);

        while (((__raw_readl(&mdio_regs->phy_control) & MDIO_CMD_COMPLETE) == 0)

        if (cycles == 5000) {
                printk(KERN_ERR "%s #%i: MII transaction failed\n", bus->name,

        temp = __raw_readl(&mdio_regs->phy_control);
        temp |= MDIO_CMD_COMPLETE;
        __raw_writel(temp, &mdio_regs->phy_control);

        return ((temp >> MDIO_VALUE_OFFSET) & 0xFFFF);
static int cns3xxx_mdio_read(struct mii_bus *bus, int phy_id, int location)

        spin_lock_irqsave(&mdio_lock, flags);
        ret = cns3xxx_mdio_cmd(bus, phy_id, location, 0, 0);
        spin_unlock_irqrestore(&mdio_lock, flags);
static int cns3xxx_mdio_write(struct mii_bus *bus, int phy_id, int location,

        spin_lock_irqsave(&mdio_lock, flags);
        ret = cns3xxx_mdio_cmd(bus, phy_id, location, 1, val);
        spin_unlock_irqrestore(&mdio_lock, flags);
static int cns3xxx_mdio_register(void)

        if (!(mdio_bus = mdiobus_alloc()))

        mdio_regs = (struct switch_regs __iomem *)CNS3XXX_SWITCH_BASE_VIRT;

        spin_lock_init(&mdio_lock);
        mdio_bus->name = "CNS3xxx MII Bus";
        mdio_bus->read = &cns3xxx_mdio_read;
        mdio_bus->write = &cns3xxx_mdio_write;
        strcpy(mdio_bus->id, "0");

        if ((err = mdiobus_register(mdio_bus)))
                mdiobus_free(mdio_bus);
static void cns3xxx_mdio_remove(void)

        mdiobus_unregister(mdio_bus);
        mdiobus_free(mdio_bus);
static void enable_tx_dma(struct sw *sw)

        __raw_writel(0x1, &sw->regs->ts_dma_ctrl0);

static void enable_rx_dma(struct sw *sw)

        __raw_writel(0x1, &sw->regs->fs_dma_ctrl0);
static void cns3xxx_adjust_link(struct net_device *dev)

        struct port *port = netdev_priv(dev);
        struct phy_device *phydev = port->phydev;

        printk(KERN_INFO "%s: link down\n", dev->name);

        if (port->speed == phydev->speed && port->duplex == phydev->duplex)

        port->speed = phydev->speed;
        port->duplex = phydev->duplex;

        printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
               dev->name, port->speed, port->duplex ? "full" : "half");
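/* RX completion interrupt: mask it and let NAPI drain the FS (receive) ring. */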
irqreturn_t eth_rx_irq(int irq, void *pdev)

        struct net_device *dev = pdev;
        struct sw *sw = netdev_priv(dev);
        if (likely(napi_schedule_prep(&sw->napi))) {
                disable_irq_nosync(IRQ_CNS3XXX_SW_R0RXC);
                __napi_schedule(&sw->napi);

        return (IRQ_HANDLED);
irqreturn_t eth_stat_irq(int irq, void *pdev)

        struct net_device *dev = pdev;
        struct sw *sw = netdev_priv(dev);

        u32 stat = __raw_readl(&sw->regs->intr_stat);
        __raw_writel(0xffffffff, &sw->regs->intr_stat);

        if (stat & MAC2_RX_ERROR)
                switch_port_tab[3]->netdev->stats.rx_dropped++;
        if (stat & MAC1_RX_ERROR)
                switch_port_tab[1]->netdev->stats.rx_dropped++;
        if (stat & MAC0_RX_ERROR)
                switch_port_tab[0]->netdev->stats.rx_dropped++;
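        /* mac_cfg mirrors the negotiated link state: bit 0 = link up,
         * bit 4 = full duplex, bits 3:2 = speed (2 -> 1000, 1 -> 100, else 10). */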
        if (stat & MAC0_STATUS_CHANGE) {
                cfg = __raw_readl(&sw->regs->mac_cfg[0]);
                switch_port_tab[0]->phydev->link = (cfg & 0x1);
                switch_port_tab[0]->phydev->duplex = ((cfg >> 4) & 0x1);
                if (((cfg >> 2) & 0x3) == 2)
                        switch_port_tab[0]->phydev->speed = 1000;
                else if (((cfg >> 2) & 0x3) == 1)
                        switch_port_tab[0]->phydev->speed = 100;
                        switch_port_tab[0]->phydev->speed = 10;
                cns3xxx_adjust_link(switch_port_tab[0]->netdev);

        if (stat & MAC1_STATUS_CHANGE) {
                cfg = __raw_readl(&sw->regs->mac_cfg[1]);
                switch_port_tab[1]->phydev->link = (cfg & 0x1);
                switch_port_tab[1]->phydev->duplex = ((cfg >> 4) & 0x1);
                if (((cfg >> 2) & 0x3) == 2)
                        switch_port_tab[1]->phydev->speed = 1000;
                else if (((cfg >> 2) & 0x3) == 1)
                        switch_port_tab[1]->phydev->speed = 100;
                        switch_port_tab[1]->phydev->speed = 10;
                cns3xxx_adjust_link(switch_port_tab[1]->netdev);

        if (stat & MAC2_STATUS_CHANGE) {
                cfg = __raw_readl(&sw->regs->mac_cfg[3]);
                switch_port_tab[3]->phydev->link = (cfg & 0x1);
                switch_port_tab[3]->phydev->duplex = ((cfg >> 4) & 0x1);
                if (((cfg >> 2) & 0x3) == 2)
                        switch_port_tab[3]->phydev->speed = 1000;
                else if (((cfg >> 2) & 0x3) == 1)
                        switch_port_tab[3]->phydev->speed = 100;
                        switch_port_tab[3]->phydev->speed = 10;
                cns3xxx_adjust_link(switch_port_tab[3]->netdev);

        return (IRQ_HANDLED);
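/*
 * Replenish the FS (RX) ring starting at alloc_index: allocate fresh
 * 2048-byte segments, DMA-map them and hand the descriptors back to
 * the switch.
 */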
static void cns3xxx_alloc_rx_buf(struct sw *sw, int received)

        struct _rx_ring *rx_ring = &sw->rx_ring;
        unsigned int i = rx_ring->alloc_index;
        struct rx_desc *desc = &(rx_ring)->desc[i];

        for (received += rx_ring->alloc_count; received > 0; received--) {
                buf = kmalloc(RX_SEGMENT_ALLOC_SIZE, GFP_ATOMIC);

                phys = dma_map_single(NULL, buf + SKB_HEAD_ALIGN,
                                      RX_SEGMENT_MRU, DMA_FROM_DEVICE);
                if (dma_mapping_error(NULL, phys)) {

                desc->sdl = RX_SEGMENT_MRU;

                /* put the new buffer on RX-free queue */
                rx_ring->buff_tab[i] = buf;
                rx_ring->phys_tab[i] = phys;
                if (i == RX_DESCS - 1) {
                        desc->config0 = END_OF_RING | FIRST_SEGMENT |
                                        LAST_SEGMENT | RX_SEGMENT_MRU;
                        desc = &(rx_ring)->desc[i];

                        desc->config0 = FIRST_SEGMENT | LAST_SEGMENT |

        rx_ring->alloc_count = received;
        rx_ring->alloc_index = i;
static void eth_check_num_used(struct _tx_ring *tx_ring)

        if (tx_ring->num_used >= TX_DESCS - TX_DESC_RESERVE)

        if (tx_ring->stopped == stop)

        tx_ring->stopped = stop;
        for (i = 0; i < 4; i++) {
                struct port *port = switch_port_tab[i];
                struct net_device *dev;

                netif_stop_queue(dev);

                netif_wake_queue(dev);
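/*
 * Walk the TX ring from free_index, freeing skbs and unmapping buffers for
 * descriptors the switch has finished with, then re-check whether the
 * per-port queues can be woken again.
 */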
static void eth_complete_tx(struct sw *sw)

        struct _tx_ring *tx_ring = &sw->tx_ring;
        struct tx_desc *desc;

        int num_used = tx_ring->num_used;

        index = tx_ring->free_index;
        desc = &(tx_ring)->desc[index];
        for (i = 0; i < num_used; i++) {

                skb = tx_ring->buff_tab[index];
                tx_ring->buff_tab[index] = 0;

                dev_kfree_skb_any(skb);
                dma_unmap_single(NULL, tx_ring->phys_tab[index],
                                 desc->sdl, DMA_TO_DEVICE);
                if (++index == TX_DESCS) {
                        desc = &(tx_ring)->desc[index];

        tx_ring->free_index = index;
        tx_ring->num_used -= i;
        eth_check_num_used(tx_ring);
static int eth_poll(struct napi_struct *napi, int budget)

        struct sw *sw = container_of(napi, struct sw, napi);
        struct _rx_ring *rx_ring = &sw->rx_ring;

        unsigned int i = rx_ring->cur_index;
        struct rx_desc *desc = &(rx_ring)->desc[i];
        unsigned int alloc_count = rx_ring->alloc_count;

        while (desc->cown && alloc_count + received < RX_DESCS - 1) {

                int reserve = SKB_HEAD_ALIGN;

                if (received >= budget)

                /* process received frame */
                dma_unmap_single(NULL, rx_ring->phys_tab[i],
                                 RX_SEGMENT_MRU, DMA_FROM_DEVICE);

                skb = build_skb(rx_ring->buff_tab[i], 0);

                skb->dev = switch_port_tab[desc->sp]->netdev;

                if (desc->fsd && !desc->lsd)
                        length = RX_SEGMENT_MRU;

                reserve -= NET_IP_ALIGN;

                length += NET_IP_ALIGN;

                skb_reserve(skb, reserve);
                skb_put(skb, length);
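                /* Frames larger than one RX segment arrive over several descriptors;
                 * the extra segments are chained onto frag_first below until the
                 * last-segment descriptor completes the frame. */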
                        sw->frag_first = skb;

                        if (sw->frag_first == sw->frag_last)
                                skb_frag_add_head(sw->frag_first, skb);

                                sw->frag_last->next = skb;
                        sw->frag_first->len += skb->len;
                        sw->frag_first->data_len += skb->len;
                        sw->frag_first->truesize += skb->truesize;

                        struct net_device *dev;

                        skb = sw->frag_first;

                        skb->protocol = eth_type_trans(skb, dev);

                        dev->stats.rx_packets++;
                        dev->stats.rx_bytes += skb->len;

                        /* RX Hardware checksum offload */
                        skb->ip_summed = CHECKSUM_NONE;
                        switch (desc->prot) {

                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                                napi_gro_receive(napi, skb);

                                netif_receive_skb(skb);

                        sw->frag_first = NULL;
                        sw->frag_last = NULL;

                if (++i == RX_DESCS) {
                        desc = &(rx_ring)->desc[i];

                enable_irq(IRQ_CNS3XXX_SW_R0RXC);

        cns3xxx_alloc_rx_buf(sw, received);
        rx_ring->cur_index = i;

        spin_lock_bh(&tx_lock);
        spin_unlock_bh(&tx_lock);
static void eth_set_desc(struct _tx_ring *tx_ring, int index, int index_last,
                         void *data, int len, u32 config0, u32 pmap)

        struct tx_desc *tx_desc = &(tx_ring)->desc[index];

        phys = dma_map_single(NULL, data, len, DMA_TO_DEVICE);

        tx_desc->pmap = pmap;
        tx_ring->phys_tab[index] = phys;

        if (index == TX_DESCS - 1)
                config0 |= END_OF_RING;
        if (index == index_last)
                config0 |= LAST_SEGMENT;

        tx_desc->config0 = config0;
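/*
 * Transmit path: descriptor slots are reserved under tx_lock (one per fragment
 * plus one for the linear data), the fragment descriptors are filled first, and
 * the FIRST_SEGMENT descriptor covering skb->data is written last.
 */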
static int eth_xmit(struct sk_buff *skb, struct net_device *dev)

        struct port *port = netdev_priv(dev);
        struct sw *sw = port->sw;
        struct _tx_ring *tx_ring = &sw->tx_ring;
        struct sk_buff *skb1;
        char pmap = (1 << port->id);
        int nr_frags = skb_shinfo(skb)->nr_frags;
        int nr_desc = nr_frags;
        int index0, index, index_last;

        skb_walk_frags(skb, skb1)

        spin_lock_bh(&tx_lock);

        if ((tx_ring->num_used + nr_desc + 1) >= TX_DESCS) {
                spin_unlock_bh(&tx_lock);
                return NETDEV_TX_BUSY;

        index = index0 = tx_ring->cur_index;
        index_last = (index0 + nr_desc) % TX_DESCS;
        tx_ring->cur_index = (index_last + 1) % TX_DESCS;

        spin_unlock_bh(&tx_lock);

        config0 = FORCE_ROUTE;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                config0 |= UDP_CHECKSUM | TCP_CHECKSUM;

        for (i = 0; i < nr_frags; i++) {
                struct skb_frag_struct *frag;

                index = (index + 1) % TX_DESCS;

                frag = &skb_shinfo(skb)->frags[i];
                addr = page_address(skb_frag_page(frag)) + frag->page_offset;

                eth_set_desc(tx_ring, index, index_last, addr, frag->size,

        len0 = skb->len - skb->data_len;

        skb_walk_frags(skb, skb1) {
                index = (index + 1) % TX_DESCS;

                eth_set_desc(tx_ring, index, index_last, skb1->data, skb1->len,

        tx_ring->buff_tab[index0] = skb;
        eth_set_desc(tx_ring, index0, index_last, skb->data, len0,
                     config0 | FIRST_SEGMENT, pmap);

        tx_ring->num_used += nr_desc + 1;
        spin_unlock(&tx_lock);

        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;
static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)

        struct port *port = netdev_priv(dev);

        if (!netif_running(dev))

        return phy_mii_ioctl(port->phydev, req, cmd);
/* ethtool support */

static void cns3xxx_get_drvinfo(struct net_device *dev,
                                struct ethtool_drvinfo *info)

        strcpy(info->driver, DRV_NAME);
        strcpy(info->bus_info, "internal");

static int cns3xxx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)

        struct port *port = netdev_priv(dev);
        return phy_ethtool_gset(port->phydev, cmd);

static int cns3xxx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)

        struct port *port = netdev_priv(dev);
        return phy_ethtool_sset(port->phydev, cmd);

static int cns3xxx_nway_reset(struct net_device *dev)

        struct port *port = netdev_priv(dev);
        return phy_start_aneg(port->phydev);

static struct ethtool_ops cns3xxx_ethtool_ops = {
        .get_drvinfo = cns3xxx_get_drvinfo,
        .get_settings = cns3xxx_get_settings,
        .set_settings = cns3xxx_set_settings,
        .nway_reset = cns3xxx_nway_reset,
        .get_link = ethtool_op_get_link,
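/*
 * Allocate the TS (transmit) and FS (receive) descriptor rings from DMA pools,
 * pre-populate every RX descriptor with a mapped buffer, and point the switch's
 * descriptor base/pointer registers at the rings.
 */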
static int init_rings(struct sw *sw)

        struct _rx_ring *rx_ring = &sw->rx_ring;
        struct _tx_ring *tx_ring = &sw->tx_ring;

        __raw_writel(0, &sw->regs->fs_dma_ctrl0);
        __raw_writel(TS_SUSPEND | FS_SUSPEND, &sw->regs->dma_auto_poll_cfg);
        __raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
        __raw_writel(CLR_FS_STATE | QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);

        __raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);

        if (!(rx_dma_pool = dma_pool_create(DRV_NAME, NULL,
                                            RX_POOL_ALLOC_SIZE, 32, 0)))

        if (!(rx_ring->desc = dma_pool_alloc(rx_dma_pool, GFP_KERNEL,
                                             &rx_ring->phys_addr)))

        memset(rx_ring->desc, 0, RX_POOL_ALLOC_SIZE);

        /* Setup RX buffers */
        for (i = 0; i < RX_DESCS; i++) {
                struct rx_desc *desc = &(rx_ring)->desc[i];

                buf = kzalloc(RX_SEGMENT_ALLOC_SIZE, GFP_KERNEL);

                desc->sdl = RX_SEGMENT_MRU;
                if (i == (RX_DESCS - 1))

                desc->sdp = dma_map_single(NULL, buf + SKB_HEAD_ALIGN,
                                           RX_SEGMENT_MRU, DMA_FROM_DEVICE);
                if (dma_mapping_error(NULL, desc->sdp))

                rx_ring->buff_tab[i] = buf;
                rx_ring->phys_tab[i] = desc->sdp;

        __raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_ptr0);
        __raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_base_addr0);

        if (!(tx_dma_pool = dma_pool_create(DRV_NAME, NULL,
                                            TX_POOL_ALLOC_SIZE, 32, 0)))

        if (!(tx_ring->desc = dma_pool_alloc(tx_dma_pool, GFP_KERNEL,
                                             &tx_ring->phys_addr)))

        memset(tx_ring->desc, 0, TX_POOL_ALLOC_SIZE);

        /* Setup TX buffers */
        for (i = 0; i < TX_DESCS; i++) {
                struct tx_desc *desc = &(tx_ring)->desc[i];
                tx_ring->buff_tab[i] = 0;

                if (i == (TX_DESCS - 1))

        __raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_ptr0);
        __raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_base_addr0);
static void destroy_rings(struct sw *sw)

        if (sw->rx_ring.desc) {
                for (i = 0; i < RX_DESCS; i++) {
                        struct _rx_ring *rx_ring = &sw->rx_ring;
                        struct rx_desc *desc = &(rx_ring)->desc[i];
                        struct sk_buff *skb = sw->rx_ring.buff_tab[i];

                        dma_unmap_single(NULL, desc->sdp, RX_SEGMENT_MRU,

                dma_pool_free(rx_dma_pool, sw->rx_ring.desc, sw->rx_ring.phys_addr);
                dma_pool_destroy(rx_dma_pool);

                sw->rx_ring.desc = 0;

        if (sw->tx_ring.desc) {
                for (i = 0; i < TX_DESCS; i++) {
                        struct _tx_ring *tx_ring = &sw->tx_ring;
                        struct tx_desc *desc = &(tx_ring)->desc[i];
                        struct sk_buff *skb = sw->tx_ring.buff_tab[i];

                        dma_unmap_single(NULL, desc->sdp,
                                         skb->len, DMA_TO_DEVICE);

                dma_pool_free(tx_dma_pool, sw->tx_ring.desc, sw->tx_ring.phys_addr);
                dma_pool_destroy(tx_dma_pool);

                sw->tx_ring.desc = 0;
static int eth_open(struct net_device *dev)

        struct port *port = netdev_priv(dev);
        struct sw *sw = port->sw;

        port->speed = 0; /* force "link up" message */
        phy_start(port->phydev);

        netif_start_queue(dev);

        request_irq(IRQ_CNS3XXX_SW_R0RXC, eth_rx_irq, IRQF_SHARED, "gig_switch", napi_dev);
        request_irq(IRQ_CNS3XXX_SW_STATUS, eth_stat_irq, IRQF_SHARED, "gig_stat", napi_dev);
        napi_enable(&sw->napi);
        netif_start_queue(napi_dev);

        __raw_writel(~(MAC0_STATUS_CHANGE | MAC1_STATUS_CHANGE | MAC2_STATUS_CHANGE |
                       MAC0_RX_ERROR | MAC1_RX_ERROR | MAC2_RX_ERROR), &sw->regs->intr_mask);

        temp = __raw_readl(&sw->regs->mac_cfg[2]);
        temp &= ~(PORT_DISABLE);
        __raw_writel(temp, &sw->regs->mac_cfg[2]);

        temp = __raw_readl(&sw->regs->dma_auto_poll_cfg);
        temp &= ~(TS_SUSPEND | FS_SUSPEND);
        __raw_writel(temp, &sw->regs->dma_auto_poll_cfg);

        temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
        temp &= ~(PORT_DISABLE);
        __raw_writel(temp, &sw->regs->mac_cfg[port->id]);

        netif_carrier_on(dev);
static int eth_close(struct net_device *dev)

        struct port *port = netdev_priv(dev);
        struct sw *sw = port->sw;

        temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
        temp |= (PORT_DISABLE);
        __raw_writel(temp, &sw->regs->mac_cfg[port->id]);

        netif_stop_queue(dev);

        phy_stop(port->phydev);

        disable_irq(IRQ_CNS3XXX_SW_R0RXC);
        free_irq(IRQ_CNS3XXX_SW_R0RXC, napi_dev);
        disable_irq(IRQ_CNS3XXX_SW_STATUS);
        free_irq(IRQ_CNS3XXX_SW_STATUS, napi_dev);
        napi_disable(&sw->napi);
        netif_stop_queue(napi_dev);
        temp = __raw_readl(&sw->regs->mac_cfg[2]);
        temp |= (PORT_DISABLE);
        __raw_writel(temp, &sw->regs->mac_cfg[2]);

        __raw_writel(TS_SUSPEND | FS_SUSPEND,
                     &sw->regs->dma_auto_poll_cfg);

        netif_carrier_off(dev);
static void eth_rx_mode(struct net_device *dev)

        struct port *port = netdev_priv(dev);
        struct sw *sw = port->sw;

        temp = __raw_readl(&sw->regs->mac_glob_cfg);

        if (dev->flags & IFF_PROMISC) {
                temp |= ((1 << 2) << PROMISC_OFFSET);

                temp |= ((1 << port->id) << PROMISC_OFFSET);

                temp &= ~((1 << 2) << PROMISC_OFFSET);

                temp &= ~((1 << port->id) << PROMISC_OFFSET);

        __raw_writel(temp, &sw->regs->mac_glob_cfg);
static int eth_set_mac(struct net_device *netdev, void *p)

        struct port *port = netdev_priv(netdev);
        struct sw *sw = port->sw;
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* Invalidate old ARL Entry */
        __raw_writel((port->id << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);

        __raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
        __raw_writel(((netdev->dev_addr[0] << 24) | (netdev->dev_addr[1] << 16) |
                      (netdev->dev_addr[2] << 8) | (netdev->dev_addr[3])),
                     &sw->regs->arl_ctrl[1]);

        __raw_writel(((netdev->dev_addr[4] << 24) | (netdev->dev_addr[5] << 16) |
                     &sw->regs->arl_ctrl[2]);
        __raw_writel((1 << 19), &sw->regs->arl_vlan_cmd);

        while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0)

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        __raw_writel((port->id << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);

        __raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]);
        __raw_writel(((addr->sa_data[0] << 24) | (addr->sa_data[1] << 16) |
                      (addr->sa_data[2] << 8) | (addr->sa_data[3])),
                     &sw->regs->arl_ctrl[1]);

        __raw_writel(((addr->sa_data[4] << 24) | (addr->sa_data[5] << 16) |
                      (7 << 4) | (1 << 1)), &sw->regs->arl_ctrl[2]);
        __raw_writel((1 << 19), &sw->regs->arl_vlan_cmd);

        while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0)
static int cns3xxx_change_mtu(struct net_device *dev, int new_mtu)

        if (new_mtu > MAX_MTU)
static const struct net_device_ops cns3xxx_netdev_ops = {
        .ndo_open = eth_open,
        .ndo_stop = eth_close,
        .ndo_start_xmit = eth_xmit,
        .ndo_set_rx_mode = eth_rx_mode,
        .ndo_do_ioctl = eth_ioctl,
        .ndo_change_mtu = cns3xxx_change_mtu,
        .ndo_set_mac_address = eth_set_mac,
        .ndo_validate_addr = eth_validate_addr,
static int eth_init_one(struct platform_device *pdev)

        struct net_device *dev;
        struct cns3xxx_plat_info *plat = pdev->dev.platform_data;

        char phy_id[MII_BUS_ID_SIZE + 3];

        if (!(napi_dev = alloc_etherdev(sizeof(struct sw))))

        strcpy(napi_dev->name, "switch%d");
        napi_dev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST;

        SET_NETDEV_DEV(napi_dev, &pdev->dev);
        sw = netdev_priv(napi_dev);
        memset(sw, 0, sizeof(struct sw));
        sw->regs = (struct switch_regs __iomem *)CNS3XXX_SWITCH_BASE_VIRT;
        regs_phys = CNS3XXX_SWITCH_BASE;
        sw->mem_res = request_mem_region(regs_phys, REGS_SIZE, napi_dev->name);

        temp = __raw_readl(&sw->regs->phy_auto_addr);
        temp |= (3 << 30); /* maximum frame length: 9600 bytes */
        __raw_writel(temp, &sw->regs->phy_auto_addr);

        for (i = 0; i < 4; i++) {
                temp = __raw_readl(&sw->regs->mac_cfg[i]);
                temp |= (PORT_DISABLE);
                __raw_writel(temp, &sw->regs->mac_cfg[i]);

        temp = PORT_DISABLE;
        __raw_writel(temp, &sw->regs->mac_cfg[2]);

        temp = __raw_readl(&sw->regs->vlan_cfg);
        temp |= NIC_MODE | VLAN_UNAWARE;
        __raw_writel(temp, &sw->regs->vlan_cfg);

        __raw_writel(UNKNOWN_VLAN_TO_CPU |
                     CRC_STRIPPING, &sw->regs->mac_glob_cfg);

        if ((err = init_rings(sw)) != 0) {

        platform_set_drvdata(pdev, napi_dev);

        netif_napi_add(napi_dev, &sw->napi, eth_poll, NAPI_WEIGHT);

        for (i = 0; i < 3; i++) {
                if (!(plat->ports & (1 << i))) {

                if (!(dev = alloc_etherdev(sizeof(struct port)))) {

                port = netdev_priv(dev);

                temp = __raw_readl(&sw->regs->mac_cfg[port->id]);
                temp |= (PORT_DISABLE | PORT_BLOCK_STATE | PORT_LEARN_DIS);
                __raw_writel(temp, &sw->regs->mac_cfg[port->id]);

                dev->netdev_ops = &cns3xxx_netdev_ops;
                dev->ethtool_ops = &cns3xxx_ethtool_ops;
                dev->tx_queue_len = 1000;
                dev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST;

                switch_port_tab[port->id] = port;
                memcpy(dev->dev_addr, &plat->hwaddr[i], ETH_ALEN);

                snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, "0", plat->phy[i]);
                port->phydev = phy_connect(dev, phy_id, &cns3xxx_adjust_link, 0,
                                           PHY_INTERFACE_MODE_RGMII);
                if ((err = IS_ERR(port->phydev))) {
                        switch_port_tab[port->id] = 0;

                port->phydev->irq = PHY_IGNORE_INTERRUPT;

                if ((err = register_netdev(dev))) {
                        phy_disconnect(port->phydev);
                        switch_port_tab[port->id] = 0;

                printk(KERN_INFO "%s: RGMII PHY %i on cns3xxx Switch\n", dev->name, plat->phy[i]);
                netif_carrier_off(dev);

        for (--i; i >= 0; i--) {
                if (switch_port_tab[i]) {
                        port = switch_port_tab[i];

                        unregister_netdev(dev);
                        phy_disconnect(port->phydev);
                        switch_port_tab[i] = 0;

        free_netdev(napi_dev);
static int eth_remove_one(struct platform_device *pdev)

        struct net_device *dev = platform_get_drvdata(pdev);
        struct sw *sw = netdev_priv(dev);

        for (i = 3; i >= 0; i--) {
                if (switch_port_tab[i]) {
                        struct port *port = switch_port_tab[i];
                        struct net_device *dev = port->netdev;
                        unregister_netdev(dev);
                        phy_disconnect(port->phydev);
                        switch_port_tab[i] = 0;

        release_resource(sw->mem_res);
        free_netdev(napi_dev);
static struct platform_driver cns3xxx_eth_driver = {
        .driver.name = DRV_NAME,
        .probe = eth_init_one,
        .remove = eth_remove_one,
static int __init eth_init_module(void)

        if ((err = cns3xxx_mdio_register()))

        return platform_driver_register(&cns3xxx_eth_driver);

static void __exit eth_cleanup_module(void)

        platform_driver_unregister(&cns3xxx_eth_driver);
        cns3xxx_mdio_remove();

module_init(eth_init_module);
module_exit(eth_cleanup_module);
MODULE_AUTHOR("Chris Lang");
MODULE_DESCRIPTION("Cavium CNS3xxx Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:cns3xxx_eth");