/*
 * ADM5120 built-in ethernet switch driver
 *
 * Copyright Jeroen Vreeken (pe1rxq@amsat.org), 2005
 *
 * Inspiration for this driver came from the original ADMtek 2.4
 * driver, Copyright ADMtek Inc.
 *
 * NAPI extensions by Thomas Langer (Thomas.Langer@infineon.com)
 * and Friedrich Beckmann (Friedrich.Beckmann@infineon.com), 2007
 *
 * TODO: Add support for the high priority queues (currently disabled).
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include <linux/io.h>
#include <linux/irq.h>

#include <asm/mipsregs.h>
#include <asm/uaccess.h>

#include <adm5120_info.h>
#include <adm5120_defs.h>
#include <adm5120_irq.h>
#include <adm5120_switch.h>

#include "adm5120sw.h"
#define DRV_NAME	"adm5120-switch"
#define DRV_DESC	"ADM5120 built-in ethernet switch driver"
#define DRV_VERSION	"0.1.0"

MODULE_AUTHOR("Jeroen Vreeken (pe1rxq@amsat.org)");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");

/* ------------------------------------------------------------------------ */
#if 0 /*def ADM5120_SWITCH_DEBUG*/
#define SW_DBG(f, a...)		printk(KERN_DEBUG "%s: " f, DRV_NAME , ## a)
#else
#define SW_DBG(f, a...)		do {} while (0)
#endif

#define SW_ERR(f, a...)		printk(KERN_ERR "%s: " f, DRV_NAME , ## a)
#define SW_INFO(f, a...)	printk(KERN_INFO "%s: " f, DRV_NAME , ## a)
#define SWITCH_NUM_PORTS	6
#define ETH_CSUM_LEN		4

#define RX_MAX_PKTLEN		1550
#define RX_RING_SIZE		64

#define TX_RING_SIZE		32
#define TX_QUEUE_LEN		28	/* Limit ring entries actually used. */
#define TX_TIMEOUT		(HZ * 400)

#define RX_DESCS_SIZE		(RX_RING_SIZE * sizeof(struct dma_desc))
#define RX_SKBS_SIZE		(RX_RING_SIZE * sizeof(struct sk_buff *))
#define TX_DESCS_SIZE		(TX_RING_SIZE * sizeof(struct dma_desc))
#define TX_SKBS_SIZE		(TX_RING_SIZE * sizeof(struct sk_buff *))

#define SKB_ALLOC_LEN		(RX_MAX_PKTLEN + 32)
#define SKB_RESERVE_LEN		(NET_IP_ALIGN + NET_SKB_PAD)

#define SWITCH_INTS_HIGH (SWITCH_INT_SHD | SWITCH_INT_RHD | SWITCH_INT_HDF)
#define SWITCH_INTS_LOW (SWITCH_INT_SLD | SWITCH_INT_RLD | SWITCH_INT_LDF)
#define SWITCH_INTS_ERR (SWITCH_INT_RDE | SWITCH_INT_SDE | SWITCH_INT_CPUH)
#define SWITCH_INTS_Q (SWITCH_INT_P0QF | SWITCH_INT_P1QF | SWITCH_INT_P2QF | \
			SWITCH_INT_P3QF | SWITCH_INT_P4QF | SWITCH_INT_P5QF | \
			SWITCH_INT_CPQF | SWITCH_INT_GQF)

#define SWITCH_INTS_ALL (SWITCH_INTS_HIGH | SWITCH_INTS_LOW | \
			SWITCH_INTS_ERR | SWITCH_INTS_Q | \
			SWITCH_INT_MD | SWITCH_INT_PSC)

#define SWITCH_INTS_USED (SWITCH_INTS_LOW | SWITCH_INT_PSC)
#define SWITCH_INTS_POLL (SWITCH_INT_RLD | SWITCH_INT_LDF)
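/*
 * Note on interrupt usage in this driver: only SWITCH_INTS_USED (the
 * low-priority queue sources plus port status change) is ever unmasked;
 * the RX/TX low-priority sources grouped in SWITCH_INTS_POLL are
 * additionally masked while NAPI polling is active and re-enabled from
 * the poll routine once the queue is drained.
 */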
/* ------------------------------------------------------------------------ */
struct dma_desc {
	__u32			buf1;
#define DESC_OWN		(1UL << 31)	/* Owned by the switch */
#define DESC_EOR		(1UL << 28)	/* End of Ring */
#define DESC_ADDR_MASK		0x1FFFFFF
#define DESC_ADDR(x)		((__u32)(x) & DESC_ADDR_MASK)
	__u32			buf2;
#define DESC_BUF2_EN		(1UL << 31)	/* Buffer 2 enable */
	__u32			buflen;
	__u32			misc;
/* definitions for tx/rx descriptors */
#define DESC_PKTLEN_SHIFT	16
#define DESC_PKTLEN_MASK	0x7FF
/* tx descriptor specific part */
#define DESC_CSUM		(1UL << 31)	/* Append checksum */
#define DESC_DSTPORT_SHIFT	8
#define DESC_DSTPORT_MASK	0x3F
#define DESC_VLAN_MASK		0x3F
/* rx descriptor specific part */
#define DESC_SRCPORT_SHIFT	12
#define DESC_SRCPORT_MASK	0x7
#define DESC_DA_MASK		0x3
#define DESC_DA_SHIFT		4
#define DESC_IPCSUM_FAIL	(1UL << 3)	/* IP checksum fail */
#define DESC_VLAN_TAG		(1UL << 2)	/* VLAN tag present */
#define DESC_TYPE_MASK		0x3	/* mask for packet type */
#define DESC_TYPE_IP		0x0	/* IP packet */
#define DESC_TYPE_PPPoE		0x1	/* PPPoE packet */
} __attribute__ ((aligned(16)));
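/*
 * Descriptor ownership handshake, as used by the code below: the CPU
 * fills buf1/buflen and hands the descriptor to the hardware by setting
 * DESC_OWN; the switch clears DESC_OWN once it has consumed (TX) or
 * filled (RX) the buffer. DESC_EOR marks the last descriptor of a ring,
 * making the DMA engine wrap back to the head.
 */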
static inline u32 desc_get_srcport(struct dma_desc *desc)
{
	return (desc->misc >> DESC_SRCPORT_SHIFT) & DESC_SRCPORT_MASK;
}
static inline u32 desc_get_pktlen(struct dma_desc *desc)
{
	return (desc->misc >> DESC_PKTLEN_SHIFT) & DESC_PKTLEN_MASK;
}
static inline int desc_ipcsum_fail(struct dma_desc *desc)
{
	return ((desc->misc & DESC_IPCSUM_FAIL) != 0);
}
/* ------------------------------------------------------------------------ */
/* default settings - unlimited TX and RX on all ports, default shaper mode */
static unsigned char bw_matrix[SWITCH_NUM_PORTS] = {
	0, 0, 0, 0, 0, 0
};
static int adm5120_nrdevs;

static struct net_device *adm5120_devs[SWITCH_NUM_PORTS];
/* Lookup table port -> device */
static struct net_device *adm5120_port[SWITCH_NUM_PORTS];

static struct dma_desc *txl_descs;
static struct dma_desc *rxl_descs;

static dma_addr_t txl_descs_dma;
static dma_addr_t rxl_descs_dma;

static struct sk_buff **txl_skbuff;
static struct sk_buff **rxl_skbuff;

static unsigned int cur_rxl, dirty_rxl; /* producer/consumer ring indices */
static unsigned int cur_txl, dirty_txl;
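/*
 * The cur/dirty counters are free running; an index is mapped into the
 * ring with "% RING_SIZE", and "cur - dirty" gives the number of
 * outstanding entries. For example, with RX_RING_SIZE 64, cur_rxl == 67
 * and dirty_rxl == 65 mean ring slots 65 % 64 == 1 and 66 % 64 == 2 are
 * waiting to be refilled.
 */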
static unsigned int sw_used;

static DEFINE_SPINLOCK(sw_lock);
static DEFINE_SPINLOCK(poll_lock);

static struct net_device sw_dev;
static struct net_device *poll_dev;
/* ------------------------------------------------------------------------ */
static inline u32 sw_read_reg(u32 reg)
{
	return __raw_readl((void __iomem *)KSEG1ADDR(ADM5120_SWITCH_BASE)+reg);
}

static inline void sw_write_reg(u32 reg, u32 val)
{
	__raw_writel(val, (void __iomem *)KSEG1ADDR(ADM5120_SWITCH_BASE)+reg);
}
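/*
 * The switch registers are reached through KSEG1, the uncached MIPS
 * address segment, so __raw_readl/__raw_writel access the hardware
 * directly, with no cache involvement and no byte swapping.
 */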
static inline void sw_int_mask(u32 mask)
{
	u32 t;

	t = sw_read_reg(SWITCH_REG_INT_MASK);
	t |= mask;
	sw_write_reg(SWITCH_REG_INT_MASK, t);
}

static inline void sw_int_unmask(u32 mask)
{
	u32 t;

	t = sw_read_reg(SWITCH_REG_INT_MASK);
	t &= ~mask;
	sw_write_reg(SWITCH_REG_INT_MASK, t);
}
static inline void sw_int_ack(u32 mask)
{
	sw_write_reg(SWITCH_REG_INT_STATUS, mask);
}
static inline u32 sw_int_status(void)
{
	u32 t;

	t = sw_read_reg(SWITCH_REG_INT_STATUS);
	t &= ~sw_read_reg(SWITCH_REG_INT_MASK);
	return t;
}
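/* Pending *and* unmasked: a bit set in INT_MASK disables that source,
 * so masked sources are filtered out of the returned status. */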
/* ------------------------------------------------------------------------ */
static void sw_dump_desc(char *label, struct dma_desc *desc, int tx)
{
	u32 t;

	SW_DBG("%s %s desc/%p\n", label, tx ? "tx" : "rx", desc);

	t = desc->buf1;
	SW_DBG("    buf1 %08X addr=%08X; len=%08X %s%s\n", t,
		t & DESC_ADDR_MASK, desc->buflen,
		(t & DESC_OWN) ? "SWITCH" : "CPU",
		(t & DESC_EOR) ? " EOR" : "");

	t = desc->buf2;
	SW_DBG("    buf2 %08X addr=%08X%s\n", desc->buf2,
		t & DESC_ADDR_MASK,
		(t & DESC_BUF2_EN) ? " EN" : "");

	t = desc->misc;
	if (tx)
		SW_DBG("    misc %08X%s pktlen=%04X ports=%02X vlan=%02X\n", t,
			(t & DESC_CSUM) ? " CSUM" : "",
			(t >> DESC_PKTLEN_SHIFT) & DESC_PKTLEN_MASK,
			(t >> DESC_DSTPORT_SHIFT) & DESC_DSTPORT_MASK,
			t & DESC_VLAN_MASK);
	else
		SW_DBG("    misc %08X pktlen=%04X port=%d DA=%d%s%s type=%d\n",
			t,
			(t >> DESC_PKTLEN_SHIFT) & DESC_PKTLEN_MASK,
			(t >> DESC_SRCPORT_SHIFT) & DESC_SRCPORT_MASK,
			(t >> DESC_DA_SHIFT) & DESC_DA_MASK,
			(t & DESC_IPCSUM_FAIL) ? " IPCF" : "",
			(t & DESC_VLAN_TAG) ? " VLAN" : "",
			(t & DESC_TYPE_MASK));
}
static void sw_dump_intr_mask(char *label, u32 mask)
{
	SW_DBG("%s %08X%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
		label, mask,
		(mask & SWITCH_INT_SHD) ? " SHD" : "",
		(mask & SWITCH_INT_SLD) ? " SLD" : "",
		(mask & SWITCH_INT_RHD) ? " RHD" : "",
		(mask & SWITCH_INT_RLD) ? " RLD" : "",
		(mask & SWITCH_INT_HDF) ? " HDF" : "",
		(mask & SWITCH_INT_LDF) ? " LDF" : "",
		(mask & SWITCH_INT_P0QF) ? " P0QF" : "",
		(mask & SWITCH_INT_P1QF) ? " P1QF" : "",
		(mask & SWITCH_INT_P2QF) ? " P2QF" : "",
		(mask & SWITCH_INT_P3QF) ? " P3QF" : "",
		(mask & SWITCH_INT_P4QF) ? " P4QF" : "",
		(mask & SWITCH_INT_CPQF) ? " CPQF" : "",
		(mask & SWITCH_INT_GQF) ? " GQF" : "",
		(mask & SWITCH_INT_MD) ? " MD" : "",
		(mask & SWITCH_INT_BCS) ? " BCS" : "",
		(mask & SWITCH_INT_PSC) ? " PSC" : "",
		(mask & SWITCH_INT_ID) ? " ID" : "",
		(mask & SWITCH_INT_W0TE) ? " W0TE" : "",
		(mask & SWITCH_INT_W1TE) ? " W1TE" : "",
		(mask & SWITCH_INT_RDE) ? " RDE" : "",
		(mask & SWITCH_INT_SDE) ? " SDE" : "",
		(mask & SWITCH_INT_CPUH) ? " CPUH" : "");
}
static void sw_dump_regs(void)
{
	u32 t;

	t = sw_read_reg(SWITCH_REG_PHY_STATUS);
	SW_DBG("phy_status: %08X\n", t);

	t = sw_read_reg(SWITCH_REG_CPUP_CONF);
	SW_DBG("cpup_conf: %08X%s%s%s\n", t,
		(t & CPUP_CONF_DCPUP) ? " DCPUP" : "",
		(t & CPUP_CONF_CRCP) ? " CRCP" : "",
		(t & CPUP_CONF_BTM) ? " BTM" : "");

	t = sw_read_reg(SWITCH_REG_PORT_CONF0);
	SW_DBG("port_conf0: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_PORT_CONF1);
	SW_DBG("port_conf1: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_PORT_CONF2);
	SW_DBG("port_conf2: %08X\n", t);

	t = sw_read_reg(SWITCH_REG_VLAN_G1);
	SW_DBG("vlan g1: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_VLAN_G2);
	SW_DBG("vlan g2: %08X\n", t);

	t = sw_read_reg(SWITCH_REG_BW_CNTL0);
	SW_DBG("bw_cntl0: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_BW_CNTL1);
	SW_DBG("bw_cntl1: %08X\n", t);

	t = sw_read_reg(SWITCH_REG_PHY_CNTL0);
	SW_DBG("phy_cntl0: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_PHY_CNTL1);
	SW_DBG("phy_cntl1: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_PHY_CNTL2);
	SW_DBG("phy_cntl2: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_PHY_CNTL3);
	SW_DBG("phy_cntl3: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_PHY_CNTL4);
	SW_DBG("phy_cntl4: %08X\n", t);

	t = sw_read_reg(SWITCH_REG_INT_STATUS);
	sw_dump_intr_mask("int_status: ", t);

	t = sw_read_reg(SWITCH_REG_INT_MASK);
	sw_dump_intr_mask("int_mask: ", t);

	t = sw_read_reg(SWITCH_REG_SHDA);
	SW_DBG("shda: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_SLDA);
	SW_DBG("slda: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_RHDA);
	SW_DBG("rhda: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_RLDA);
	SW_DBG("rlda: %08X\n", t);
}
/* ------------------------------------------------------------------------ */
static inline void adm5120_rx_dma_update(struct dma_desc *desc,
		struct sk_buff *skb, int end)
{
	desc->buflen = RX_MAX_PKTLEN;
	desc->buf1 = DESC_ADDR(skb->data) |
		DESC_OWN | (end ? DESC_EOR : 0);
}
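/*
 * DESC_ADDR() keeps only the low 25 address bits. That is sufficient
 * here assuming the buffers live in KSEG0/KSEG1-mapped RAM starting at
 * physical address zero, which is the usual ADM5120 memory layout.
 */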
static void adm5120_switch_rx_refill(void)
{
	unsigned int entry;

	for (; cur_rxl - dirty_rxl > 0; dirty_rxl++) {
		struct dma_desc *desc;
		struct sk_buff *skb;

		entry = dirty_rxl % RX_RING_SIZE;
		desc = &rxl_descs[entry];

		skb = rxl_skbuff[entry];
		if (skb == NULL) {
			skb = alloc_skb(SKB_ALLOC_LEN, GFP_ATOMIC);
			if (skb) {
				skb_reserve(skb, SKB_RESERVE_LEN);
				rxl_skbuff[entry] = skb;
			} else {
				SW_ERR("no memory for skb\n");
				desc->buflen = 0;
				desc->buf1 = (desc->buf1 & DESC_EOR) | DESC_OWN;
				continue;
			}
		}

		desc->buflen = RX_MAX_PKTLEN;
		desc->buf1 = (desc->buf1 & DESC_EOR) | DESC_OWN |
				DESC_ADDR(skb->data);
	}
}
static int adm5120_switch_rx(int limit)
{
	unsigned int done = 0;

	SW_DBG("rx start, limit=%d, cur_rxl=%u, dirty_rxl=%u\n",
			limit, cur_rxl, dirty_rxl);

	sw_int_ack(SWITCH_INTS_POLL);

	while (done < limit) {
		int entry = cur_rxl % RX_RING_SIZE;
		struct dma_desc *desc = &rxl_descs[entry];
		struct net_device *rdev;
		unsigned int port;

		if (desc->buf1 & DESC_OWN)
			break;

		if (dirty_rxl + RX_RING_SIZE == cur_rxl)
			break;

		port = desc_get_srcport(desc);
		rdev = adm5120_port[port];

		SW_DBG("rx descriptor %u, desc=%p, skb=%p\n", entry, desc,
				rxl_skbuff[entry]);

		if ((rdev) && netif_running(rdev)) {
			struct sk_buff *skb = rxl_skbuff[entry];
			int pktlen;

			pktlen = desc_get_pktlen(desc);
			pktlen -= ETH_CSUM_LEN;

			if ((pktlen == 0) || desc_ipcsum_fail(desc)) {
				rdev->stats.rx_errors++;
				if (pktlen == 0)
					rdev->stats.rx_length_errors++;
				if (desc_ipcsum_fail(desc))
					rdev->stats.rx_crc_errors++;
				SW_DBG("rx error, recycling skb %u\n", entry);
			} else {
				skb_put(skb, pktlen);

				skb->protocol = eth_type_trans(skb, rdev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;

				dma_cache_wback_inv((unsigned long)skb->data,
						skb->len);

				netif_receive_skb(skb);

				rdev->last_rx = jiffies;
				rdev->stats.rx_packets++;
				rdev->stats.rx_bytes += pktlen;

				rxl_skbuff[entry] = NULL;
				done++;
			}
		} else {
			SW_DBG("no rx device, recycling skb %u\n", entry);
		}

		cur_rxl++;
		if (cur_rxl - dirty_rxl > RX_RING_SIZE / 4)
			adm5120_switch_rx_refill();
	}

	adm5120_switch_rx_refill();

	SW_DBG("rx finished, cur_rxl=%u, dirty_rxl=%u, processed %d\n",
			cur_rxl, dirty_rxl, done);

	return done;
}
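/*
 * Old-style (pre-2.6.24) NAPI callback: process at most min(dev->quota,
 * *budget) packets, decrement both counters by the work actually done,
 * and return 0 (after netif_rx_complete()) only once the queue has been
 * drained; returning 1 asks the core to poll again.
 */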
static int adm5120_switch_poll(struct net_device *dev, int *budget)
{
	int limit = min(dev->quota, *budget);
	int done;
	u32 status;

	done = adm5120_switch_rx(limit);

	*budget -= done;
	dev->quota -= done;

	status = sw_int_status() & SWITCH_INTS_POLL;
	if ((done < limit) && (!status)) {
		spin_lock_irq(&poll_lock);
		SW_DBG("disable polling mode for %s\n", poll_dev->name);
		netif_rx_complete(poll_dev);
		sw_int_unmask(SWITCH_INTS_POLL);
		poll_dev = NULL;
		spin_unlock_irq(&poll_lock);
		return 0;
	}

	return 1;
}
static void adm5120_switch_tx(void)
{
	unsigned int entry;
	int i;

	/* find and cleanup dirty tx descriptors */
	entry = dirty_txl % TX_RING_SIZE;
	while (dirty_txl != cur_txl) {
		struct dma_desc *desc = &txl_descs[entry];
		struct sk_buff *skb = txl_skbuff[entry];

		if (desc->buf1 & DESC_OWN)
			break;

		if (netif_running(skb->dev)) {
			skb->dev->stats.tx_bytes += skb->len;
			skb->dev->stats.tx_packets++;
		}

		dev_kfree_skb_irq(skb);
		txl_skbuff[entry] = NULL;
		entry = (++dirty_txl) % TX_RING_SIZE;
	}

	if ((cur_txl - dirty_txl) < TX_QUEUE_LEN - 4) {
		/* wake up queue of all devices */
		for (i = 0; i < SWITCH_NUM_PORTS; i++) {
			if (!adm5120_devs[i])
				continue;
			netif_wake_queue(adm5120_devs[i]);
		}
	}
}
static irqreturn_t adm5120_poll_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	u32 status;

	status = sw_int_status();
	status &= SWITCH_INTS_POLL;
	if (!status)
		return IRQ_NONE;

	sw_dump_intr_mask("poll ints", status);

	if (!netif_running(dev)) {
		SW_DBG("device %s is not running\n", dev->name);
		return IRQ_NONE;
	}

	spin_lock(&poll_lock);
	if (!poll_dev) {
		poll_dev = dev;
		SW_DBG("enable polling mode for %s\n", dev->name);
		sw_int_mask(SWITCH_INTS_POLL);
		netif_rx_schedule(poll_dev);
	}
	spin_unlock(&poll_lock);

	return IRQ_HANDLED;
}
static irqreturn_t adm5120_switch_irq(int irq, void *dev_id)
{
	u32 status;

	status = sw_int_status();
	status &= SWITCH_INTS_ALL & ~SWITCH_INTS_POLL;
	if (!status)
		return IRQ_NONE;

	sw_int_ack(status);

	if (status & SWITCH_INT_SLD) {
		spin_lock(&sw_lock);
		adm5120_switch_tx();
		spin_unlock(&sw_lock);
	}

	return IRQ_HANDLED;
}
static void adm5120_set_vlan(char *matrix)
{
	unsigned long val;
	int vlan_port, port;

	val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24);
	sw_write_reg(SWITCH_REG_VLAN_G1, val);
	val = matrix[4] + (matrix[5]<<8);
	sw_write_reg(SWITCH_REG_VLAN_G2, val);

	/* Now set/update the port vs. device lookup table */
	for (port = 0; port < SWITCH_NUM_PORTS; port++) {
		for (vlan_port = 0; vlan_port < SWITCH_NUM_PORTS; vlan_port++)
			if (matrix[vlan_port] & (0x00000001 << port))
				break;
		if (vlan_port < SWITCH_NUM_PORTS)
			adm5120_port[port] = adm5120_devs[vlan_port];
		else
			adm5120_port[port] = NULL;
	}
}
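/*
 * Illustration (hypothetical values, assuming the common layout where
 * bits 0-5 of a matrix entry select the PHY ports and bit 6 the CPU
 * port): a matrix of { 0x41, 0x42, 0x44, 0x48, 0x50, 0x60 } creates one
 * VLAN per PHY port, each shared with the CPU port, so every port is
 * reachable through its own net_device.
 */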
static void adm5120_set_bw(char *matrix)
{
	unsigned long val;

	/* Port 0 to 3 are set using the bandwidth control 0 register */
	val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24);
	sw_write_reg(SWITCH_REG_BW_CNTL0, val);

	/* Port 4 and 5 are set using the bandwidth control 1 register */
	val = matrix[4];
	if (matrix[5] == 1)
		sw_write_reg(SWITCH_REG_BW_CNTL1, val | 0x80000000);
	else
		sw_write_reg(SWITCH_REG_BW_CNTL1, val & ~0x80000000);

	SW_DBG("D: ctl0 0x%08x, ctl1 0x%08x\n",
		sw_read_reg(SWITCH_REG_BW_CNTL0),
		sw_read_reg(SWITCH_REG_BW_CNTL1));
}
static int adm5120_switch_open(struct net_device *dev)
{
	u32 t;
	int i;

	netif_start_queue(dev);
	if (!sw_used++)
		/* enable interrupts on first open */
		sw_int_unmask(SWITCH_INTS_USED);

	/* enable (additional) port */
	t = sw_read_reg(SWITCH_REG_PORT_CONF0);
	for (i = 0; i < SWITCH_NUM_PORTS; i++) {
		if (dev == adm5120_devs[i])
			t &= ~adm5120_eth_vlans[i];
	}
	sw_write_reg(SWITCH_REG_PORT_CONF0, t);

	return 0;
}
static int adm5120_switch_stop(struct net_device *dev)
{
	u32 t;
	int i;

	if (!--sw_used)
		sw_int_mask(SWITCH_INTS_USED);

	/* disable port if not assigned to other devices */
	t = sw_read_reg(SWITCH_REG_PORT_CONF0);
	t |= SWITCH_PORTS_NOCPU;
	for (i = 0; i < SWITCH_NUM_PORTS; i++) {
		if ((dev != adm5120_devs[i]) && netif_running(adm5120_devs[i]))
			t &= ~adm5120_eth_vlans[i];
	}
	sw_write_reg(SWITCH_REG_PORT_CONF0, t);

	netif_stop_queue(dev);
	return 0;
}
static int adm5120_sw_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dma_desc *desc;
	struct adm5120_sw *priv = netdev_priv(dev);
	unsigned int entry;
	unsigned long data;

	/* lock switch irq */
	spin_lock_irq(&sw_lock);

	/* calculate the next TX descriptor entry. */
	entry = cur_txl % TX_RING_SIZE;

	desc = &txl_descs[entry];
	if (desc->buf1 & DESC_OWN) {
		/* We want to write a packet but the TX queue is still
		 * occupied by the DMA. We are faster than the DMA... */
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		spin_unlock_irq(&sw_lock);
		return 0;
	}

	txl_skbuff[entry] = skb;
	data = (desc->buf1 & DESC_EOR);
	data |= DESC_ADDR(skb->data);

	desc->misc =
		((skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len) << DESC_PKTLEN_SHIFT) |
		(0x1 << priv->port);

	desc->buflen = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;

	desc->buf1 = data | DESC_OWN;
	sw_write_reg(SWITCH_REG_SEND_TRIG, SEND_TRIG_STL);

	cur_txl++;
	if (cur_txl == dirty_txl + TX_QUEUE_LEN) {
		/* FIXME: stop queue for all devices */
		netif_stop_queue(dev);
	}

	dev->trans_start = jiffies;

	spin_unlock_irq(&sw_lock);

	return 0;
}
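/*
 * Note: frames shorter than ETH_ZLEN are not padded in memory above; the
 * descriptor simply claims ETH_ZLEN bytes, so the hardware transmits
 * whatever happens to follow skb->data up to the minimum frame size.
 */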
static void adm5120_tx_timeout(struct net_device *dev)
{
	SW_INFO("TX timeout on %s\n", dev->name);
}
static void adm5120_set_multicast_list(struct net_device *dev)
{
	struct adm5120_sw *priv = netdev_priv(dev);
	u32 ports;
	u32 t;

	ports = adm5120_eth_vlans[priv->port] & SWITCH_PORTS_NOCPU;

	t = sw_read_reg(SWITCH_REG_CPUP_CONF);
	if (dev->flags & IFF_PROMISC)
		/* enable unknown packets */
		t &= ~(ports << CPUP_CONF_DUNP_SHIFT);
	else
		/* disable unknown packets */
		t |= (ports << CPUP_CONF_DUNP_SHIFT);

	if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI ||
			dev->mc_count)
		/* enable multicast packets */
		t &= ~(ports << CPUP_CONF_DMCP_SHIFT);
	else
		/* disable multicast packets */
		t |= (ports << CPUP_CONF_DMCP_SHIFT);

	/*
	 * If there is any port configured to be in promiscuous mode, then the
	 * Bridge Test Mode has to be activated. This will result in
	 * transporting also packets learned in another VLAN to be forwarded
	 * to the CPU.
	 *
	 * The difficult scenario is when we want to build a bridge on the CPU.
	 * Assume we have port0 and the CPU port in VLAN0 and port1 and the
	 * CPU port in VLAN1. Now we build a bridge on the CPU between
	 * VLAN0 and VLAN1. Both ports of the VLANs are set in promisc mode.
	 * Now assume a packet with ethernet source address 99 enters port 0.
	 * It will be forwarded to the CPU because it is unknown. Then the
	 * bridge in the CPU will send it to VLAN1 and it goes out at port 1.
	 * When now a packet with ethernet destination address 99 comes in at
	 * port 1 in VLAN1, then the switch has learned that this address is
	 * located at port 0 in VLAN0. Therefore the switch will drop
	 * this packet. In order to avoid this and to still send the packet
	 * to the CPU, the Bridge Test Mode has to be activated.
	 */

	/* Check if there is any vlan in promisc mode. */
	if (t & (SWITCH_PORTS_NOCPU << CPUP_CONF_DUNP_SHIFT))
		t &= ~CPUP_CONF_BTM; /* Disable Bridge Testing Mode */
	else
		t |= CPUP_CONF_BTM; /* Enable Bridge Testing Mode */

	sw_write_reg(SWITCH_REG_CPUP_CONF, t);
}
static void adm5120_write_mac(struct net_device *dev)
{
	struct adm5120_sw *priv = netdev_priv(dev);
	unsigned char *mac = dev->dev_addr;
	u32 t;

	t = mac[2] | (mac[3] << MAC_WT1_MAC3_SHIFT) |
		(mac[4] << MAC_WT1_MAC4_SHIFT) | (mac[5] << MAC_WT1_MAC5_SHIFT);
	sw_write_reg(SWITCH_REG_MAC_WT1, t);

	t = (mac[0] << MAC_WT0_MAC0_SHIFT) | (mac[1] << MAC_WT0_MAC1_SHIFT) |
		MAC_WT0_MAWC | MAC_WT0_WVE | (priv->port<<3);

	sw_write_reg(SWITCH_REG_MAC_WT0, t);

	while (!(sw_read_reg(SWITCH_REG_MAC_WT0) & MAC_WT0_MWD))
		;
}
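/*
 * MAC address table write sequence, as implemented above: MAC_WT1
 * carries address bytes 2-5, MAC_WT0 carries bytes 0-1 together with the
 * port number and the write-trigger bits; the loop busy-waits until the
 * hardware signals completion via MAC_WT0_MWD.
 */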
static int adm5120_sw_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	adm5120_write_mac(dev);
	return 0;
}
*dev
, struct ifreq
*rq
, int cmd
)
784 struct adm5120_sw_info info
;
785 struct adm5120_sw
*priv
= netdev_priv(dev
);
790 info
.ports
= adm5120_nrdevs
;
791 info
.vlan
= priv
->port
;
792 err
= copy_to_user(rq
->ifr_data
, &info
, sizeof(info
));
797 if (!capable(CAP_NET_ADMIN
))
799 err
= copy_from_user(adm5120_eth_vlans
, rq
->ifr_data
,
800 sizeof(adm5120_eth_vlans
));
803 adm5120_set_vlan(adm5120_eth_vlans
);
806 err
= copy_to_user(rq
->ifr_data
, adm5120_eth_vlans
,
807 sizeof(adm5120_eth_vlans
));
812 err
= copy_to_user(rq
->ifr_data
, bw_matrix
, sizeof(bw_matrix
));
817 if (!capable(CAP_NET_ADMIN
))
819 err
= copy_from_user(bw_matrix
, rq
->ifr_data
, sizeof(bw_matrix
));
822 adm5120_set_bw(bw_matrix
);
static void adm5120_dma_tx_init(struct dma_desc *desc, struct sk_buff **skbl,
		int num)
{
	memset(desc, 0, num * sizeof(*desc));
	desc[num-1].buf1 |= DESC_EOR;
	memset(skbl, 0, sizeof(struct sk_buff *) * num);
}
static void adm5120_dma_rx_init(struct dma_desc *desc, struct sk_buff **skbl,
		int num)
{
	int i;

	memset(desc, 0, num * sizeof(*desc));
	for (i = 0; i < num; i++) {
		skbl[i] = dev_alloc_skb(SKB_ALLOC_LEN);
		if (!skbl[i])
			break;
		skb_reserve(skbl[i], SKB_RESERVE_LEN);
		adm5120_rx_dma_update(&desc[i], skbl[i], (num-1 == i));
	}
}
static void adm5120_switch_cleanup(void)
{
	int i;

	/* disable interrupts */
	sw_int_mask(SWITCH_INTS_ALL);

	for (i = 0; i < SWITCH_NUM_PORTS; i++) {
		struct net_device *dev = adm5120_devs[i];
		if (dev) {
			unregister_netdev(dev);
			free_irq(ADM5120_IRQ_SWITCH, dev);
			free_netdev(dev);
			adm5120_devs[i] = NULL;
		}
	}

	/* cleanup TX ring */
	if (txl_skbuff) {
		for (i = 0; i < TX_RING_SIZE; i++)
			if (txl_skbuff[i])
				kfree_skb(txl_skbuff[i]);
		kfree(txl_skbuff);
		txl_skbuff = NULL;
	}

	if (txl_descs) {
		dma_free_coherent(NULL, TX_DESCS_SIZE, txl_descs,
			txl_descs_dma);
		txl_descs = NULL;
	}

	/* cleanup RX ring */
	if (rxl_skbuff) {
		for (i = 0; i < RX_RING_SIZE; i++)
			if (rxl_skbuff[i])
				kfree_skb(rxl_skbuff[i]);
		kfree(rxl_skbuff);
		rxl_skbuff = NULL;
	}

	if (rxl_descs) {
		dma_free_coherent(NULL, RX_DESCS_SIZE, rxl_descs,
			rxl_descs_dma);
		rxl_descs = NULL;
	}

	free_irq(ADM5120_IRQ_SWITCH, &sw_dev);
}
static int __init adm5120_switch_init(void)
{
	struct net_device *dev;
	u32 t;
	int i, err;

	err = request_irq(ADM5120_IRQ_SWITCH, adm5120_switch_irq,
		(IRQF_SHARED | IRQF_DISABLED), "switch", &sw_dev);
	if (err) {
		SW_ERR("request_irq failed with error %d\n", err);
		goto errout;
	}

	adm5120_nrdevs = adm5120_eth_num_ports;

	t = CPUP_CONF_DCPUP | CPUP_CONF_CRCP |
		SWITCH_PORTS_NOCPU << CPUP_CONF_DUNP_SHIFT |
		SWITCH_PORTS_NOCPU << CPUP_CONF_DMCP_SHIFT;
	sw_write_reg(SWITCH_REG_CPUP_CONF, t);

	t = (SWITCH_PORTS_NOCPU << PORT_CONF0_EMCP_SHIFT) |
		(SWITCH_PORTS_NOCPU << PORT_CONF0_BP_SHIFT) |
		(SWITCH_PORTS_NOCPU);
	sw_write_reg(SWITCH_REG_PORT_CONF0, t);

	/* setup ports to Autoneg/100M/Full duplex/Auto MDIX */
	t = SWITCH_PORTS_PHY |
		(SWITCH_PORTS_PHY << PHY_CNTL2_SC_SHIFT) |
		(SWITCH_PORTS_PHY << PHY_CNTL2_DC_SHIFT) |
		(SWITCH_PORTS_PHY << PHY_CNTL2_PHYR_SHIFT) |
		(SWITCH_PORTS_PHY << PHY_CNTL2_AMDIX_SHIFT) |
		PHY_CNTL2_RMAE;
	sw_write_reg(SWITCH_REG_PHY_CNTL2, t);

	t = sw_read_reg(SWITCH_REG_PHY_CNTL3);
	t |= PHY_CNTL3_RNT;
	sw_write_reg(SWITCH_REG_PHY_CNTL3, t);

	/* force all packets from all ports to low priority */
	sw_write_reg(SWITCH_REG_PRI_CNTL, 0);

	sw_int_mask(SWITCH_INTS_ALL);
	sw_int_ack(SWITCH_INTS_ALL);

	cur_rxl = dirty_rxl = 0;
	rxl_descs = dma_alloc_coherent(NULL, RX_DESCS_SIZE, &rxl_descs_dma,
					GFP_ATOMIC);
	if (!rxl_descs) {
		err = -ENOMEM;
		goto errout;
	}

	rxl_skbuff = kzalloc(RX_SKBS_SIZE, GFP_KERNEL);
	if (!rxl_skbuff) {
		err = -ENOMEM;
		goto errout;
	}

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;

		skb = alloc_skb(SKB_ALLOC_LEN, GFP_ATOMIC);
		if (!skb) {
			err = -ENOMEM;
			goto errout;
		}
		rxl_skbuff[i] = skb;
		skb_reserve(skb, SKB_RESERVE_LEN);
	}

	cur_txl = dirty_txl = 0;
	txl_descs = dma_alloc_coherent(NULL, TX_DESCS_SIZE, &txl_descs_dma,
					GFP_ATOMIC);
	if (!txl_descs) {
		err = -ENOMEM;
		goto errout;
	}

	txl_skbuff = kzalloc(TX_SKBS_SIZE, GFP_KERNEL);
	if (!txl_skbuff) {
		err = -ENOMEM;
		goto errout;
	}

	adm5120_dma_tx_init(txl_descs, txl_skbuff, TX_RING_SIZE);
	adm5120_dma_rx_init(rxl_descs, rxl_skbuff, RX_RING_SIZE);

	sw_write_reg(SWITCH_REG_SHDA, 0);
	sw_write_reg(SWITCH_REG_SLDA, KSEG1ADDR(txl_descs));
	sw_write_reg(SWITCH_REG_RHDA, 0);
	sw_write_reg(SWITCH_REG_RLDA, KSEG1ADDR(rxl_descs));

	for (i = 0; i < SWITCH_NUM_PORTS; i++) {
		adm5120_devs[i] = alloc_etherdev(sizeof(struct adm5120_sw));
		if (!adm5120_devs[i]) {
			err = -ENOMEM;
			goto errout;
		}

		dev = adm5120_devs[i];
		err = request_irq(ADM5120_IRQ_SWITCH, adm5120_poll_irq,
			(IRQF_SHARED | IRQF_DISABLED), dev->name, dev);
		if (err) {
			SW_ERR("unable to get irq for %s\n", dev->name);
			goto errout;
		}

		SET_MODULE_OWNER(dev);
		memset(netdev_priv(dev), 0, sizeof(struct adm5120_sw));
		((struct adm5120_sw *)netdev_priv(dev))->port = i;
		dev->base_addr = ADM5120_SWITCH_BASE;
		dev->irq = ADM5120_IRQ_SWITCH;
		dev->open = adm5120_switch_open;
		dev->hard_start_xmit = adm5120_sw_start_xmit;
		dev->stop = adm5120_switch_stop;
		dev->set_multicast_list = adm5120_set_multicast_list;
		dev->do_ioctl = adm5120_do_ioctl;
		dev->tx_timeout = adm5120_tx_timeout;
		dev->watchdog_timeo = TX_TIMEOUT;
		dev->set_mac_address = adm5120_sw_set_mac_address;
		dev->poll = adm5120_switch_poll;
		dev->weight = 64;

		memcpy(dev->dev_addr, adm5120_eth_macs[i], 6);
		adm5120_write_mac(dev);

		err = register_netdev(dev);
		if (err) {
			SW_INFO("%s register failed, error=%d\n",
					dev->name, err);
			goto errout;
		}
		SW_INFO("%s created for switch port%d\n", dev->name, i);
	}

	/* setup vlan/port mapping after devs are filled up */
	adm5120_set_vlan(adm5120_eth_vlans);

	/* enable CPU port */
	t = sw_read_reg(SWITCH_REG_CPUP_CONF);
	t &= ~CPUP_CONF_DCPUP;
	sw_write_reg(SWITCH_REG_CPUP_CONF, t);

	return 0;

errout:
	adm5120_switch_cleanup();

	SW_ERR("init failed\n");
	return err;
}
static void __exit adm5120_switch_exit(void)
{
	adm5120_switch_cleanup();
}
module_init(adm5120_switch_init);
module_exit(adm5120_switch_exit);
);