/*
 * ADM5120 built in ethernet switch driver
 *
 * Copyright Jeroen Vreeken (pe1rxq@amsat.org), 2005
 *
 * Inspiration for this driver came from the original ADMtek 2.4
 * driver, Copyright ADMtek Inc.
 *
 * NAPI extensions by Thomas Langer (Thomas.Langer@infineon.com)
 * and Friedrich Beckmann (Friedrich.Beckmann@infineon.com), 2007
 *
 * TODO: Add support for the high prio queues (currently disabled)
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include <linux/io.h>
#include <linux/irq.h>

#include <asm/mipsregs.h>

#include <adm5120_info.h>
#include <adm5120_defs.h>
#include <adm5120_irq.h>
#include <adm5120_switch.h>

#include "adm5120sw.h"

#define DRV_NAME	"adm5120-switch"
#define DRV_DESC	"ADM5120 built-in ethernet switch driver"
#define DRV_VERSION	"0.1.0"

MODULE_AUTHOR("Jeroen Vreeken (pe1rxq@amsat.org)");
MODULE_DESCRIPTION("ADM5120 ethernet switch driver");
MODULE_LICENSE("GPL");

/* ------------------------------------------------------------------------ */

#if 0 /*def ADM5120_SWITCH_DEBUG*/
#define SW_DBG(f, a...)		printk(KERN_DEBUG "%s: " f, DRV_NAME , ## a)
#else
#define SW_DBG(f, a...)		do {} while (0)
#endif
#define SW_ERR(f, a...)		printk(KERN_ERR "%s: " f, DRV_NAME , ## a)
#define SW_INFO(f, a...)	printk(KERN_INFO "%s: " f, DRV_NAME , ## a)

#define SWITCH_NUM_PORTS	6
#define ETH_CSUM_LEN		4

#define RX_MAX_PKTLEN		1550
#define RX_RING_SIZE		64

#define TX_RING_SIZE		32
#define TX_QUEUE_LEN		28	/* Limit ring entries actually used. */
#define TX_TIMEOUT		(HZ * 400)

#define RX_DESCS_SIZE	(RX_RING_SIZE * sizeof(struct dma_desc))
#define RX_SKBS_SIZE	(RX_RING_SIZE * sizeof(struct sk_buff *))
#define TX_DESCS_SIZE	(TX_RING_SIZE * sizeof(struct dma_desc))
#define TX_SKBS_SIZE	(TX_RING_SIZE * sizeof(struct sk_buff *))

#define SKB_ALLOC_LEN		(RX_MAX_PKTLEN + 32)
#define SKB_RESERVE_LEN		(NET_IP_ALIGN + NET_SKB_PAD)

#define SWITCH_INTS_HIGH	(SWITCH_INT_SHD | SWITCH_INT_RHD | SWITCH_INT_HDF)
#define SWITCH_INTS_LOW		(SWITCH_INT_SLD | SWITCH_INT_RLD | SWITCH_INT_LDF)
#define SWITCH_INTS_ERR		(SWITCH_INT_RDE | SWITCH_INT_SDE | SWITCH_INT_CPUH)
#define SWITCH_INTS_Q		(SWITCH_INT_P0QF | SWITCH_INT_P1QF | SWITCH_INT_P2QF | \
				SWITCH_INT_P3QF | SWITCH_INT_P4QF | SWITCH_INT_P5QF | \
				SWITCH_INT_CPQF | SWITCH_INT_GQF)

#define SWITCH_INTS_ALL		(SWITCH_INTS_HIGH | SWITCH_INTS_LOW | \
				SWITCH_INTS_ERR | SWITCH_INTS_Q | \
				SWITCH_INT_MD | SWITCH_INT_PSC)

#define SWITCH_INTS_USED	(SWITCH_INTS_LOW | SWITCH_INT_PSC)
#define SWITCH_INTS_POLL	(SWITCH_INT_RLD | SWITCH_INT_LDF)

/* ------------------------------------------------------------------------ */

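/*
 * Hardware DMA descriptor, as used by the rest of this file: buf1 carries
 * the buffer address in its low 25 bits together with the ownership and
 * end-of-ring flags. The CPU fills a descriptor and sets DESC_OWN to hand
 * it to the switch; the switch clears DESC_OWN once it has consumed (TX)
 * or filled (RX) the buffer. The misc word is interpreted differently for
 * TX and RX descriptors, see the definitions below.
 */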
struct dma_desc {
	__u32			buf1;
#define DESC_OWN		(1UL << 31)	/* Owned by the switch */
#define DESC_EOR		(1UL << 28)	/* End of Ring */
#define DESC_ADDR_MASK		0x1FFFFFF
#define DESC_ADDR(x)		((__u32)(x) & DESC_ADDR_MASK)
	__u32			buf2;
#define DESC_BUF2_EN		(1UL << 31)	/* Buffer 2 enable */
	__u32			buflen;
	__u32			misc;
/* definitions for tx/rx descriptors */
#define DESC_PKTLEN_SHIFT	16
#define DESC_PKTLEN_MASK	0x7FF
/* tx descriptor specific part */
#define DESC_CSUM		(1UL << 31)	/* Append checksum */
#define DESC_DSTPORT_SHIFT	8
#define DESC_DSTPORT_MASK	0x3F
#define DESC_VLAN_MASK		0x3F
/* rx descriptor specific part */
#define DESC_SRCPORT_SHIFT	12
#define DESC_SRCPORT_MASK	0x7
#define DESC_DA_MASK		0x3
#define DESC_DA_SHIFT		4
#define DESC_IPCSUM_FAIL	(1UL << 3)	/* IP checksum fail */
#define DESC_VLAN_TAG		(1UL << 2)	/* VLAN tag present */
#define DESC_TYPE_MASK		0x3		/* mask for Packet type */
#define DESC_TYPE_IP		0x0		/* IP packet */
#define DESC_TYPE_PPPoE		0x1		/* PPPoE packet */
} __attribute__ ((aligned(16)));

static inline u32 desc_get_srcport(struct dma_desc *desc)
{
	return (desc->misc >> DESC_SRCPORT_SHIFT) & DESC_SRCPORT_MASK;
}

static inline u32 desc_get_pktlen(struct dma_desc *desc)
{
	return (desc->misc >> DESC_PKTLEN_SHIFT) & DESC_PKTLEN_MASK;
}

static inline int desc_ipcsum_fail(struct dma_desc *desc)
{
	return ((desc->misc & DESC_IPCSUM_FAIL) != 0);
}

/* ------------------------------------------------------------------------ */

/* default settings - unlimited TX and RX on all ports, default shaper mode */
static unsigned char bw_matrix[SWITCH_NUM_PORTS] = {
	0, 0, 0, 0, 0, 0
};

static int adm5120_nrdevs;

static struct net_device *adm5120_devs[SWITCH_NUM_PORTS];
/* Lookup table port -> device */
static struct net_device *adm5120_port[SWITCH_NUM_PORTS];

static struct dma_desc *txl_descs;
static struct dma_desc *rxl_descs;

static dma_addr_t txl_descs_dma;
static dma_addr_t rxl_descs_dma;

static struct sk_buff **txl_skbuff;
static struct sk_buff **rxl_skbuff;

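/*
 * The cur_*/dirty_* pairs below are free-running counters: the ring slot
 * for a counter value is (counter % RING_SIZE), and (cur - dirty) is the
 * number of ring entries currently in flight and not yet reclaimed.
 */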
static unsigned int cur_rxl, dirty_rxl; /* producer/consumer ring indices */
static unsigned int cur_txl, dirty_txl;

static unsigned int sw_used;

static DEFINE_SPINLOCK(sw_lock);
static DEFINE_SPINLOCK(poll_lock);

static struct net_device sw_dev;
static struct net_device *poll_dev;

/* ------------------------------------------------------------------------ */

static inline u32 sw_read_reg(u32 reg)
{
	return __raw_readl((void __iomem *)KSEG1ADDR(ADM5120_SWITCH_BASE)+reg);
}

static inline void sw_write_reg(u32 reg, u32 val)
{
	__raw_writel(val, (void __iomem *)KSEG1ADDR(ADM5120_SWITCH_BASE)+reg);
}

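/*
 * As used throughout this driver, a set bit in SWITCH_REG_INT_MASK masks
 * (disables) the corresponding interrupt source: sw_int_mask() ORs bits
 * in, sw_int_unmask() clears them, and sw_int_status() filters the raw
 * status word with the inverted mask register.
 */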
static inline void sw_int_mask(u32 mask)
{
	u32 t;

	t = sw_read_reg(SWITCH_REG_INT_MASK);
	t |= mask;
	sw_write_reg(SWITCH_REG_INT_MASK, t);
}

static inline void sw_int_unmask(u32 mask)
{
	u32 t;

	t = sw_read_reg(SWITCH_REG_INT_MASK);
	t &= ~mask;
	sw_write_reg(SWITCH_REG_INT_MASK, t);
}

static inline void sw_int_ack(u32 mask)
{
	sw_write_reg(SWITCH_REG_INT_STATUS, mask);
}

static inline u32 sw_int_status(void)
{
	u32 t;

	t = sw_read_reg(SWITCH_REG_INT_STATUS);
	t &= ~sw_read_reg(SWITCH_REG_INT_MASK);
	return t;
}

/* ------------------------------------------------------------------------ */

static void sw_dump_desc(char *label, struct dma_desc *desc, int tx)
{
	u32 t;

	SW_DBG("%s %s desc/%p\n", label, tx ? "tx" : "rx", desc);

	t = desc->buf1;
	SW_DBG(" buf1 %08X addr=%08X; len=%08X %s%s\n", t,
		t & DESC_ADDR_MASK,
		desc->buflen,
		(t & DESC_OWN) ? "SWITCH" : "CPU",
		(t & DESC_EOR) ? " RE" : "");

	t = desc->buf2;
	SW_DBG(" buf2 %08X addr=%08X%s\n", desc->buf2,
		t & DESC_ADDR_MASK,
		(t & DESC_BUF2_EN) ? " EN" : "");

	t = desc->misc;
	if (tx)
		SW_DBG(" misc %08X%s pktlen=%04X ports=%02X vlan=%02X\n", t,
			(t & DESC_CSUM) ? " CSUM" : "",
			(t >> DESC_PKTLEN_SHIFT) & DESC_PKTLEN_MASK,
			(t >> DESC_DSTPORT_SHIFT) & DESC_DSTPORT_MASK,
			t & DESC_VLAN_MASK);
	else
		SW_DBG(" misc %08X pktlen=%04X port=%d DA=%d%s%s type=%d\n",
			t,
			(t >> DESC_PKTLEN_SHIFT) & DESC_PKTLEN_MASK,
			(t >> DESC_SRCPORT_SHIFT) & DESC_SRCPORT_MASK,
			(t >> DESC_DA_SHIFT) & DESC_DA_MASK,
			(t & DESC_IPCSUM_FAIL) ? " IPCF" : "",
			(t & DESC_VLAN_TAG) ? " VLAN" : "",
			(t & DESC_TYPE_MASK));
}

static void sw_dump_intr_mask(char *label, u32 mask)
{
	SW_DBG("%s %08X%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
		label, mask,
		(mask & SWITCH_INT_SHD) ? " SHD" : "",
		(mask & SWITCH_INT_SLD) ? " SLD" : "",
		(mask & SWITCH_INT_RHD) ? " RHD" : "",
		(mask & SWITCH_INT_RLD) ? " RLD" : "",
		(mask & SWITCH_INT_HDF) ? " HDF" : "",
		(mask & SWITCH_INT_LDF) ? " LDF" : "",
		(mask & SWITCH_INT_P0QF) ? " P0QF" : "",
		(mask & SWITCH_INT_P1QF) ? " P1QF" : "",
		(mask & SWITCH_INT_P2QF) ? " P2QF" : "",
		(mask & SWITCH_INT_P3QF) ? " P3QF" : "",
		(mask & SWITCH_INT_P4QF) ? " P4QF" : "",
		(mask & SWITCH_INT_CPQF) ? " CPQF" : "",
		(mask & SWITCH_INT_GQF) ? " GQF" : "",
		(mask & SWITCH_INT_MD) ? " MD" : "",
		(mask & SWITCH_INT_BCS) ? " BCS" : "",
		(mask & SWITCH_INT_PSC) ? " PSC" : "",
		(mask & SWITCH_INT_ID) ? " ID" : "",
		(mask & SWITCH_INT_W0TE) ? " W0TE" : "",
		(mask & SWITCH_INT_W1TE) ? " W1TE" : "",
		(mask & SWITCH_INT_RDE) ? " RDE" : "",
		(mask & SWITCH_INT_SDE) ? " SDE" : "",
		(mask & SWITCH_INT_CPUH) ? " CPUH" : "");
}

static void sw_dump_regs(void)
{
	u32 t;

	t = SW_READ_REG(PHY_STATUS);
	SW_DBG("phy_status: %08X\n", t);

	t = SW_READ_REG(CPUP_CONF);
	SW_DBG("cpup_conf: %08X%s%s%s\n", t,
		(t & CPUP_CONF_DCPUP) ? " DCPUP" : "",
		(t & CPUP_CONF_CRCP) ? " CRCP" : "",
		(t & CPUP_CONF_BTM) ? " BTM" : "");

	t = SW_READ_REG(PORT_CONF0);
	SW_DBG("port_conf0: %08X\n", t);
	t = SW_READ_REG(PORT_CONF1);
	SW_DBG("port_conf1: %08X\n", t);
	t = SW_READ_REG(PORT_CONF2);
	SW_DBG("port_conf2: %08X\n", t);

	t = SW_READ_REG(VLAN_G1);
	SW_DBG("vlan g1: %08X\n", t);
	t = SW_READ_REG(VLAN_G2);
	SW_DBG("vlan g2: %08X\n", t);

	t = SW_READ_REG(BW_CNTL0);
	SW_DBG("bw_cntl0: %08X\n", t);
	t = SW_READ_REG(BW_CNTL1);
	SW_DBG("bw_cntl1: %08X\n", t);

	t = SW_READ_REG(PHY_CNTL0);
	SW_DBG("phy_cntl0: %08X\n", t);
	t = SW_READ_REG(PHY_CNTL1);
	SW_DBG("phy_cntl1: %08X\n", t);
	t = SW_READ_REG(PHY_CNTL2);
	SW_DBG("phy_cntl2: %08X\n", t);
	t = SW_READ_REG(PHY_CNTL3);
	SW_DBG("phy_cntl3: %08X\n", t);
	t = SW_READ_REG(PHY_CNTL4);
	SW_DBG("phy_cntl4: %08X\n", t);

	t = SW_READ_REG(INT_STATUS);
	sw_dump_intr_mask("int_status: ", t);

	t = SW_READ_REG(INT_MASK);
	sw_dump_intr_mask("int_mask: ", t);

	t = SW_READ_REG(SHDA);
	SW_DBG("shda: %08X\n", t);
	t = SW_READ_REG(SLDA);
	SW_DBG("slda: %08X\n", t);
	t = SW_READ_REG(RHDA);
	SW_DBG("rhda: %08X\n", t);
	t = SW_READ_REG(RLDA);
	SW_DBG("rlda: %08X\n", t);
}

/* ------------------------------------------------------------------------ */

static inline void adm5120_rx_dma_update(struct dma_desc *desc,
		struct sk_buff *skb, int end)
{
	desc->misc = 0;
	desc->buf2 = 0;
	desc->buflen = RX_MAX_PKTLEN;
	desc->buf1 = DESC_ADDR(skb->data) |
		DESC_OWN | (end ? DESC_EOR : 0);
}

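/*
 * Replenish the low-priority RX ring: for every slot the CPU has consumed
 * (between dirty_rxl and cur_rxl), allocate a fresh skb if the old one was
 * passed up the stack, then hand the descriptor back to the switch by
 * setting DESC_OWN again.
 */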
static void adm5120_switch_rx_refill(void)
{
	unsigned int entry;

	for (; cur_rxl - dirty_rxl > 0; dirty_rxl++) {
		struct dma_desc *desc;
		struct sk_buff *skb;

		entry = dirty_rxl % RX_RING_SIZE;
		desc = &rxl_descs[entry];

		skb = rxl_skbuff[entry];
		if (skb == NULL) {
			skb = alloc_skb(SKB_ALLOC_LEN, GFP_ATOMIC);
			if (skb) {
				skb_reserve(skb, SKB_RESERVE_LEN);
				rxl_skbuff[entry] = skb;
			} else {
				SW_ERR("no memory for skb\n");
				desc->buflen = 0;
				desc->buf2 = 0;
				desc->misc = 0;
				desc->buf1 = (desc->buf1 & DESC_EOR) | DESC_OWN;
				break;
			}
		}

		desc->buf2 = 0;
		desc->buflen = RX_MAX_PKTLEN;
		desc->misc = 0;
		desc->buf1 = (desc->buf1 & DESC_EOR) | DESC_OWN |
			DESC_ADDR(skb->data);
	}
}

static int adm5120_switch_rx(int limit)
{
	unsigned int done = 0;

	SW_DBG("rx start, limit=%d, cur_rxl=%u, dirty_rxl=%u\n",
		limit, cur_rxl, dirty_rxl);

	sw_int_ack(SWITCH_INTS_POLL);

	while (done < limit) {
		int entry = cur_rxl % RX_RING_SIZE;
		struct dma_desc *desc = &rxl_descs[entry];
		struct net_device *rdev;
		unsigned int port;

		if (desc->buf1 & DESC_OWN)
			break;

		if (dirty_rxl + RX_RING_SIZE == cur_rxl)
			break;

		port = desc_get_srcport(desc);
		rdev = adm5120_port[port];

		SW_DBG("rx descriptor %u, desc=%p, skb=%p\n", entry, desc,
			rxl_skbuff[entry]);

		if ((rdev) && netif_running(rdev)) {
			struct sk_buff *skb = rxl_skbuff[entry];
			int pktlen;

			pktlen = desc_get_pktlen(desc);
			pktlen -= ETH_CSUM_LEN;

			if ((pktlen == 0) || desc_ipcsum_fail(desc)) {
				rdev->stats.rx_errors++;
				if (pktlen == 0)
					rdev->stats.rx_length_errors++;
				if (desc_ipcsum_fail(desc))
					rdev->stats.rx_crc_errors++;
				SW_DBG("rx error, recycling skb %u\n", entry);
			} else {
				skb_put(skb, pktlen);

				skb->dev = rdev;
				skb->protocol = eth_type_trans(skb, rdev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;

				dma_cache_wback_inv((unsigned long)skb->data,
					skb->len);

				netif_receive_skb(skb);

				rdev->last_rx = jiffies;
				rdev->stats.rx_packets++;
				rdev->stats.rx_bytes += pktlen;

				rxl_skbuff[entry] = NULL;
				done++;
			}
		} else {
			SW_DBG("no rx device, recycling skb %u\n", entry);
		}

		cur_rxl++;
		if (cur_rxl - dirty_rxl > RX_RING_SIZE / 4)
			adm5120_switch_rx_refill();
	}

	adm5120_switch_rx_refill();

	SW_DBG("rx finished, cur_rxl=%u, dirty_rxl=%u, processed %d\n",
		cur_rxl, dirty_rxl, done);

	return done;
}

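/*
 * Old-style NAPI poll callback (dev->poll/dev->quota, as used by kernels
 * of this driver's era). adm5120_poll_irq() masks the SWITCH_INTS_POLL
 * sources and schedules this handler; once the RX ring has been drained
 * below the budget and no poll interrupt is pending, polling is stopped
 * and the interrupts are unmasked again.
 */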
static int adm5120_switch_poll(struct net_device *dev, int *budget)
{
	int limit = min(dev->quota, *budget);
	int done;
	u32 status;

	done = adm5120_switch_rx(limit);

	*budget -= done;
	dev->quota -= done;

	status = sw_int_status() & SWITCH_INTS_POLL;
	if ((done < limit) && (!status)) {
		spin_lock_irq(&poll_lock);
		SW_DBG("disable polling mode for %s\n", poll_dev->name);
		netif_rx_complete(poll_dev);
		sw_int_unmask(SWITCH_INTS_POLL);
		poll_dev = NULL;
		spin_unlock_irq(&poll_lock);
		return 0;
	}

	return 1;
}

static void adm5120_switch_tx(void)
{
	unsigned int entry;

	/* find and cleanup dirty tx descriptors */
	entry = dirty_txl % TX_RING_SIZE;
	while (dirty_txl != cur_txl) {
		struct dma_desc *desc = &txl_descs[entry];
		struct sk_buff *skb = txl_skbuff[entry];

		if (desc->buf1 & DESC_OWN)
			break;

		if (netif_running(skb->dev)) {
			skb->dev->stats.tx_bytes += skb->len;
			skb->dev->stats.tx_packets++;
		}

		dev_kfree_skb_irq(skb);
		txl_skbuff[entry] = NULL;
		entry = (++dirty_txl) % TX_RING_SIZE;
	}

	if ((cur_txl - dirty_txl) < TX_QUEUE_LEN - 4) {
		/* wake up queue of all devices */
		int i;
		for (i = 0; i < SWITCH_NUM_PORTS; i++) {
			if (!adm5120_devs[i])
				continue;
			netif_wake_queue(adm5120_devs[i]);
		}
	}
}

static irqreturn_t adm5120_poll_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	u32 status;

	status = sw_int_status();
	status &= SWITCH_INTS_POLL;
	if (!status)
		return IRQ_NONE;

	sw_dump_intr_mask("poll ints", status);

	if (!netif_running(dev)) {
		SW_DBG("device %s is not running\n", dev->name);
		return IRQ_NONE;
	}

	spin_lock(&poll_lock);
	if (!poll_dev) {
		SW_DBG("enable polling mode for %s\n", dev->name);
		poll_dev = dev;
		sw_int_mask(SWITCH_INTS_POLL);
		netif_rx_schedule(poll_dev);
	}
	spin_unlock(&poll_lock);

	return IRQ_HANDLED;
}

static irqreturn_t adm5120_switch_irq(int irq, void *dev_id)
{
	u32 status;

	status = sw_int_status();
	status &= SWITCH_INTS_ALL & ~SWITCH_INTS_POLL;
	if (!status)
		return IRQ_NONE;

	sw_int_ack(status);

	if (status & SWITCH_INT_SLD) {
		spin_lock(&sw_lock);
		adm5120_switch_tx();
		spin_unlock(&sw_lock);
	}

	return IRQ_HANDLED;
}

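/*
 * The VLAN "matrix" used below (and exchanged via the SIOC[SG]MATRIX
 * ioctls) is an array of SWITCH_NUM_PORTS bytes, one per logical ethernet
 * device: each byte is a bitmask of the physical switch ports belonging
 * to that device/VLAN. Bytes 0-3 are written to VLAN_G1 and bytes 4-5 to
 * VLAN_G2, and the port -> net_device lookup table is rebuilt from the
 * same bitmasks.
 */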
static void adm5120_set_vlan(char *matrix)
{
	unsigned long val;
	int vlan_port, port;

	val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24);
	sw_write_reg(SWITCH_REG_VLAN_G1, val);
	val = matrix[4] + (matrix[5]<<8);
	sw_write_reg(SWITCH_REG_VLAN_G2, val);

	/* Now set/update the port vs. device lookup table */
	for (port = 0; port < SWITCH_NUM_PORTS; port++) {
		for (vlan_port = 0; vlan_port < SWITCH_NUM_PORTS &&
			!(matrix[vlan_port] & (0x00000001 << port)); vlan_port++)
			;
		if (vlan_port < SWITCH_NUM_PORTS)
			adm5120_port[port] = adm5120_devs[vlan_port];
		else
			adm5120_port[port] = NULL;
	}
}

static void adm5120_set_bw(char *matrix)
{
	unsigned long val;

	/* Port 0 to 3 are set using the bandwidth control 0 register */
	val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24);
	sw_write_reg(SWITCH_REG_BW_CNTL0, val);

	/* Port 4 and 5 are set using the bandwidth control 1 register */
	val = matrix[4];
	if (matrix[5] == 1)
		sw_write_reg(SWITCH_REG_BW_CNTL1, val | 0x80000000);
	else
		sw_write_reg(SWITCH_REG_BW_CNTL1, val & ~0x80000000);

	SW_DBG("D: ctl0 0x%08x, ctl1 0x%08x\n",
		sw_read_reg(SWITCH_REG_BW_CNTL0),
		sw_read_reg(SWITCH_REG_BW_CNTL1));
}

static int adm5120_switch_open(struct net_device *dev)
{
	u32 t;
	int i;

	netif_start_queue(dev);
	if (!sw_used++)
		/* enable interrupts on first open */
		sw_int_unmask(SWITCH_INTS_USED);

	/* enable (additional) port */
	t = sw_read_reg(SWITCH_REG_PORT_CONF0);
	for (i = 0; i < SWITCH_NUM_PORTS; i++) {
		if (dev == adm5120_devs[i])
			t &= ~adm5120_eth_vlans[i];
	}
	sw_write_reg(SWITCH_REG_PORT_CONF0, t);

	return 0;
}

static int adm5120_switch_stop(struct net_device *dev)
{
	u32 t;
	int i;

	if (!--sw_used)
		sw_int_mask(SWITCH_INTS_USED);

	/* disable port if not assigned to other devices */
	t = sw_read_reg(SWITCH_REG_PORT_CONF0);
	t |= SWITCH_PORTS_NOCPU;
	for (i = 0; i < SWITCH_NUM_PORTS; i++) {
		if ((dev != adm5120_devs[i]) && netif_running(adm5120_devs[i]))
			t &= ~adm5120_eth_vlans[i];
	}
	sw_write_reg(SWITCH_REG_PORT_CONF0, t);

	netif_stop_queue(dev);
	return 0;
}

static int adm5120_sw_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dma_desc *desc;
	struct adm5120_sw *priv = netdev_priv(dev);
	unsigned int entry;
	unsigned long data;

	/* lock switch irq */
	spin_lock_irq(&sw_lock);

	/* calculate the next TX descriptor entry. */
	entry = cur_txl % TX_RING_SIZE;

	desc = &txl_descs[entry];
	if (desc->buf1 & DESC_OWN) {
		/* We want to write a packet but the TX queue is still
		 * occupied by the DMA. We are faster than the DMA... */
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		spin_unlock_irq(&sw_lock);
		return 0;
	}

	txl_skbuff[entry] = skb;
	data = (desc->buf1 & DESC_EOR);
	data |= DESC_ADDR(skb->data);

	desc->misc =
		((skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len) << DESC_PKTLEN_SHIFT) |
		(0x1 << priv->port);

	desc->buflen = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;

	desc->buf1 = data | DESC_OWN;
	sw_write_reg(SWITCH_REG_SEND_TRIG, SEND_TRIG_STL);

	cur_txl++;
	if (cur_txl == dirty_txl + TX_QUEUE_LEN) {
		/* FIXME: stop queue for all devices */
		netif_stop_queue(dev);
	}

	dev->trans_start = jiffies;

	spin_unlock_irq(&sw_lock);

	return 0;
}

static void adm5120_tx_timeout(struct net_device *dev)
{
	SW_INFO("TX timeout on %s\n", dev->name);
}

static void adm5120_set_multicast_list(struct net_device *dev)
{
	struct adm5120_sw *priv = netdev_priv(dev);
	u32 ports;
	u32 t;

	ports = adm5120_eth_vlans[priv->port] & SWITCH_PORTS_NOCPU;

	t = sw_read_reg(SWITCH_REG_CPUP_CONF);
	if (dev->flags & IFF_PROMISC)
		/* enable unknown packets */
		t &= ~(ports << CPUP_CONF_DUNP_SHIFT);
	else
		/* disable unknown packets */
		t |= (ports << CPUP_CONF_DUNP_SHIFT);

	if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI ||
			dev->mc_count)
		/* enable multicast packets */
		t &= ~(ports << CPUP_CONF_DMCP_SHIFT);
	else
		/* disable multicast packets */
		t |= (ports << CPUP_CONF_DMCP_SHIFT);

	/*
	 * If any port is configured to be in promiscuous mode, the Bridge
	 * Test Mode has to be activated. This results in packets learned in
	 * another VLAN also being forwarded to the CPU.
	 *
	 * The difficult scenario is when we want to build a bridge on the
	 * CPU. Assume we have port0 and the CPU port in VLAN0, and port1 and
	 * the CPU port in VLAN1. Now we build a bridge on the CPU between
	 * VLAN0 and VLAN1. Both ports of the VLANs are set in promisc mode.
	 * Now assume a packet with ethernet source address 99 enters port 0.
	 * It will be forwarded to the CPU because it is unknown. Then the
	 * bridge in the CPU will send it to VLAN1 and it goes out at port 1.
	 * When a packet with ethernet destination address 99 now comes in at
	 * port 1 in VLAN1, the switch has learned that this address is
	 * located at port 0 in VLAN0, so it will drop the packet. In order
	 * to avoid this and still deliver the packet to the CPU, the Bridge
	 * Test Mode has to be activated.
	 */

	/* Check if there is any vlan in promisc mode. */
	if (t & (SWITCH_PORTS_NOCPU << CPUP_CONF_DUNP_SHIFT))
		t &= ~CPUP_CONF_BTM;	/* Disable Bridge Testing Mode */
	else
		t |= CPUP_CONF_BTM;	/* Enable Bridge Testing Mode */

	sw_write_reg(SWITCH_REG_CPUP_CONF, t);
}

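/*
 * Write the MAC address of a logical device into the switch's address
 * table: bytes 2..5 of the address go into the MAC_WT1 register, bytes
 * 0..1 together with the control bits (MAC_WT0_MAWC | MAC_WT0_WVE) and
 * the port number go into MAC_WT0, then the function polls MAC_WT0_MWD
 * until the hardware reports that the write has completed.
 */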
static void adm5120_write_mac(struct net_device *dev)
{
	struct adm5120_sw *priv = netdev_priv(dev);
	unsigned char *mac = dev->dev_addr;
	u32 t;

	t = mac[2] | (mac[3] << MAC_WT1_MAC3_SHIFT) |
		(mac[4] << MAC_WT1_MAC4_SHIFT) | (mac[5] << MAC_WT1_MAC5_SHIFT);
	sw_write_reg(SWITCH_REG_MAC_WT1, t);

	t = (mac[0] << MAC_WT0_MAC0_SHIFT) | (mac[1] << MAC_WT0_MAC1_SHIFT) |
		MAC_WT0_MAWC | MAC_WT0_WVE | (priv->port << 3);

	sw_write_reg(SWITCH_REG_MAC_WT0, t);

	while (!(sw_read_reg(SWITCH_REG_MAC_WT0) & MAC_WT0_MWD))
		;
}

static int adm5120_sw_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	adm5120_write_mac(dev);
	return 0;
}

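/*
 * The private SIOC* ioctls handled below exchange fixed-size blobs through
 * ifr_data. A minimal, illustrative userspace sketch (not part of this
 * driver; "eth0" and sockfd are example values, and the buffer size must
 * match sizeof(adm5120_eth_vlans)):
 *
 *	unsigned char matrix[6];
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)matrix;
 *	ioctl(sockfd, SIOCGMATRIX, &ifr);
 */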
static int adm5120_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	int err;
	struct adm5120_sw_info info;
	struct adm5120_sw *priv = netdev_priv(dev);

	switch (cmd) {
	case SIOCGADMINFO:
		info.magic = 0x5120;
		info.ports = adm5120_nrdevs;
		info.vlan = priv->port;
		err = copy_to_user(rq->ifr_data, &info, sizeof(info));
		if (err)
			return -EFAULT;
		break;
	case SIOCSMATRIX:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		err = copy_from_user(adm5120_eth_vlans, rq->ifr_data,
			sizeof(adm5120_eth_vlans));
		if (err)
			return -EFAULT;
		adm5120_set_vlan(adm5120_eth_vlans);
		break;
	case SIOCGMATRIX:
		err = copy_to_user(rq->ifr_data, adm5120_eth_vlans,
			sizeof(adm5120_eth_vlans));
		if (err)
			return -EFAULT;
		break;
	case SIOCGETBW:
		err = copy_to_user(rq->ifr_data, bw_matrix, sizeof(bw_matrix));
		if (err)
			return -EFAULT;
		break;
	case SIOCSETBW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		err = copy_from_user(bw_matrix, rq->ifr_data, sizeof(bw_matrix));
		if (err)
			return -EFAULT;
		adm5120_set_bw(bw_matrix);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static void adm5120_dma_tx_init(struct dma_desc *desc, struct sk_buff **skbl,
		int num)
{
	memset(desc, 0, num * sizeof(*desc));
	desc[num-1].buf1 |= DESC_EOR;
	memset(skbl, 0, sizeof(struct sk_buff *) * num);
}

static void adm5120_dma_rx_init(struct dma_desc *desc, struct sk_buff **skbl,
		int num)
{
	int i;

	memset(desc, 0, num * sizeof(*desc));
	for (i = 0; i < num; i++) {
		/* reuse buffers already provided by the caller, only
		 * allocate the missing ones */
		if (skbl[i] == NULL) {
			skbl[i] = dev_alloc_skb(SKB_ALLOC_LEN);
			if (skbl[i] == NULL)
				break;
			skb_reserve(skbl[i], SKB_RESERVE_LEN);
		}
		adm5120_rx_dma_update(&desc[i], skbl[i], (num-1 == i));
	}
}

static void adm5120_switch_cleanup(void)
{
	int i;

	/* disable interrupts */
	sw_int_mask(SWITCH_INTS_ALL);

	for (i = 0; i < SWITCH_NUM_PORTS; i++) {
		struct net_device *dev = adm5120_devs[i];
		if (dev) {
			unregister_netdev(dev);
			free_irq(ADM5120_IRQ_SWITCH, dev);
			free_netdev(dev);
		}
	}

	/* cleanup TX ring */
	if (txl_skbuff) {
		for (i = 0; i < TX_RING_SIZE; i++)
			if (txl_skbuff[i])
				kfree_skb(txl_skbuff[i]);
		kfree(txl_skbuff);
	}

	if (txl_descs)
		dma_free_coherent(NULL, TX_DESCS_SIZE, txl_descs,
			txl_descs_dma);

	/* cleanup RX ring */
	if (rxl_skbuff) {
		for (i = 0; i < RX_RING_SIZE; i++)
			if (rxl_skbuff[i])
				kfree_skb(rxl_skbuff[i]);
		kfree(rxl_skbuff);
	}

	if (rxl_descs)
		dma_free_coherent(NULL, RX_DESCS_SIZE, rxl_descs,
			rxl_descs_dma);

	free_irq(ADM5120_IRQ_SWITCH, &sw_dev);
}

static int __init adm5120_switch_init(void)
{
	struct net_device *dev;
	u32 t;
	int i, err;

	err = request_irq(ADM5120_IRQ_SWITCH, adm5120_switch_irq,
		(IRQF_SHARED | IRQF_DISABLED), "switch", &sw_dev);
	if (err) {
		SW_ERR("request_irq failed with error %d\n", err);
		goto err;
	}

	adm5120_nrdevs = adm5120_eth_num_ports;

	t = CPUP_CONF_DCPUP | CPUP_CONF_CRCP |
		SWITCH_PORTS_NOCPU << CPUP_CONF_DUNP_SHIFT |
		SWITCH_PORTS_NOCPU << CPUP_CONF_DMCP_SHIFT;
	sw_write_reg(SWITCH_REG_CPUP_CONF, t);

	t = (SWITCH_PORTS_NOCPU << PORT_CONF0_EMCP_SHIFT) |
		(SWITCH_PORTS_NOCPU << PORT_CONF0_BP_SHIFT) |
		(SWITCH_PORTS_NOCPU);
	sw_write_reg(SWITCH_REG_PORT_CONF0, t);

	/* setup ports to Autoneg/100M/Full duplex/Auto MDIX */
	t = SWITCH_PORTS_PHY |
		(SWITCH_PORTS_PHY << PHY_CNTL2_SC_SHIFT) |
		(SWITCH_PORTS_PHY << PHY_CNTL2_DC_SHIFT) |
		(SWITCH_PORTS_PHY << PHY_CNTL2_PHYR_SHIFT) |
		(SWITCH_PORTS_PHY << PHY_CNTL2_AMDIX_SHIFT) |
		PHY_CNTL2_RMAE;
	SW_WRITE_REG(PHY_CNTL2, t);

	t = sw_read_reg(SWITCH_REG_PHY_CNTL3);
	t |= PHY_CNTL3_RNT;
	sw_write_reg(SWITCH_REG_PHY_CNTL3, t);

	/* Force all packets from all ports to low priority */
	sw_write_reg(SWITCH_REG_PRI_CNTL, 0);

	sw_int_mask(SWITCH_INTS_ALL);
	sw_int_ack(SWITCH_INTS_ALL);

	/* init RX ring */
	cur_rxl = dirty_rxl = 0;
	rxl_descs = dma_alloc_coherent(NULL, RX_DESCS_SIZE, &rxl_descs_dma,
		GFP_ATOMIC);
	if (!rxl_descs) {
		err = -ENOMEM;
		goto err;
	}

	rxl_skbuff = kzalloc(RX_SKBS_SIZE, GFP_KERNEL);
	if (!rxl_skbuff) {
		err = -ENOMEM;
		goto err;
	}

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		skb = alloc_skb(SKB_ALLOC_LEN, GFP_ATOMIC);
		if (!skb) {
			err = -ENOMEM;
			goto err;
		}
		rxl_skbuff[i] = skb;
		skb_reserve(skb, SKB_RESERVE_LEN);
	}

	/* init TX ring */
	cur_txl = dirty_txl = 0;
	txl_descs = dma_alloc_coherent(NULL, TX_DESCS_SIZE, &txl_descs_dma,
		GFP_ATOMIC);
	if (!txl_descs) {
		err = -ENOMEM;
		goto err;
	}

	txl_skbuff = kzalloc(TX_SKBS_SIZE, GFP_KERNEL);
	if (!txl_skbuff) {
		err = -ENOMEM;
		goto err;
	}

	adm5120_dma_tx_init(txl_descs, txl_skbuff, TX_RING_SIZE);
	adm5120_dma_rx_init(rxl_descs, rxl_skbuff, RX_RING_SIZE);

	sw_write_reg(SWITCH_REG_SHDA, 0);
	sw_write_reg(SWITCH_REG_SLDA, KSEG1ADDR(txl_descs));
	sw_write_reg(SWITCH_REG_RHDA, 0);
	sw_write_reg(SWITCH_REG_RLDA, KSEG1ADDR(rxl_descs));

	for (i = 0; i < SWITCH_NUM_PORTS; i++) {
		adm5120_devs[i] = alloc_etherdev(sizeof(struct adm5120_sw));
		if (!adm5120_devs[i]) {
			err = -ENOMEM;
			goto err;
		}

		dev = adm5120_devs[i];
		err = request_irq(ADM5120_IRQ_SWITCH, adm5120_poll_irq,
			(IRQF_SHARED | IRQF_DISABLED), dev->name, dev);
		if (err) {
			SW_ERR("unable to get irq for %s\n", dev->name);
			goto err;
		}

		SET_MODULE_OWNER(dev);
		memset(netdev_priv(dev), 0, sizeof(struct adm5120_sw));
		((struct adm5120_sw *)netdev_priv(dev))->port = i;
		dev->base_addr = ADM5120_SWITCH_BASE;
		dev->irq = ADM5120_IRQ_SWITCH;
		dev->open = adm5120_switch_open;
		dev->hard_start_xmit = adm5120_sw_start_xmit;
		dev->stop = adm5120_switch_stop;
		dev->set_multicast_list = adm5120_set_multicast_list;
		dev->do_ioctl = adm5120_do_ioctl;
		dev->tx_timeout = adm5120_tx_timeout;
		dev->watchdog_timeo = TX_TIMEOUT;
		dev->set_mac_address = adm5120_sw_set_mac_address;
		dev->poll = adm5120_switch_poll;
		dev->weight = 64;

		memcpy(dev->dev_addr, adm5120_eth_macs[i], 6);
		adm5120_write_mac(dev);

		err = register_netdev(dev);
		if (err) {
			SW_INFO("%s register failed, error=%d\n",
				dev->name, err);
			goto err;
		}
		SW_INFO("%s created for switch port%d\n", dev->name, i);
	}

	/* setup vlan/port mapping after devs are filled up */
	adm5120_set_vlan(adm5120_eth_vlans);

	/* enable CPU port */
	t = sw_read_reg(SWITCH_REG_CPUP_CONF);
	t &= ~CPUP_CONF_DCPUP;
	sw_write_reg(SWITCH_REG_CPUP_CONF, t);

	return 0;

err:
	adm5120_switch_cleanup();

	SW_ERR("init failed\n");
	return err;
}

static void __exit adm5120_switch_exit(void)
{
	adm5120_switch_cleanup();
}

module_init(adm5120_switch_init);
module_exit(adm5120_switch_exit);