[adm5120] switch driver cleanup, 3rd phase
target/linux/adm5120/files/drivers/net/adm5120sw.c
/*
 * ADM5120 built in ethernet switch driver
 *
 * Copyright Jeroen Vreeken (pe1rxq@amsat.org), 2005
 *
 * Inspiration for this driver came from the original ADMtek 2.4
 * driver, Copyright ADMtek Inc.
 *
 * NAPI extensions by Thomas Langer (Thomas.Langer@infineon.com)
 * and Friedrich Beckmann (Friedrich.Beckmann@infineon.com), 2007
 *
 * TODO: Add support for high prio queues (currently disabled)
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/uaccess.h>

#include <linux/io.h>
#include <linux/irq.h>

#include <asm/mipsregs.h>

#include <adm5120_info.h>
#include <adm5120_defs.h>
#include <adm5120_irq.h>
#include <adm5120_switch.h>

#include "adm5120sw.h"

#define DRV_NAME "adm5120-switch"
#define DRV_DESC "ADM5120 built-in ethernet switch driver"
#define DRV_VERSION "0.1.0"

MODULE_AUTHOR("Jeroen Vreeken (pe1rxq@amsat.org)");
MODULE_DESCRIPTION("ADM5120 ethernet switch driver");
MODULE_LICENSE("GPL");

/* ------------------------------------------------------------------------ */

#if 0 /*def ADM5120_SWITCH_DEBUG*/
#define SW_DBG(f, a...) printk(KERN_DEBUG "%s: " f, DRV_NAME , ## a)
#else
#define SW_DBG(f, a...) do {} while (0)
#endif
#define SW_ERR(f, a...) printk(KERN_ERR "%s: " f, DRV_NAME , ## a)
#define SW_INFO(f, a...) printk(KERN_INFO "%s: " f, DRV_NAME , ## a)

#define SWITCH_NUM_PORTS 6
#define ETH_CSUM_LEN 4

#define RX_MAX_PKTLEN 1550
#define RX_RING_SIZE 64

#define TX_RING_SIZE 32
#define TX_QUEUE_LEN 28 /* Limit ring entries actually used. */
#define TX_TIMEOUT HZ*400

#define RX_DESCS_SIZE (RX_RING_SIZE * sizeof(struct dma_desc))
#define RX_SKBS_SIZE (RX_RING_SIZE * sizeof(struct sk_buff *))
#define TX_DESCS_SIZE (TX_RING_SIZE * sizeof(struct dma_desc))
#define TX_SKBS_SIZE (TX_RING_SIZE * sizeof(struct sk_buff *))

#define SKB_ALLOC_LEN (RX_MAX_PKTLEN + 32)
#define SKB_RESERVE_LEN (NET_IP_ALIGN + NET_SKB_PAD)

#define SWITCH_INTS_HIGH (SWITCH_INT_SHD | SWITCH_INT_RHD | SWITCH_INT_HDF)
#define SWITCH_INTS_LOW (SWITCH_INT_SLD | SWITCH_INT_RLD | SWITCH_INT_LDF)
#define SWITCH_INTS_ERR (SWITCH_INT_RDE | SWITCH_INT_SDE | SWITCH_INT_CPUH)
#define SWITCH_INTS_Q (SWITCH_INT_P0QF | SWITCH_INT_P1QF | SWITCH_INT_P2QF | \
		SWITCH_INT_P3QF | SWITCH_INT_P4QF | SWITCH_INT_P5QF | \
		SWITCH_INT_CPQF | SWITCH_INT_GQF)

#define SWITCH_INTS_ALL (SWITCH_INTS_HIGH | SWITCH_INTS_LOW | \
		SWITCH_INTS_ERR | SWITCH_INTS_Q | \
		SWITCH_INT_MD | SWITCH_INT_PSC)

#define SWITCH_INTS_USED (SWITCH_INTS_LOW | SWITCH_INT_PSC)
#define SWITCH_INTS_POLL (SWITCH_INT_RLD | SWITCH_INT_LDF)

/* ------------------------------------------------------------------------ */

struct dma_desc {
	__u32 buf1;
#define DESC_OWN		(1UL << 31)	/* Owned by the switch */
#define DESC_EOR		(1UL << 28)	/* End of Ring */
#define DESC_ADDR_MASK		0x1FFFFFF
#define DESC_ADDR(x)		((__u32)(x) & DESC_ADDR_MASK)
	__u32 buf2;
#define DESC_BUF2_EN		(1UL << 31)	/* Buffer 2 enable */
	__u32 buflen;
	__u32 misc;
/* definitions for tx/rx descriptors */
#define DESC_PKTLEN_SHIFT	16
#define DESC_PKTLEN_MASK	0x7FF
/* tx descriptor specific part */
#define DESC_CSUM		(1UL << 31)	/* Append checksum */
#define DESC_DSTPORT_SHIFT	8
#define DESC_DSTPORT_MASK	0x3F
#define DESC_VLAN_MASK		0x3F
/* rx descriptor specific part */
#define DESC_SRCPORT_SHIFT	12
#define DESC_SRCPORT_MASK	0x7
#define DESC_DA_MASK		0x3
#define DESC_DA_SHIFT		4
#define DESC_IPCSUM_FAIL	(1UL << 3)	/* IP checksum fail */
#define DESC_VLAN_TAG		(1UL << 2)	/* VLAN tag present */
#define DESC_TYPE_MASK		0x3		/* mask for Packet type */
#define DESC_TYPE_IP		0x0		/* IP packet */
#define DESC_TYPE_PPPoE		0x1		/* PPPoE packet */
} __attribute__ ((aligned(16)));

static inline u32 desc_get_srcport(struct dma_desc *desc)
{
	return (desc->misc >> DESC_SRCPORT_SHIFT) & DESC_SRCPORT_MASK;
}

static inline u32 desc_get_pktlen(struct dma_desc *desc)
{
	return (desc->misc >> DESC_PKTLEN_SHIFT) & DESC_PKTLEN_MASK;
}

static inline int desc_ipcsum_fail(struct dma_desc *desc)
{
	return ((desc->misc & DESC_IPCSUM_FAIL) != 0);
}

/* ------------------------------------------------------------------------ */

/* default settings - unlimited TX and RX on all ports, default shaper mode */
static unsigned char bw_matrix[SWITCH_NUM_PORTS] = {
	0, 0, 0, 0, 0, 0
};

static int adm5120_nrdevs;

static struct net_device *adm5120_devs[SWITCH_NUM_PORTS];
/* Lookup table port -> device */
static struct net_device *adm5120_port[SWITCH_NUM_PORTS];

static struct dma_desc *txl_descs;
static struct dma_desc *rxl_descs;

static dma_addr_t txl_descs_dma;
static dma_addr_t rxl_descs_dma;

static struct sk_buff **txl_skbuff;
static struct sk_buff **rxl_skbuff;

static unsigned int cur_rxl, dirty_rxl; /* producer/consumer ring indices */
static unsigned int cur_txl, dirty_txl;

static unsigned int sw_used;

static spinlock_t sw_lock = SPIN_LOCK_UNLOCKED;
static spinlock_t poll_lock = SPIN_LOCK_UNLOCKED;

static struct net_device sw_dev;

/* ------------------------------------------------------------------------ */

static inline u32 sw_read_reg(u32 reg)
{
	return __raw_readl((void __iomem *)KSEG1ADDR(ADM5120_SWITCH_BASE)+reg);
}

static inline void sw_write_reg(u32 reg, u32 val)
{
	__raw_writel(val, (void __iomem *)KSEG1ADDR(ADM5120_SWITCH_BASE)+reg);
}

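/*
 * Interrupt helpers: a bit set in SWITCH_REG_INT_MASK disables the
 * corresponding interrupt source, so sw_int_mask() blocks sources and
 * sw_int_unmask() re-enables them. sw_int_status() reports only the
 * pending sources that are currently not masked.
 */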
static inline void sw_int_mask(u32 mask)
{
	u32 t;

	t = sw_read_reg(SWITCH_REG_INT_MASK);
	t |= mask;
	sw_write_reg(SWITCH_REG_INT_MASK, t);
}

static inline void sw_int_unmask(u32 mask)
{
	u32 t;

	t = sw_read_reg(SWITCH_REG_INT_MASK);
	t &= ~mask;
	sw_write_reg(SWITCH_REG_INT_MASK, t);
}

static inline void sw_int_ack(u32 mask)
{
	sw_write_reg(SWITCH_REG_INT_STATUS, mask);
}

static inline u32 sw_int_status(void)
{
	u32 t;

	t = sw_read_reg(SWITCH_REG_INT_STATUS);
	t &= ~sw_read_reg(SWITCH_REG_INT_MASK);
	return t;
}

/* ------------------------------------------------------------------------ */

static void sw_dump_desc(char *label, struct dma_desc *desc, int tx)
{
	u32 t;

	SW_DBG("%s %s desc/%p\n", label, tx ? "tx" : "rx", desc);

	t = desc->buf1;
	SW_DBG(" buf1 %08X addr=%08X; len=%08X %s%s\n", t,
		t & DESC_ADDR_MASK,
		desc->buflen,
		(t & DESC_OWN) ? "SWITCH" : "CPU",
		(t & DESC_EOR) ? " RE" : "");

	t = desc->buf2;
	SW_DBG(" buf2 %08X addr=%08X%s\n", desc->buf2,
		t & DESC_ADDR_MASK,
		(t & DESC_BUF2_EN) ? " EN" : "");

	t = desc->misc;
	if (tx)
		SW_DBG(" misc %08X%s pktlen=%04X ports=%02X vlan=%02X\n", t,
			(t & DESC_CSUM) ? " CSUM" : "",
			(t >> DESC_PKTLEN_SHIFT) & DESC_PKTLEN_MASK,
			(t >> DESC_DSTPORT_SHIFT) & DESC_DSTPORT_MASK,
			t & DESC_VLAN_MASK);
	else
		SW_DBG(" misc %08X pktlen=%04X port=%d DA=%d%s%s type=%d\n",
			t,
			(t >> DESC_PKTLEN_SHIFT) & DESC_PKTLEN_MASK,
			(t >> DESC_SRCPORT_SHIFT) & DESC_SRCPORT_MASK,
			(t >> DESC_DA_SHIFT) & DESC_DA_MASK,
			(t & DESC_IPCSUM_FAIL) ? " IPCF" : "",
			(t & DESC_VLAN_TAG) ? " VLAN" : "",
			(t & DESC_TYPE_MASK));
}

static void sw_dump_intr_mask(char *label, u32 mask)
{
	SW_DBG("%s %08X%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
		label, mask,
		(mask & SWITCH_INT_SHD) ? " SHD" : "",
		(mask & SWITCH_INT_SLD) ? " SLD" : "",
		(mask & SWITCH_INT_RHD) ? " RHD" : "",
		(mask & SWITCH_INT_RLD) ? " RLD" : "",
		(mask & SWITCH_INT_HDF) ? " HDF" : "",
		(mask & SWITCH_INT_LDF) ? " LDF" : "",
		(mask & SWITCH_INT_P0QF) ? " P0QF" : "",
		(mask & SWITCH_INT_P1QF) ? " P1QF" : "",
		(mask & SWITCH_INT_P2QF) ? " P2QF" : "",
		(mask & SWITCH_INT_P3QF) ? " P3QF" : "",
		(mask & SWITCH_INT_P4QF) ? " P4QF" : "",
		(mask & SWITCH_INT_CPQF) ? " CPQF" : "",
		(mask & SWITCH_INT_GQF) ? " GQF" : "",
		(mask & SWITCH_INT_MD) ? " MD" : "",
		(mask & SWITCH_INT_BCS) ? " BCS" : "",
		(mask & SWITCH_INT_PSC) ? " PSC" : "",
		(mask & SWITCH_INT_ID) ? " ID" : "",
		(mask & SWITCH_INT_W0TE) ? " W0TE" : "",
		(mask & SWITCH_INT_W1TE) ? " W1TE" : "",
		(mask & SWITCH_INT_RDE) ? " RDE" : "",
		(mask & SWITCH_INT_SDE) ? " SDE" : "",
		(mask & SWITCH_INT_CPUH) ? " CPUH" : "");
}

static void sw_dump_regs(void)
{
	u32 t;

	t = sw_read_reg(SWITCH_REG_PHY_STATUS);
	SW_DBG("phy_status: %08X\n", t);

	t = sw_read_reg(SWITCH_REG_CPUP_CONF);
	SW_DBG("cpup_conf: %08X%s%s%s\n", t,
		(t & CPUP_CONF_DCPUP) ? " DCPUP" : "",
		(t & CPUP_CONF_CRCP) ? " CRCP" : "",
		(t & CPUP_CONF_BTM) ? " BTM" : "");

	t = sw_read_reg(SWITCH_REG_PORT_CONF0);
	SW_DBG("port_conf0: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_PORT_CONF1);
	SW_DBG("port_conf1: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_PORT_CONF2);
	SW_DBG("port_conf2: %08X\n", t);

	t = sw_read_reg(SWITCH_REG_VLAN_G1);
	SW_DBG("vlan g1: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_VLAN_G2);
	SW_DBG("vlan g2: %08X\n", t);

	t = sw_read_reg(SWITCH_REG_BW_CNTL0);
	SW_DBG("bw_cntl0: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_BW_CNTL1);
	SW_DBG("bw_cntl1: %08X\n", t);

	t = sw_read_reg(SWITCH_REG_PHY_CNTL0);
	SW_DBG("phy_cntl0: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_PHY_CNTL1);
	SW_DBG("phy_cntl1: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_PHY_CNTL2);
	SW_DBG("phy_cntl2: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_PHY_CNTL3);
	SW_DBG("phy_cntl3: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_PHY_CNTL4);
	SW_DBG("phy_cntl4: %08X\n", t);

	t = sw_read_reg(SWITCH_REG_INT_STATUS);
	sw_dump_intr_mask("int_status: ", t);

	t = sw_read_reg(SWITCH_REG_INT_MASK);
	sw_dump_intr_mask("int_mask: ", t);

	t = sw_read_reg(SWITCH_REG_SHDA);
	SW_DBG("shda: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_SLDA);
	SW_DBG("slda: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_RHDA);
	SW_DBG("rhda: %08X\n", t);
	t = sw_read_reg(SWITCH_REG_RLDA);
	SW_DBG("rlda: %08X\n", t);
}

/* ------------------------------------------------------------------------ */

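/*
 * Hand an RX descriptor (back) to the switch: attach the skb data buffer,
 * reset length and flags, and set DESC_OWN so the hardware may fill it.
 */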
static inline void adm5120_rx_dma_update(struct dma_desc *desc,
		struct sk_buff *skb, int end)
{
	desc->misc = 0;
	desc->buf2 = 0;
	desc->buflen = RX_MAX_PKTLEN;
	desc->buf1 = DESC_ADDR(skb->data) |
		DESC_OWN | (end ? DESC_EOR : 0);
}

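/*
 * Refill the low priority RX ring between dirty_rxl and cur_rxl: allocate
 * a fresh skb where the previous one was passed up the stack and give the
 * descriptor back to the switch. On allocation failure the descriptor is
 * returned with a zero buffer length and refilling stops early.
 */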
static void adm5120_switch_rx_refill(void)
{
	unsigned int entry;

	for (; cur_rxl - dirty_rxl > 0; dirty_rxl++) {
		struct dma_desc *desc;
		struct sk_buff *skb;

		entry = dirty_rxl % RX_RING_SIZE;
		desc = &rxl_descs[entry];

		skb = rxl_skbuff[entry];
		if (skb == NULL) {
			skb = alloc_skb(SKB_ALLOC_LEN, GFP_ATOMIC);
			if (skb) {
				skb_reserve(skb, SKB_RESERVE_LEN);
				rxl_skbuff[entry] = skb;
			} else {
				SW_ERR("no memory for skb\n");
				desc->buflen = 0;
				desc->buf2 = 0;
				desc->misc = 0;
				desc->buf1 = (desc->buf1 & DESC_EOR) | DESC_OWN;
				break;
			}
		}

		desc->buf2 = 0;
		desc->buflen = RX_MAX_PKTLEN;
		desc->misc = 0;
		desc->buf1 = (desc->buf1 & DESC_EOR) | DESC_OWN |
			DESC_ADDR(skb->data);
	}
}

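/*
 * Receive processing for the low priority RX ring. Completed descriptors
 * are handed to the stack via netif_receive_skb() until either "limit"
 * packets have been processed or the hardware still owns the next
 * descriptor. Returns the number of packets delivered.
 */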
static int adm5120_switch_rx(int limit)
{
	unsigned int done = 0;

	SW_DBG("rx start, limit=%d, cur_rxl=%u, dirty_rxl=%u\n",
		limit, cur_rxl, dirty_rxl);

	sw_int_ack(SWITCH_INTS_POLL);

	while (done < limit) {
		int entry = cur_rxl % RX_RING_SIZE;
		struct dma_desc *desc = &rxl_descs[entry];
		struct net_device *rdev;
		unsigned int port;

		if (desc->buf1 & DESC_OWN)
			break;

		if (dirty_rxl + RX_RING_SIZE == cur_rxl)
			break;

		port = desc_get_srcport(desc);
		rdev = adm5120_port[port];

		SW_DBG("rx descriptor %u, desc=%p, skb=%p\n", entry, desc,
			rxl_skbuff[entry]);

		if ((rdev) && netif_running(rdev)) {
			struct sk_buff *skb = rxl_skbuff[entry];
			int pktlen;

			pktlen = desc_get_pktlen(desc);
			pktlen -= ETH_CSUM_LEN;

			if ((pktlen == 0) || desc_ipcsum_fail(desc)) {
				rdev->stats.rx_errors++;
				if (pktlen == 0)
					rdev->stats.rx_length_errors++;
				if (desc_ipcsum_fail(desc))
					rdev->stats.rx_crc_errors++;
				SW_DBG("rx error, recycling skb %u\n", entry);
			} else {
				skb_put(skb, pktlen);

				skb->dev = rdev;
				skb->protocol = eth_type_trans(skb, rdev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;

				dma_cache_wback_inv((unsigned long)skb->data,
					skb->len);

				netif_receive_skb(skb);

				rdev->last_rx = jiffies;
				rdev->stats.rx_packets++;
				rdev->stats.rx_bytes += pktlen;

				rxl_skbuff[entry] = NULL;
				done++;
			}
		} else {
			SW_DBG("no rx device, recycling skb %u\n", entry);
		}

		cur_rxl++;
		if (cur_rxl - dirty_rxl > RX_RING_SIZE / 4)
			adm5120_switch_rx_refill();
	}

	adm5120_switch_rx_refill();

	SW_DBG("rx finished, cur_rxl=%u, dirty_rxl=%u, processed %d\n",
		cur_rxl, dirty_rxl, done);

	return done;
}

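/*
 * Reclaim finished descriptors from the low priority TX ring: update the
 * per-device counters, free the transmitted skbs and wake the queues of
 * all ports once enough ring entries are available again.
 */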
static void adm5120_switch_tx(void)
{
	unsigned int entry;

	/* find and cleanup dirty tx descriptors */
	entry = dirty_txl % TX_RING_SIZE;
	while (dirty_txl != cur_txl) {
		struct dma_desc *desc = &txl_descs[entry];
		struct sk_buff *skb = txl_skbuff[entry];

		if (desc->buf1 & DESC_OWN)
			break;

		if (netif_running(skb->dev)) {
			skb->dev->stats.tx_bytes += skb->len;
			skb->dev->stats.tx_packets++;
		}

		dev_kfree_skb_irq(skb);
		txl_skbuff[entry] = NULL;
		entry = (++dirty_txl) % TX_RING_SIZE;
	}

	if ((cur_txl - dirty_txl) < TX_QUEUE_LEN - 4) {
		/* wake up queue of all devices */
		int i;
		for (i = 0; i < SWITCH_NUM_PORTS; i++) {
			if (!adm5120_devs[i])
				continue;
			netif_wake_queue(adm5120_devs[i]);
		}
	}
}

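/*
 * netdev->poll handler (pre-2.6.24 NAPI interface): receive up to the
 * assigned quota, then leave polling mode and unmask the RX interrupts
 * once no more work is pending.
 */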
static int adm5120_if_poll(struct net_device *dev, int *budget)
{
	int limit = min(dev->quota, *budget);
	int done;
	u32 status;

	done = adm5120_switch_rx(limit);

	*budget -= done;
	dev->quota -= done;

	status = sw_int_status() & SWITCH_INTS_POLL;
	if ((done < limit) && (!status)) {
		SW_DBG("disable polling mode for %s\n", dev->name);
		netif_rx_complete(dev);
		sw_int_unmask(SWITCH_INTS_POLL);
		return 0;
	}

	return 1;
}

static irqreturn_t adm5120_poll_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	u32 status;

	status = sw_int_status();
	status &= SWITCH_INTS_POLL;
	if (!status)
		return IRQ_NONE;

	sw_dump_intr_mask("poll ints", status);

	SW_DBG("enable polling mode for %s\n", dev->name);
	sw_int_mask(SWITCH_INTS_POLL);
	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}

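/*
 * Handler for the non-polled switch interrupts; currently only the
 * "send low descriptor" event is acted upon, by reclaiming finished TX
 * descriptors.
 */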
static irqreturn_t adm5120_switch_irq(int irq, void *dev_id)
{
	u32 status;

	status = sw_int_status();
	status &= SWITCH_INTS_ALL & ~SWITCH_INTS_POLL;
	if (!status)
		return IRQ_NONE;

	sw_int_ack(status);

	if (status & SWITCH_INT_SLD) {
		spin_lock(&sw_lock);
		adm5120_switch_tx();
		spin_unlock(&sw_lock);
	}

	return IRQ_HANDLED;
}

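/*
 * Program the VLAN/port matrix into the switch. Each byte of the matrix
 * describes one VLAN (one logical ethernet device) as a bitmap of member
 * ports; the port -> net_device lookup table used by the RX path is
 * rebuilt from the same information.
 */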
static void adm5120_set_vlan(char *matrix)
{
	unsigned long val;
	int vlan_port, port;

	val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24);
	sw_write_reg(SWITCH_REG_VLAN_G1, val);
	val = matrix[4] + (matrix[5]<<8);
	sw_write_reg(SWITCH_REG_VLAN_G2, val);

	/* Now set/update the port vs. device lookup table */
	for (port = 0; port < SWITCH_NUM_PORTS; port++) {
		for (vlan_port = 0; vlan_port < SWITCH_NUM_PORTS &&
			!(matrix[vlan_port] & (0x00000001 << port)); vlan_port++)
			;
		if (vlan_port < SWITCH_NUM_PORTS)
			adm5120_port[port] = adm5120_devs[vlan_port];
		else
			adm5120_port[port] = NULL;
	}
}

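/*
 * Write the per-port bandwidth limits: ports 0-3 are configured through
 * BW_CNTL0, ports 4 and 5 through BW_CNTL1. A zero entry leaves the port
 * unlimited (see the default bw_matrix above).
 */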
static void adm5120_set_bw(char *matrix)
{
	unsigned long val;

	/* Port 0 to 3 are set using the bandwidth control 0 register */
	val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24);
	sw_write_reg(SWITCH_REG_BW_CNTL0, val);

	/* Port 4 and 5 are set using the bandwidth control 1 register */
	val = matrix[4];
	if (matrix[5] == 1)
		sw_write_reg(SWITCH_REG_BW_CNTL1, val | 0x80000000);
	else
		sw_write_reg(SWITCH_REG_BW_CNTL1, val & ~0x80000000);

	SW_DBG("D: ctl0 0x%08X, ctl1 0x%08X\n", sw_read_reg(SWITCH_REG_BW_CNTL0),
		sw_read_reg(SWITCH_REG_BW_CNTL1));
}

static void adm5120_switch_tx_ring_reset(struct dma_desc *desc,
		struct sk_buff **skbl, int num)
{
	memset(desc, 0, num * sizeof(*desc));
	desc[num-1].buf1 |= DESC_EOR;
	memset(skbl, 0, sizeof(struct sk_buff *) * num);

	cur_txl = 0;
	dirty_txl = 0;
}

static void adm5120_switch_rx_ring_reset(struct dma_desc *desc,
		struct sk_buff **skbl, int num)
{
	int i;

	memset(desc, 0, num * sizeof(*desc));
	for (i = 0; i < num; i++) {
		skbl[i] = dev_alloc_skb(SKB_ALLOC_LEN);
		if (!skbl[i]) {
			i = num;
			break;
		}
		skb_reserve(skbl[i], SKB_RESERVE_LEN);
		adm5120_rx_dma_update(&desc[i], skbl[i], (num-1==i));
	}

	cur_rxl = 0;
	dirty_rxl = 0;
}

static int adm5120_switch_tx_ring_alloc(void)
{
	int err;

	txl_descs = dma_alloc_coherent(NULL, TX_DESCS_SIZE, &txl_descs_dma,
		GFP_ATOMIC);
	if (!txl_descs) {
		err = -ENOMEM;
		goto err;
	}

	txl_skbuff = kzalloc(TX_SKBS_SIZE, GFP_KERNEL);
	if (!txl_skbuff) {
		err = -ENOMEM;
		goto err;
	}

	return 0;

err:
	return err;
}

static void adm5120_switch_tx_ring_free(void)
{
	int i;

	if (txl_skbuff) {
		for (i = 0; i < TX_RING_SIZE; i++)
			if (txl_skbuff[i])
				kfree_skb(txl_skbuff[i]);
		kfree(txl_skbuff);
	}

	if (txl_descs)
		dma_free_coherent(NULL, TX_DESCS_SIZE, txl_descs,
			txl_descs_dma);
}

static int adm5120_switch_rx_ring_alloc(void)
{
	int err;
	int i;

	/* init RX ring */
	rxl_descs = dma_alloc_coherent(NULL, RX_DESCS_SIZE, &rxl_descs_dma,
		GFP_ATOMIC);
	if (!rxl_descs) {
		err = -ENOMEM;
		goto err;
	}

	rxl_skbuff = kzalloc(RX_SKBS_SIZE, GFP_KERNEL);
	if (!rxl_skbuff) {
		err = -ENOMEM;
		goto err;
	}

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		skb = alloc_skb(SKB_ALLOC_LEN, GFP_ATOMIC);
		if (!skb) {
			err = -ENOMEM;
			goto err;
		}
		rxl_skbuff[i] = skb;
		skb_reserve(skb, SKB_RESERVE_LEN);
	}

	return 0;

err:
	return err;
}

static void adm5120_switch_rx_ring_free(void)
{
	int i;

	if (rxl_skbuff) {
		for (i = 0; i < RX_RING_SIZE; i++)
			if (rxl_skbuff[i])
				kfree_skb(rxl_skbuff[i]);
		kfree(rxl_skbuff);
	}

	if (rxl_descs)
		dma_free_coherent(NULL, RX_DESCS_SIZE, rxl_descs,
			rxl_descs_dma);
}

/* ------------------------------------------------------------------------ */

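/*
 * Opening an interface requests the shared switch IRQ for the polling
 * handler, unmasks the used switch interrupts on first open and enables
 * the ports of this device in PORT_CONF0 (a set bit disables a port).
 */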
static int adm5120_if_open(struct net_device *dev)
{
	u32 t;
	int err;
	int i;

	err = request_irq(dev->irq, adm5120_poll_irq,
		(IRQF_SHARED | IRQF_DISABLED), dev->name, dev);
	if (err) {
		SW_ERR("unable to get irq for %s\n", dev->name);
		goto err;
	}

	if (!sw_used++)
		/* enable interrupts on first open */
		sw_int_unmask(SWITCH_INTS_USED);

	/* enable (additional) port */
	t = sw_read_reg(SWITCH_REG_PORT_CONF0);
	for (i = 0; i < SWITCH_NUM_PORTS; i++) {
		if (dev == adm5120_devs[i])
			t &= ~adm5120_eth_vlans[i];
	}
	sw_write_reg(SWITCH_REG_PORT_CONF0, t);

	netif_start_queue(dev);

	return 0;

err:
	return err;
}

static int adm5120_if_stop(struct net_device *dev)
{
	u32 t;
	int i;

	netif_stop_queue(dev);

	/* disable port if not assigned to other devices */
	t = sw_read_reg(SWITCH_REG_PORT_CONF0);
	t |= SWITCH_PORTS_NOCPU;
	for (i = 0; i < SWITCH_NUM_PORTS; i++) {
		if ((dev != adm5120_devs[i]) && netif_running(adm5120_devs[i]))
			t &= ~adm5120_eth_vlans[i];
	}
	sw_write_reg(SWITCH_REG_PORT_CONF0, t);

	if (!--sw_used)
		sw_int_mask(SWITCH_INTS_USED);

	free_irq(dev->irq, dev);

	return 0;
}

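/*
 * Queue one skb on the low priority TX ring. The descriptor carries the
 * frame length (padded up to ETH_ZLEN for short frames) and the VLAN bit
 * of the transmitting device; the send engine is then kicked through
 * SEND_TRIG_STL.
 */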
static int adm5120_if_hard_start_xmit(struct sk_buff *skb,
		struct net_device *dev)
{
	struct dma_desc *desc;
	struct adm5120_sw *priv = netdev_priv(dev);
	unsigned int entry;
	unsigned long data;

	/* lock switch irq */
	spin_lock_irq(&sw_lock);

	/* calculate the next TX descriptor entry. */
	entry = cur_txl % TX_RING_SIZE;

	desc = &txl_descs[entry];
	if (desc->buf1 & DESC_OWN) {
		/* We want to write a packet but the TX queue is still
		 * occupied by the DMA. We are faster than the DMA... */
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		spin_unlock_irq(&sw_lock);
		return 0;
	}

	txl_skbuff[entry] = skb;
	data = (desc->buf1 & DESC_EOR);
	data |= DESC_ADDR(skb->data);

	desc->misc =
		((skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len) << DESC_PKTLEN_SHIFT) |
		(0x1 << priv->port);

	desc->buflen = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;

	desc->buf1 = data | DESC_OWN;
	sw_write_reg(SWITCH_REG_SEND_TRIG, SEND_TRIG_STL);

	cur_txl++;
	if (cur_txl == dirty_txl + TX_QUEUE_LEN) {
		/* FIXME: stop queue for all devices */
		netif_stop_queue(dev);
	}

	dev->trans_start = jiffies;

	spin_unlock_irq(&sw_lock);

	return 0;
}

static void adm5120_if_tx_timeout(struct net_device *dev)
{
	SW_INFO("TX timeout on %s\n", dev->name);
}

static void adm5120_set_multicast_list(struct net_device *dev)
{
	struct adm5120_sw *priv = netdev_priv(dev);
	u32 ports;
	u32 t;

	ports = adm5120_eth_vlans[priv->port] & SWITCH_PORTS_NOCPU;

	t = sw_read_reg(SWITCH_REG_CPUP_CONF);
	if (dev->flags & IFF_PROMISC)
		/* enable unknown packets */
		t &= ~(ports << CPUP_CONF_DUNP_SHIFT);
	else
		/* disable unknown packets */
		t |= (ports << CPUP_CONF_DUNP_SHIFT);

	if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI ||
			dev->mc_count)
		/* enable multicast packets */
		t &= ~(ports << CPUP_CONF_DMCP_SHIFT);
	else
		/* disable multicast packets */
		t |= (ports << CPUP_CONF_DMCP_SHIFT);

	/*
	 * If any port is configured to be in promiscuous mode, the Bridge
	 * Test Mode has to be activated. This also causes packets learned
	 * in another VLAN to be forwarded to the CPU.
	 *
	 * The difficult scenario is building a bridge on the CPU. Assume
	 * port0 and the CPU port are in VLAN0, port1 and the CPU port are in
	 * VLAN1, and VLAN0 and VLAN1 are bridged on the CPU; both VLAN
	 * interfaces are then in promiscuous mode. Now a packet with
	 * ethernet source address 99 enters port 0. It is forwarded to the
	 * CPU because it is unknown, the bridge sends it out via VLAN1 and
	 * it leaves at port 1. When a packet with ethernet destination
	 * address 99 later arrives at port 1 in VLAN1, the switch has
	 * learned that this address lives at port 0 in VLAN0 and would drop
	 * the packet. To still deliver it to the CPU, the Bridge Test Mode
	 * has to be activated.
	 */

	/* Check if there is any vlan in promisc mode. */
	if (t & (SWITCH_PORTS_NOCPU << CPUP_CONF_DUNP_SHIFT))
		t &= ~CPUP_CONF_BTM; /* Disable Bridge Testing Mode */
	else
		t |= CPUP_CONF_BTM; /* Enable Bridge Testing Mode */

	sw_write_reg(SWITCH_REG_CPUP_CONF, t);
}

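/*
 * Write the MAC address of a device into the switch address table:
 * bytes 2..5 go into MAC_WT1, bytes 0..1 plus the VLAN number and the
 * write command bits into MAC_WT0. The function busy-waits until the
 * switch signals completion via MAC_WT0_MWD.
 */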
static void adm5120_write_mac(struct net_device *dev)
{
	struct adm5120_sw *priv = netdev_priv(dev);
	unsigned char *mac = dev->dev_addr;
	u32 t;

	t = mac[2] | (mac[3] << MAC_WT1_MAC3_SHIFT) |
		(mac[4] << MAC_WT1_MAC4_SHIFT) | (mac[5] << MAC_WT1_MAC5_SHIFT);
	sw_write_reg(SWITCH_REG_MAC_WT1, t);

	t = (mac[0] << MAC_WT0_MAC0_SHIFT) | (mac[1] << MAC_WT0_MAC1_SHIFT) |
		MAC_WT0_MAWC | MAC_WT0_WVE | (priv->port<<3);

	sw_write_reg(SWITCH_REG_MAC_WT0, t);

	while (!(sw_read_reg(SWITCH_REG_MAC_WT0) & MAC_WT0_MWD));
}

static int adm5120_if_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	adm5120_write_mac(dev);
	return 0;
}

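/*
 * Private ioctls (see adm5120sw.h). A userspace caller is expected to
 * pass its buffer through ifr_data; a rough sketch (not part of this
 * tree, error handling omitted):
 *
 *	struct adm5120_sw_info info;
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *) &info;
 *	ioctl(sockfd, SIOCGADMINFO, &ifr);
 */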
static int adm5120_if_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	int err;
	struct adm5120_sw_info info;
	struct adm5120_sw *priv = netdev_priv(dev);

	switch (cmd) {
	case SIOCGADMINFO:
		info.magic = 0x5120;
		info.ports = adm5120_nrdevs;
		info.vlan = priv->port;
		err = copy_to_user(rq->ifr_data, &info, sizeof(info));
		if (err)
			return -EFAULT;
		break;
	case SIOCSMATRIX:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		err = copy_from_user(adm5120_eth_vlans, rq->ifr_data,
			sizeof(adm5120_eth_vlans));
		if (err)
			return -EFAULT;
		adm5120_set_vlan(adm5120_eth_vlans);
		break;
	case SIOCGMATRIX:
		err = copy_to_user(rq->ifr_data, adm5120_eth_vlans,
			sizeof(adm5120_eth_vlans));
		if (err)
			return -EFAULT;
		break;
	case SIOCGETBW:
		err = copy_to_user(rq->ifr_data, bw_matrix, sizeof(bw_matrix));
		if (err)
			return -EFAULT;
		break;
	case SIOCSETBW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		err = copy_from_user(bw_matrix, rq->ifr_data, sizeof(bw_matrix));
		if (err)
			return -EFAULT;
		adm5120_set_bw(bw_matrix);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static struct net_device *adm5120_if_alloc(void)
{
	struct net_device *dev;
	struct adm5120_sw *priv;

	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return NULL;

	dev->irq = ADM5120_IRQ_SWITCH;
	dev->open = adm5120_if_open;
	dev->hard_start_xmit = adm5120_if_hard_start_xmit;
	dev->stop = adm5120_if_stop;
	dev->set_multicast_list = adm5120_set_multicast_list;
	dev->do_ioctl = adm5120_if_do_ioctl;
	dev->tx_timeout = adm5120_if_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->set_mac_address = adm5120_if_set_mac_address;
	dev->poll = adm5120_if_poll;
	dev->weight = 64;

	SET_MODULE_OWNER(dev);

	return dev;
}

static void adm5120_switch_cleanup(void)
{
	int i;

	/* disable interrupts */
	sw_int_mask(SWITCH_INTS_ALL);

	for (i = 0; i < SWITCH_NUM_PORTS; i++) {
		struct net_device *dev = adm5120_devs[i];
		if (dev) {
			unregister_netdev(dev);
			free_netdev(dev);
		}
	}

	adm5120_switch_tx_ring_free();
	adm5120_switch_rx_ring_free();

	free_irq(ADM5120_IRQ_SWITCH, &sw_dev);
}

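/*
 * Module init: request the switch IRQ, bring the switch into a known
 * state (CPU port disabled, unknown and multicast frames dropped towards
 * the CPU, PHYs set to autonegotiation), allocate and reset the DMA
 * rings, register one net_device per configured port and finally enable
 * the CPU port.
 */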
static int __init adm5120_switch_init(void)
{
	u32 t;
	int i, err;

	err = request_irq(ADM5120_IRQ_SWITCH, adm5120_switch_irq,
		(IRQF_SHARED | IRQF_DISABLED), "switch", &sw_dev);
	if (err) {
		SW_ERR("request_irq failed with error %d\n", err);
		goto err;
	}

	adm5120_nrdevs = adm5120_eth_num_ports;

	t = CPUP_CONF_DCPUP | CPUP_CONF_CRCP |
		SWITCH_PORTS_NOCPU << CPUP_CONF_DUNP_SHIFT |
		SWITCH_PORTS_NOCPU << CPUP_CONF_DMCP_SHIFT;
	sw_write_reg(SWITCH_REG_CPUP_CONF, t);

	t = (SWITCH_PORTS_NOCPU << PORT_CONF0_EMCP_SHIFT) |
		(SWITCH_PORTS_NOCPU << PORT_CONF0_BP_SHIFT) |
		(SWITCH_PORTS_NOCPU);
	sw_write_reg(SWITCH_REG_PORT_CONF0, t);

	/* setup ports to Autoneg/100M/Full duplex/Auto MDIX */
	t = SWITCH_PORTS_PHY |
		(SWITCH_PORTS_PHY << PHY_CNTL2_SC_SHIFT) |
		(SWITCH_PORTS_PHY << PHY_CNTL2_DC_SHIFT) |
		(SWITCH_PORTS_PHY << PHY_CNTL2_PHYR_SHIFT) |
		(SWITCH_PORTS_PHY << PHY_CNTL2_AMDIX_SHIFT) |
		PHY_CNTL2_RMAE;
	sw_write_reg(SWITCH_REG_PHY_CNTL2, t);

	t = sw_read_reg(SWITCH_REG_PHY_CNTL3);
	t |= PHY_CNTL3_RNT;
	sw_write_reg(SWITCH_REG_PHY_CNTL3, t);

	/* force all packets from all ports into the low priority queue */
	sw_write_reg(SWITCH_REG_PRI_CNTL, 0);

	sw_int_mask(SWITCH_INTS_ALL);
	sw_int_ack(SWITCH_INTS_ALL);

	err = adm5120_switch_rx_ring_alloc();
	if (err)
		goto err;

	err = adm5120_switch_tx_ring_alloc();
	if (err)
		goto err;

	adm5120_switch_tx_ring_reset(txl_descs, txl_skbuff, TX_RING_SIZE);
	adm5120_switch_rx_ring_reset(rxl_descs, rxl_skbuff, RX_RING_SIZE);

	sw_write_reg(SWITCH_REG_SHDA, 0);
	sw_write_reg(SWITCH_REG_SLDA, KSEG1ADDR(txl_descs));
	sw_write_reg(SWITCH_REG_RHDA, 0);
	sw_write_reg(SWITCH_REG_RLDA, KSEG1ADDR(rxl_descs));

	for (i = 0; i < SWITCH_NUM_PORTS; i++) {
		struct net_device *dev;
		struct adm5120_sw *priv;

		dev = adm5120_if_alloc();
		if (!dev) {
			err = -ENOMEM;
			goto err;
		}

		adm5120_devs[i] = dev;
		priv = netdev_priv(dev);

		priv->port = i;

		memcpy(dev->dev_addr, adm5120_eth_macs[i], 6);
		adm5120_write_mac(dev);

		err = register_netdev(dev);
		if (err) {
			SW_INFO("%s register failed, error=%d\n",
				dev->name, err);
			goto err;
		}
	}

	/* setup vlan/port mapping after devs are filled up */
	adm5120_set_vlan(adm5120_eth_vlans);

	/* enable CPU port */
	t = sw_read_reg(SWITCH_REG_CPUP_CONF);
	t &= ~CPUP_CONF_DCPUP;
	sw_write_reg(SWITCH_REG_CPUP_CONF, t);

	return 0;

err:
	adm5120_switch_cleanup();

	SW_ERR("init failed\n");
	return err;
}

static void __exit adm5120_switch_exit(void)
{
	adm5120_switch_cleanup();
}

module_init(adm5120_switch_init);
module_exit(adm5120_switch_exit);