/* target/linux/adm5120-2.6/files/drivers/net/adm5120sw.c */
/*
 * ADM5120 built in ethernet switch driver
 *
 * Copyright Jeroen Vreeken (pe1rxq@amsat.org), 2005
 *
 * Inspiration for this driver came from the original ADMtek 2.4
 * driver, Copyright ADMtek Inc.
 */
#include <linux/autoconf.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <asm/mipsregs.h>
#include <asm/irq.h>
#include <asm/io.h>
#include "adm5120sw.h"

#include "adm5120_info.h"

MODULE_AUTHOR("Jeroen Vreeken (pe1rxq@amsat.org)");
MODULE_DESCRIPTION("ADM5120 ethernet switch driver");
MODULE_LICENSE("GPL");

/*
 * The ADM5120 uses an internal matrix to determine which ports
 * belong to which VLAN.
 * The default generates a VLAN (and device) for each port
 * (including MII port) and the CPU port is part of all of them.
 *
 * Another example, one big switch and everything mapped to eth0:
 * 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00
 */
static unsigned char vlan_matrix[SW_DEVS] = {
	0x41, 0x42, 0x44, 0x48, 0x50, 0x60
};

static int adm5120_nrdevs;

static struct net_device *adm5120_devs[SW_DEVS];
static struct adm5120_dma
	adm5120_dma_txh_v[ADM5120_DMA_TXH] __attribute__((aligned(16))),
	adm5120_dma_txl_v[ADM5120_DMA_TXL] __attribute__((aligned(16))),
	adm5120_dma_rxh_v[ADM5120_DMA_RXH] __attribute__((aligned(16))),
	adm5120_dma_rxl_v[ADM5120_DMA_RXL] __attribute__((aligned(16))),
	*adm5120_dma_txh,
	*adm5120_dma_txl,
	*adm5120_dma_rxh,
	*adm5120_dma_rxl;
static struct sk_buff
	*adm5120_skb_rxh[ADM5120_DMA_RXH],
	*adm5120_skb_rxl[ADM5120_DMA_RXL],
	*adm5120_skb_txh[ADM5120_DMA_TXH],
	*adm5120_skb_txl[ADM5120_DMA_TXL];
static int adm5120_rxhi = 0;
static int adm5120_rxli = 0;
/* We don't use high priority tx for now */
/*static int adm5120_txhi = 0;*/
static int adm5120_txli = 0;
static int adm5120_txhit = 0;
static int adm5120_txlit = 0;
static int adm5120_if_open = 0;

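/* Accessors for the memory-mapped switch registers at SW_BASE */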
static inline void adm5120_set_reg(unsigned int reg, unsigned long val)
{
	*(volatile unsigned long*)(SW_BASE+reg) = val;
}

static inline unsigned long adm5120_get_reg(unsigned int reg)
{
	return *(volatile unsigned long*)(SW_BASE+reg);
}

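/*
 * Re-arm every descriptor in an RX ring (used after a ring overflow):
 * each buffer is handed back to the hardware and the last entry is
 * marked as the end of the ring.
 */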
static inline void adm5120_rxfixup(struct adm5120_dma *dma,
		struct sk_buff **skbl, int num)
{
	int i;

	/* Resubmit the entire ring */
	for (i=0; i<num; i++) {
		dma[i].status = 0;
		dma[i].cntl = 0;
		dma[i].len = ADM5120_DMA_RXSIZE;
		dma[i].data = ADM5120_DMA_ADDR(skbl[i]->data) |
			ADM5120_DMA_OWN | (i==num-1 ? ADM5120_DMA_RINGEND : 0);
	}
}

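/*
 * Receive handler for one RX ring: map the source port to its VLAN
 * device, pass the frame up the stack and re-arm the descriptor with a
 * newly allocated buffer. If the allocation fails the old buffer is
 * recycled and the frame is dropped.
 */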
static inline void adm5120_rx(struct adm5120_dma *dma, struct sk_buff **skbl,
		int *index, int num)
{
	struct sk_buff *skb, *skbn;
	struct adm5120_sw *priv;
	struct net_device *dev;
	int port, vlan, len;

	while (!(dma[*index].data & ADM5120_DMA_OWN)) {
		port = (dma[*index].status & ADM5120_DMA_PORTID);
		port >>= ADM5120_DMA_PORTSHIFT;
		for (vlan = 0; vlan < adm5120_nrdevs; vlan++) {
			if ((1<<port) & vlan_matrix[vlan])
				break;
		}
		if (vlan == adm5120_nrdevs)
			vlan = 0;
		dev = adm5120_devs[vlan];
		skb = skbl[*index];
		len = (dma[*index].status & ADM5120_DMA_LEN);
		len >>= ADM5120_DMA_LENSHIFT;
		len -= ETH_FCS;

		priv = netdev_priv(dev);
		if (len <= 0 || len > ADM5120_DMA_RXSIZE ||
		    dma[*index].status & ADM5120_DMA_FCSERR) {
			priv->stats.rx_errors++;
			skbn = NULL;
		} else {
			skbn = dev_alloc_skb(ADM5120_DMA_RXSIZE+16);
			if (skbn) {
				skb_put(skb, len);
				skb->dev = dev;
				skb->protocol = eth_type_trans(skb, dev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				dev->last_rx = jiffies;
				priv->stats.rx_packets++;
				priv->stats.rx_bytes += len;
				skb_reserve(skbn, 2);
				skbl[*index] = skbn;
			} else {
				printk(KERN_INFO "%s recycling!\n", dev->name);
			}
		}

		dma[*index].status = 0;
		dma[*index].cntl = 0;
		dma[*index].len = ADM5120_DMA_RXSIZE;
		dma[*index].data = ADM5120_DMA_ADDR(skbl[*index]->data) |
			ADM5120_DMA_OWN |
			(num-1==*index ? ADM5120_DMA_RINGEND : 0);
		if (num == ++*index)
			*index = 0;
		if (skbn)
			netif_rx(skb);
	}
}

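/* Free transmitted skbs whose descriptors the hardware has released */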
static inline void adm5120_tx(struct adm5120_dma *dma, struct sk_buff **skbl,
		int *index, int num)
{
	while ((dma[*index].data & ADM5120_DMA_OWN) == 0 && skbl[*index]) {
		dev_kfree_skb_irq(skbl[*index]);
		skbl[*index] = NULL;
		if (++*index == num)
			*index = 0;
	}
}

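/*
 * Interrupt handler: temporarily mask the interrupts we service,
 * acknowledge the pending status bits, run the RX/TX ring handlers for
 * whatever work was signalled, then unmask again.
 */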
static irqreturn_t adm5120_sw_irq(int irq, void *dev_id)
{
	unsigned long intreg;

	adm5120_set_reg(ADM5120_INT_MASK,
		adm5120_get_reg(ADM5120_INT_MASK) | ADM5120_INTHANDLE);

	intreg = adm5120_get_reg(ADM5120_INT_ST);
	adm5120_set_reg(ADM5120_INT_ST, intreg);

	if (intreg & ADM5120_INT_RXH)
		adm5120_rx(adm5120_dma_rxh, adm5120_skb_rxh, &adm5120_rxhi,
			ADM5120_DMA_RXH);
	if (intreg & ADM5120_INT_HFULL)
		adm5120_rxfixup(adm5120_dma_rxh, adm5120_skb_rxh,
			ADM5120_DMA_RXH);
	if (intreg & ADM5120_INT_RXL)
		adm5120_rx(adm5120_dma_rxl, adm5120_skb_rxl, &adm5120_rxli,
			ADM5120_DMA_RXL);
	if (intreg & ADM5120_INT_LFULL)
		adm5120_rxfixup(adm5120_dma_rxl, adm5120_skb_rxl,
			ADM5120_DMA_RXL);
	if (intreg & ADM5120_INT_TXH)
		adm5120_tx(adm5120_dma_txh, adm5120_skb_txh, &adm5120_txhit,
			ADM5120_DMA_TXH);
	if (intreg & ADM5120_INT_TXL)
		adm5120_tx(adm5120_dma_txl, adm5120_skb_txl, &adm5120_txlit,
			ADM5120_DMA_TXL);

	adm5120_set_reg(ADM5120_INT_MASK,
		adm5120_get_reg(ADM5120_INT_MASK) & ~ADM5120_INTHANDLE);

	return IRQ_HANDLED;
}

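/* Load the port-to-VLAN matrix into the two VLAN group registers */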
static void adm5120_set_vlan(char *matrix)
{
	unsigned long val;

	val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24);
	adm5120_set_reg(ADM5120_VLAN_GI, val);
	val = matrix[4] + (matrix[5]<<8);
	adm5120_set_reg(ADM5120_VLAN_GII, val);
}

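/*
 * The first open unmasks the switch interrupts handled by this driver;
 * the matching last close (adm5120_sw_stop) masks everything again.
 */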
static int adm5120_sw_open(struct net_device *dev)
{
	if (!adm5120_if_open++)
		adm5120_set_reg(ADM5120_INT_MASK,
			adm5120_get_reg(ADM5120_INT_MASK) & ~ADM5120_INTHANDLE);
	netif_start_queue(dev);
	return 0;
}

static int adm5120_sw_stop(struct net_device *dev)
{
	netif_stop_queue(dev);
	if (!--adm5120_if_open)
		adm5120_set_reg(ADM5120_INT_MASK,
			adm5120_get_reg(ADM5120_INT_MASK) | ADM5120_INTMASKALL);
	return 0;
}

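/*
 * Transmit on the low priority ring: the frame is dropped if the next
 * descriptor is still owned by the hardware, otherwise it is queued and
 * the send trigger register is written to kick the switch.
 */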
static int adm5120_sw_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct adm5120_dma *dma = adm5120_dma_txl;
	struct sk_buff **skbl = adm5120_skb_txl;
	struct adm5120_sw *priv = netdev_priv(dev);
	int *index = &adm5120_txli;
	int num = ADM5120_DMA_TXL;
	int trigger = ADM5120_SEND_TRIG_L;

	dev->trans_start = jiffies;
	if (dma[*index].data & ADM5120_DMA_OWN) {
		dev_kfree_skb(skb);
		priv->stats.tx_dropped++;
		return 0;
	}

	dma[*index].data = ADM5120_DMA_ADDR(skb->data) | ADM5120_DMA_OWN;
	if (*index == num-1)
		dma[*index].data |= ADM5120_DMA_RINGEND;
	dma[*index].status =
		((skb->len<ETH_ZLEN?ETH_ZLEN:skb->len) << ADM5120_DMA_LENSHIFT) |
		(0x1 << priv->port);
	dma[*index].len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
	priv->stats.tx_packets++;
	priv->stats.tx_bytes += skb->len;
	skbl[*index] = skb;

	if (++*index == num)
		*index = 0;
	adm5120_set_reg(ADM5120_SEND_TRIG, trigger);

	return 0;
}

static void adm5120_tx_timeout(struct net_device *dev)
{
	netif_wake_queue(dev);
}

static struct net_device_stats *adm5120_sw_stats(struct net_device *dev)
{
	return &((struct adm5120_sw *)netdev_priv(dev))->stats;
}

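/*
 * Decide whether unknown unicast and multicast frames on this device's
 * ports are forwarded to the CPU, based on promiscuous/allmulti mode
 * and the multicast list.
 */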
static void adm5120_set_multicast_list(struct net_device *dev)
{
	struct adm5120_sw *priv = netdev_priv(dev);
	int portmask;

	portmask = vlan_matrix[priv->port] & 0x3f;

	if (dev->flags & IFF_PROMISC)
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) &
			~((portmask << ADM5120_DISUNSHIFT) & ADM5120_DISUNALL));
	else
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) |
			(portmask << ADM5120_DISUNSHIFT));

	if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI ||
	    dev->mc_count)
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) &
			~((portmask << ADM5120_DISMCSHIFT) & ADM5120_DISMCALL));
	else
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) |
			(portmask << ADM5120_DISMCSHIFT));
}

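/*
 * Write the interface MAC address (tagged with its port/VLAN index)
 * into the switch address table and busy-wait until the hardware
 * signals completion.
 */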
static void adm5120_write_mac(struct net_device *dev)
{
	struct adm5120_sw *priv = netdev_priv(dev);
	unsigned char *mac = dev->dev_addr;

	adm5120_set_reg(ADM5120_MAC_WT1,
		mac[2] | (mac[3]<<8) | (mac[4]<<16) | (mac[5]<<24));
	adm5120_set_reg(ADM5120_MAC_WT0, (priv->port<<3) |
		(mac[0]<<16) | (mac[1]<<24) | ADM5120_MAC_WRITE | ADM5120_VLAN_EN);

	while (!(adm5120_get_reg(ADM5120_MAC_WT0) & ADM5120_MAC_WRITE_DONE));
}

static int adm5120_sw_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	adm5120_write_mac(dev);
	return 0;
}

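/*
 * Private ioctls: SIOCGADMINFO reports switch info, SIOCGMATRIX and
 * SIOCSMATRIX read and (with CAP_NET_ADMIN) update the VLAN matrix.
 */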
static int adm5120_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	int err;
	struct adm5120_sw_info info;
	struct adm5120_sw *priv = netdev_priv(dev);

	switch (cmd) {
	case SIOCGADMINFO:
		info.magic = 0x5120;
		info.ports = adm5120_nrdevs;
		info.vlan = priv->port;
		err = copy_to_user(rq->ifr_data, &info, sizeof(info));
		if (err)
			return -EFAULT;
		break;
	case SIOCSMATRIX:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		err = copy_from_user(vlan_matrix, rq->ifr_data,
			sizeof(vlan_matrix));
		if (err)
			return -EFAULT;
		adm5120_set_vlan(vlan_matrix);
		break;
	case SIOCGMATRIX:
		err = copy_to_user(rq->ifr_data, vlan_matrix,
			sizeof(vlan_matrix));
		if (err)
			return -EFAULT;
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

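/* Reset a TX ring: all descriptors empty and owned by the CPU */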
static void adm5120_dma_tx_init(struct adm5120_dma *dma, struct sk_buff **skb,
		int num)
{
	memset(dma, 0, sizeof(struct adm5120_dma)*num);
	dma[num-1].data |= ADM5120_DMA_RINGEND;
	memset(skb, 0, sizeof(struct sk_buff *)*num);
}

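/* Fill an RX ring with freshly allocated skbs owned by the hardware */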
static void adm5120_dma_rx_init(struct adm5120_dma *dma, struct sk_buff **skb,
		int num)
{
	int i;

	memset(dma, 0, sizeof(struct adm5120_dma)*num);
	for (i=0; i<num; i++) {
		skb[i] = dev_alloc_skb(ADM5120_DMA_RXSIZE+16);
		if (!skb[i]) {
			i = num;
			break;
		}
		skb_reserve(skb[i], 2);
		dma[i].data = ADM5120_DMA_ADDR(skb[i]->data) | ADM5120_DMA_OWN;
		dma[i].cntl = 0;
		dma[i].len = ADM5120_DMA_RXSIZE;
		dma[i].status = 0;
	}
	dma[i-1].data |= ADM5120_DMA_RINGEND;
}

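/*
 * Module init: request the switch IRQ, probe the number of ports,
 * program the switch and PHY control registers, set up the DMA rings
 * and register one network device per VLAN.
 */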
static int __init adm5120_sw_init(void)
{
	int i, err;
	struct net_device *dev;

	err = request_irq(SW_IRQ, adm5120_sw_irq, SA_SHIRQ, "ethernet switch", NULL);
	if (err)
		goto out;

	/* MII port? */
	if (adm5120_get_reg(ADM5120_CODE) & ADM5120_CODE_PQFP)
		adm5120_nrdevs = 5;
	/* CFE based devices only have two enet ports */
	else if (adm5120_info.boot_loader == BOOT_LOADER_CFE)
		adm5120_nrdevs = 2;
	else
		adm5120_nrdevs = 6;

	adm5120_set_reg(ADM5120_CPUP_CONF,
		ADM5120_DISCCPUPORT | ADM5120_CRC_PADDING |
		ADM5120_DISUNALL | ADM5120_DISMCALL);
	adm5120_set_reg(ADM5120_PORT_CONF0, ADM5120_ENMC | ADM5120_ENBP);

	adm5120_set_reg(ADM5120_PHY_CNTL2, adm5120_get_reg(ADM5120_PHY_CNTL2) |
		ADM5120_AUTONEG | ADM5120_NORMAL | ADM5120_AUTOMDIX);
	adm5120_set_reg(ADM5120_PHY_CNTL3, adm5120_get_reg(ADM5120_PHY_CNTL3) |
		ADM5120_PHY_NTH);

	adm5120_set_reg(ADM5120_INT_MASK, ADM5120_INTMASKALL);
	adm5120_set_reg(ADM5120_INT_ST, ADM5120_INTMASKALL);

	adm5120_dma_txh = (void *)KSEG1ADDR((u32)adm5120_dma_txh_v);
	adm5120_dma_txl = (void *)KSEG1ADDR((u32)adm5120_dma_txl_v);
	adm5120_dma_rxh = (void *)KSEG1ADDR((u32)adm5120_dma_rxh_v);
	adm5120_dma_rxl = (void *)KSEG1ADDR((u32)adm5120_dma_rxl_v);

	adm5120_dma_tx_init(adm5120_dma_txh, adm5120_skb_txh, ADM5120_DMA_TXH);
	adm5120_dma_tx_init(adm5120_dma_txl, adm5120_skb_txl, ADM5120_DMA_TXL);
	adm5120_dma_rx_init(adm5120_dma_rxh, adm5120_skb_rxh, ADM5120_DMA_RXH);
	adm5120_dma_rx_init(adm5120_dma_rxl, adm5120_skb_rxl, ADM5120_DMA_RXL);
	adm5120_set_reg(ADM5120_SEND_HBADDR, KSEG1ADDR(adm5120_dma_txh));
	adm5120_set_reg(ADM5120_SEND_LBADDR, KSEG1ADDR(adm5120_dma_txl));
	adm5120_set_reg(ADM5120_RECEIVE_HBADDR, KSEG1ADDR(adm5120_dma_rxh));
	adm5120_set_reg(ADM5120_RECEIVE_LBADDR, KSEG1ADDR(adm5120_dma_rxl));

	adm5120_set_vlan(vlan_matrix);

	for (i=0; i<adm5120_nrdevs; i++) {
		adm5120_devs[i] = alloc_etherdev(sizeof(struct adm5120_sw));
		if (!adm5120_devs[i]) {
			err = -ENOMEM;
			goto out_int;
		}

		dev = adm5120_devs[i];
		SET_MODULE_OWNER(dev);
		memset(netdev_priv(dev), 0, sizeof(struct adm5120_sw));
		((struct adm5120_sw*)netdev_priv(dev))->port = i;
		dev->base_addr = SW_BASE;
		dev->irq = SW_IRQ;
		dev->open = adm5120_sw_open;
		dev->hard_start_xmit = adm5120_sw_tx;
		dev->stop = adm5120_sw_stop;
		dev->get_stats = adm5120_sw_stats;
		dev->set_multicast_list = adm5120_set_multicast_list;
		dev->do_ioctl = adm5120_do_ioctl;
		dev->tx_timeout = adm5120_tx_timeout;
		dev->watchdog_timeo = ETH_TX_TIMEOUT;
		dev->set_mac_address = adm5120_sw_set_mac_address;
		/* HACK alert!!! In the original admtek driver it is assumed
		   that you can read the MAC addresses from flash, but Edimax
		   decided to leave that space intentionally blank...
		 */
		memcpy(dev->dev_addr, "\x00\x50\xfc\x11\x22\x01", 6);
		dev->dev_addr[5] += i;
		adm5120_write_mac(dev);

		if ((err = register_netdev(dev))) {
			free_netdev(dev);
			goto out_int;
		}
		printk(KERN_INFO "%s: ADM5120 switch port%d\n", dev->name, i);
	}
	adm5120_set_reg(ADM5120_CPUP_CONF,
		ADM5120_CRC_PADDING | ADM5120_DISUNALL | ADM5120_DISMCALL);

	return 0;

out_int:
	/* Undo everything that did succeed */
	for (; i; i--) {
		unregister_netdev(adm5120_devs[i-1]);
		free_netdev(adm5120_devs[i-1]);
	}
	free_irq(SW_IRQ, NULL);
out:
	printk(KERN_ERR "ADM5120 Ethernet switch init failed\n");
	return err;
}

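/*
 * Module exit: unregister and free the network devices, release the
 * IRQ and free the RX buffers.
 */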
static void __exit adm5120_sw_exit(void)
{
	int i;

	for (i = 0; i < adm5120_nrdevs; i++) {
		unregister_netdev(adm5120_devs[i]);
		free_netdev(adm5120_devs[i]);
	}

	free_irq(SW_IRQ, NULL);

	for (i = 0; i < ADM5120_DMA_RXH; i++) {
		if (!adm5120_skb_rxh[i])
			break;
		kfree_skb(adm5120_skb_rxh[i]);
	}
	for (i = 0; i < ADM5120_DMA_RXL; i++) {
		if (!adm5120_skb_rxl[i])
			break;
		kfree_skb(adm5120_skb_rxl[i]);
	}
}

module_init(adm5120_sw_init);
module_exit(adm5120_sw_exit);