/*
 *	ADM5120 built-in Ethernet switch driver
 *
 *	Copyright Jeroen Vreeken (pe1rxq@amsat.org), 2005
 *
 *	Inspiration for this driver came from the original ADMtek 2.4
 *	driver, Copyright ADMtek Inc.
 */
#include <linux/autoconf.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <asm/mipsregs.h>
#include <asm/irq.h>
#include <asm/io.h>
#include "adm5120sw.h"

#include <asm/mach-adm5120/adm5120_info.h>
#include <asm/mach-adm5120/adm5120_irq.h>

MODULE_AUTHOR("Jeroen Vreeken (pe1rxq@amsat.org)");
MODULE_DESCRIPTION("ADM5120 ethernet switch driver");
MODULE_LICENSE("GPL");

/*
 * The ADM5120 uses an internal matrix to determine which ports
 * belong to which VLAN.
 * The default generates a VLAN (and device) for each port
 * (including the MII port), with the CPU port a member of all of them.
 *
 * Another example, one big switch and everything mapped to eth0:
 * 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00
 */
static unsigned char vlan_matrix[SW_DEVS] = {
	0x41, 0x42, 0x44, 0x48, 0x50, 0x60
};
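/*
 * Layout note for the matrix above (inferred from how it is used in this
 * file, not taken from a datasheet): each byte is the port bitmap of one
 * VLAN/device, bits 0-5 selecting the switch/MII ports and bit 6 the CPU
 * port, so the default 0x41 means "CPU port plus port 0".
 */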

static int adm5120_nrdevs;

static struct net_device *adm5120_devs[SW_DEVS];
static struct adm5120_dma
	adm5120_dma_txh_v[ADM5120_DMA_TXH] __attribute__((aligned(16))),
	adm5120_dma_txl_v[ADM5120_DMA_TXL] __attribute__((aligned(16))),
	adm5120_dma_rxh_v[ADM5120_DMA_RXH] __attribute__((aligned(16))),
	adm5120_dma_rxl_v[ADM5120_DMA_RXL] __attribute__((aligned(16))),
	*adm5120_dma_txh,
	*adm5120_dma_txl,
	*adm5120_dma_rxh,
	*adm5120_dma_rxl;
static struct sk_buff
	*adm5120_skb_rxh[ADM5120_DMA_RXH],
	*adm5120_skb_rxl[ADM5120_DMA_RXL],
	*adm5120_skb_txh[ADM5120_DMA_TXH],
	*adm5120_skb_txl[ADM5120_DMA_TXL];
static int adm5120_rxhi = 0;
static int adm5120_rxli = 0;
/* We don't use high priority tx for now */
/*static int adm5120_txhi = 0;*/
static int adm5120_txli = 0;
static int adm5120_txhit = 0;
static int adm5120_txlit = 0;
static int adm5120_if_open = 0;

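/*
 * Plain MMIO accessors for the switch register block at SW_BASE; all
 * register manipulation in this driver goes through these two helpers.
 */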
static inline void adm5120_set_reg(unsigned int reg, unsigned long val)
{
	*(volatile unsigned long*)(SW_BASE+reg) = val;
}

static inline unsigned long adm5120_get_reg(unsigned int reg)
{
	return *(volatile unsigned long*)(SW_BASE+reg);
}

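/*
 * Called from the interrupt handler on the ring-full (HFULL/LFULL)
 * conditions: hand every descriptor in the RX ring back to the hardware
 * so reception can continue.  Frames still waiting in the ring are
 * resubmitted rather than delivered, i.e. effectively dropped (a reading
 * of this code, not a documented guarantee).
 */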
static inline void adm5120_rxfixup(struct adm5120_dma *dma,
		struct sk_buff **skbl, int num)
{
	int i;

	/* Resubmit the entire ring */
	for (i=0; i<num; i++) {
		dma[i].status = 0;
		dma[i].cntl = 0;
		dma[i].len = ADM5120_DMA_RXSIZE;
		dma[i].data = ADM5120_DMA_ADDR(skbl[i]->data) |
			ADM5120_DMA_OWN | (i==num-1 ? ADM5120_DMA_RINGEND : 0);
	}
}

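/*
 * Drain one RX ring: look up which net_device owns the receive port via
 * vlan_matrix, hand good frames to the network stack and re-arm every
 * processed descriptor.  If no replacement skb can be allocated the old
 * buffer is recycled and the frame is dropped.
 */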
static inline void adm5120_rx(struct adm5120_dma *dma, struct sk_buff **skbl,
		int *index, int num)
{
	struct sk_buff *skb, *skbn;
	struct adm5120_sw *priv;
	struct net_device *dev;
	int port, vlan, len;

	while (!(dma[*index].data & ADM5120_DMA_OWN)) {
		port = (dma[*index].status & ADM5120_DMA_PORTID);
		port >>= ADM5120_DMA_PORTSHIFT;
		for (vlan = 0; vlan < adm5120_nrdevs; vlan++) {
			if ((1<<port) & vlan_matrix[vlan])
				break;
		}
		if (vlan == adm5120_nrdevs)
			vlan = 0;
		dev = adm5120_devs[vlan];
		skb = skbl[*index];
		len = (dma[*index].status & ADM5120_DMA_LEN);
		len >>= ADM5120_DMA_LENSHIFT;
		len -= ETH_FCS;

		priv = netdev_priv(dev);
		if (len <= 0 || len > ADM5120_DMA_RXSIZE ||
		    dma[*index].status & ADM5120_DMA_FCSERR) {
			priv->stats.rx_errors++;
			skbn = NULL;
		} else {
			skbn = dev_alloc_skb(ADM5120_DMA_RXSIZE+16);
			if (skbn) {
				skb_put(skb, len);
				skb->dev = dev;
				skb->protocol = eth_type_trans(skb, dev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				dev->last_rx = jiffies;
				priv->stats.rx_packets++;
				priv->stats.rx_bytes += len;
				skb_reserve(skbn, 2);
				skbl[*index] = skbn;
			} else {
				printk(KERN_INFO "%s recycling!\n", dev->name);
			}
		}

		dma[*index].status = 0;
		dma[*index].cntl = 0;
		dma[*index].len = ADM5120_DMA_RXSIZE;
		dma[*index].data = ADM5120_DMA_ADDR(skbl[*index]->data) |
			ADM5120_DMA_OWN |
			(num-1==*index ? ADM5120_DMA_RINGEND : 0);
		if (num == ++*index)
			*index = 0;
		if (skbn)
			netif_rx(skb);
	}
}

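/*
 * Reclaim a TX ring: free every skb whose descriptor the hardware has
 * handed back (OWN bit cleared), stopping at the first still-owned slot.
 */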
static inline void adm5120_tx(struct adm5120_dma *dma, struct sk_buff **skbl,
		int *index, int num)
{
	while ((dma[*index].data & ADM5120_DMA_OWN) == 0 && skbl[*index]) {
		dev_kfree_skb_irq(skbl[*index]);
		skbl[*index] = NULL;
		if (++*index == num)
			*index = 0;
	}
}

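/*
 * Single interrupt handler shared by all ports: the handled sources are
 * masked for the duration of the handler, the status register is
 * acknowledged by writing it back, and each pending source is serviced
 * from its corresponding ring.
 */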
static irqreturn_t adm5120_sw_irq(int irq, void *dev_id)
{
	unsigned long intreg;

	adm5120_set_reg(ADM5120_INT_MASK,
		adm5120_get_reg(ADM5120_INT_MASK) | ADM5120_INTHANDLE);

	intreg = adm5120_get_reg(ADM5120_INT_ST);
	adm5120_set_reg(ADM5120_INT_ST, intreg);

	if (intreg & ADM5120_INT_RXH)
		adm5120_rx(adm5120_dma_rxh, adm5120_skb_rxh, &adm5120_rxhi,
			ADM5120_DMA_RXH);
	if (intreg & ADM5120_INT_HFULL)
		adm5120_rxfixup(adm5120_dma_rxh, adm5120_skb_rxh,
			ADM5120_DMA_RXH);
	if (intreg & ADM5120_INT_RXL)
		adm5120_rx(adm5120_dma_rxl, adm5120_skb_rxl, &adm5120_rxli,
			ADM5120_DMA_RXL);
	if (intreg & ADM5120_INT_LFULL)
		adm5120_rxfixup(adm5120_dma_rxl, adm5120_skb_rxl,
			ADM5120_DMA_RXL);
	if (intreg & ADM5120_INT_TXH)
		adm5120_tx(adm5120_dma_txh, adm5120_skb_txh, &adm5120_txhit,
			ADM5120_DMA_TXH);
	if (intreg & ADM5120_INT_TXL)
		adm5120_tx(adm5120_dma_txl, adm5120_skb_txl, &adm5120_txlit,
			ADM5120_DMA_TXL);

	adm5120_set_reg(ADM5120_INT_MASK,
		adm5120_get_reg(ADM5120_INT_MASK) & ~ADM5120_INTHANDLE);

	return IRQ_HANDLED;
}

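/*
 * Program the VLAN matrix: the six per-device port bitmaps are packed
 * into the two VLAN group registers, four bytes in GI and two in GII.
 */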
static void adm5120_set_vlan(char *matrix)
{
	unsigned long val;

	val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24);
	adm5120_set_reg(ADM5120_VLAN_GI, val);
	val = matrix[4] + (matrix[5]<<8);
	adm5120_set_reg(ADM5120_VLAN_GII, val);
}

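/*
 * All ports share the one switch interrupt, so the interrupt mask is
 * opened when the first interface comes up and closed again when the
 * last one goes down (tracked with the adm5120_if_open counter).
 */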
static int adm5120_sw_open(struct net_device *dev)
{
	if (!adm5120_if_open++)
		adm5120_set_reg(ADM5120_INT_MASK,
			adm5120_get_reg(ADM5120_INT_MASK) & ~ADM5120_INTHANDLE);
	netif_start_queue(dev);
	return 0;
}

static int adm5120_sw_stop(struct net_device *dev)
{
	netif_stop_queue(dev);
	if (!--adm5120_if_open)
		adm5120_set_reg(ADM5120_INT_MASK,
			adm5120_get_reg(ADM5120_INT_MASK) | ADM5120_INTMASKALL);
	return 0;
}

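/*
 * Transmit on the low priority ring only (the high priority ring is
 * unused, see above).  Note that a full ring drops the frame and counts
 * it as tx_dropped instead of stopping the queue.
 */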
static int adm5120_sw_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct adm5120_dma *dma = adm5120_dma_txl;
	struct sk_buff **skbl = adm5120_skb_txl;
	struct adm5120_sw *priv = netdev_priv(dev);
	int *index = &adm5120_txli;
	int num = ADM5120_DMA_TXL;
	int trigger = ADM5120_SEND_TRIG_L;

	dev->trans_start = jiffies;
	if (dma[*index].data & ADM5120_DMA_OWN) {
		dev_kfree_skb(skb);
		priv->stats.tx_dropped++;
		return 0;
	}

	dma[*index].data = ADM5120_DMA_ADDR(skb->data) | ADM5120_DMA_OWN;
	if (*index == num-1)
		dma[*index].data |= ADM5120_DMA_RINGEND;
	dma[*index].status =
		((skb->len<ETH_ZLEN?ETH_ZLEN:skb->len) << ADM5120_DMA_LENSHIFT) |
		(0x1 << priv->port);
	dma[*index].len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
	priv->stats.tx_packets++;
	priv->stats.tx_bytes += skb->len;
	skbl[*index] = skb;

	if (++*index == num)
		*index = 0;
	adm5120_set_reg(ADM5120_SEND_TRIG, trigger);

	return 0;
}

static void adm5120_tx_timeout(struct net_device *dev)
{
	netif_wake_queue(dev);
}

static struct net_device_stats *adm5120_sw_stats(struct net_device *dev)
{
	return &((struct adm5120_sw *)netdev_priv(dev))->stats;
}

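/*
 * Promiscuous and multicast handling is done per port by toggling the
 * "disable unknown unicast/multicast to CPU" bits in CPUP_CONF for the
 * ports that belong to this device (an interpretation of the DISUN/DISMC
 * bit names; the datasheet was not consulted).
 */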
static void adm5120_set_multicast_list(struct net_device *dev)
{
	struct adm5120_sw *priv = netdev_priv(dev);
	int portmask;

	portmask = vlan_matrix[priv->port] & 0x3f;

	if (dev->flags & IFF_PROMISC)
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) &
			~((portmask << ADM5120_DISUNSHIFT) & ADM5120_DISUNALL));
	else
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) |
			(portmask << ADM5120_DISUNSHIFT));

	if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI ||
	    dev->mc_count)
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) &
			~((portmask << ADM5120_DISMCSHIFT) & ADM5120_DISMCALL));
	else
		adm5120_set_reg(ADM5120_CPUP_CONF,
			adm5120_get_reg(ADM5120_CPUP_CONF) |
			(portmask << ADM5120_DISMCSHIFT));
}

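/*
 * Write the interface MAC address into the switch's address table entry
 * for this port and busy-wait until the hardware reports completion.
 */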
static void adm5120_write_mac(struct net_device *dev)
{
	struct adm5120_sw *priv = netdev_priv(dev);
	unsigned char *mac = dev->dev_addr;

	adm5120_set_reg(ADM5120_MAC_WT1,
		mac[2] | (mac[3]<<8) | (mac[4]<<16) | (mac[5]<<24));
	adm5120_set_reg(ADM5120_MAC_WT0, (priv->port<<3) |
		(mac[0]<<16) | (mac[1]<<24) | ADM5120_MAC_WRITE | ADM5120_VLAN_EN);

	while (!(adm5120_get_reg(ADM5120_MAC_WT0) & ADM5120_MAC_WRITE_DONE));
}

static int adm5120_sw_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	adm5120_write_mac(dev);
	return 0;
}

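/*
 * Private ioctls: SIOCGADMINFO reports the port/VLAN layout, and
 * SIOCGMATRIX/SIOCSMATRIX read or (with CAP_NET_ADMIN) replace the VLAN
 * matrix, with rq->ifr_data pointing at the user buffer in all cases.
 * A userspace caller would go through a socket roughly like this
 * (illustrative sketch only, not part of the driver):
 *
 *	struct ifreq ifr;
 *	unsigned char matrix[SW_DEVS];
 *	strcpy(ifr.ifr_name, "eth0");
 *	ifr.ifr_data = (void *)matrix;
 *	ioctl(fd, SIOCGMATRIX, &ifr);
 */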
static int adm5120_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	int err;
	struct adm5120_sw_info info;
	struct adm5120_sw *priv = netdev_priv(dev);

	switch (cmd) {
	case SIOCGADMINFO:
		info.magic = 0x5120;
		info.ports = adm5120_nrdevs;
		info.vlan = priv->port;
		err = copy_to_user(rq->ifr_data, &info, sizeof(info));
		if (err)
			return -EFAULT;
		break;
	case SIOCSMATRIX:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		err = copy_from_user(vlan_matrix, rq->ifr_data,
			sizeof(vlan_matrix));
		if (err)
			return -EFAULT;
		adm5120_set_vlan(vlan_matrix);
		break;
	case SIOCGMATRIX:
		err = copy_to_user(rq->ifr_data, vlan_matrix,
			sizeof(vlan_matrix));
		if (err)
			return -EFAULT;
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

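/*
 * Descriptor ring setup helpers.  TX rings start out empty; RX rings are
 * filled with freshly allocated receive buffers and handed to the
 * hardware straight away.
 */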
static void adm5120_dma_tx_init(struct adm5120_dma *dma, struct sk_buff **skb,
		int num)
{
	memset(dma, 0, sizeof(struct adm5120_dma)*num);
	dma[num-1].data |= ADM5120_DMA_RINGEND;
	memset(skb, 0, sizeof(struct sk_buff *)*num);
}

static void adm5120_dma_rx_init(struct adm5120_dma *dma, struct sk_buff **skb,
		int num)
{
	int i;

	memset(dma, 0, sizeof(struct adm5120_dma)*num);
	for (i=0; i<num; i++) {
		skb[i] = dev_alloc_skb(ADM5120_DMA_RXSIZE+16);
		if (!skb[i]) {
			/* out of memory: stop filling, but still put the
			 * ring end marker on the last descriptor below */
			i = num;
			break;
		}
		skb_reserve(skb[i], 2);
		dma[i].data = ADM5120_DMA_ADDR(skb[i]->data) | ADM5120_DMA_OWN;
		dma[i].cntl = 0;
		dma[i].len = ADM5120_DMA_RXSIZE;
		dma[i].status = 0;
	}
	dma[i-1].data |= ADM5120_DMA_RINGEND;
}

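/*
 * Module init: claim the switch interrupt, program the CPU port, port and
 * PHY configuration, set up the four DMA rings in uncached KSEG1 space,
 * load the default VLAN matrix and finally register one net_device per
 * configured VLAN.
 */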
static int __init adm5120_sw_init(void)
{
	int i, err;
	struct net_device *dev;

	err = request_irq(ADM5120_IRQ_SWITCH, adm5120_sw_irq, 0,
		"ethernet switch", NULL);
	if (err)
		goto out;

	adm5120_nrdevs = adm5120_board.iface_num;
	if (adm5120_nrdevs > 5 && !adm5120_has_gmii())
		adm5120_nrdevs = 5;

	adm5120_set_reg(ADM5120_CPUP_CONF,
		ADM5120_DISCCPUPORT | ADM5120_CRC_PADDING |
		ADM5120_DISUNALL | ADM5120_DISMCALL);
	adm5120_set_reg(ADM5120_PORT_CONF0, ADM5120_ENMC | ADM5120_ENBP);

	adm5120_set_reg(ADM5120_PHY_CNTL2, adm5120_get_reg(ADM5120_PHY_CNTL2) |
		ADM5120_AUTONEG | ADM5120_NORMAL | ADM5120_AUTOMDIX);
	adm5120_set_reg(ADM5120_PHY_CNTL3, adm5120_get_reg(ADM5120_PHY_CNTL3) |
		ADM5120_PHY_NTH);

	adm5120_set_reg(ADM5120_INT_MASK, ADM5120_INTMASKALL);
	adm5120_set_reg(ADM5120_INT_ST, ADM5120_INTMASKALL);

	adm5120_dma_txh = (void *)KSEG1ADDR((u32)adm5120_dma_txh_v);
	adm5120_dma_txl = (void *)KSEG1ADDR((u32)adm5120_dma_txl_v);
	adm5120_dma_rxh = (void *)KSEG1ADDR((u32)adm5120_dma_rxh_v);
	adm5120_dma_rxl = (void *)KSEG1ADDR((u32)adm5120_dma_rxl_v);

	adm5120_dma_tx_init(adm5120_dma_txh, adm5120_skb_txh, ADM5120_DMA_TXH);
	adm5120_dma_tx_init(adm5120_dma_txl, adm5120_skb_txl, ADM5120_DMA_TXL);
	adm5120_dma_rx_init(adm5120_dma_rxh, adm5120_skb_rxh, ADM5120_DMA_RXH);
	adm5120_dma_rx_init(adm5120_dma_rxl, adm5120_skb_rxl, ADM5120_DMA_RXL);
	adm5120_set_reg(ADM5120_SEND_HBADDR, KSEG1ADDR(adm5120_dma_txh));
	adm5120_set_reg(ADM5120_SEND_LBADDR, KSEG1ADDR(adm5120_dma_txl));
	adm5120_set_reg(ADM5120_RECEIVE_HBADDR, KSEG1ADDR(adm5120_dma_rxh));
	adm5120_set_reg(ADM5120_RECEIVE_LBADDR, KSEG1ADDR(adm5120_dma_rxl));

	adm5120_set_vlan(vlan_matrix);

	for (i=0; i<adm5120_nrdevs; i++) {
		adm5120_devs[i] = alloc_etherdev(sizeof(struct adm5120_sw));
		if (!adm5120_devs[i]) {
			err = -ENOMEM;
			goto out_int;
		}

		dev = adm5120_devs[i];
		SET_MODULE_OWNER(dev);
		memset(netdev_priv(dev), 0, sizeof(struct adm5120_sw));
		((struct adm5120_sw*)netdev_priv(dev))->port = i;
		dev->base_addr = SW_BASE;
		dev->irq = ADM5120_IRQ_SWITCH;
		dev->open = adm5120_sw_open;
		dev->hard_start_xmit = adm5120_sw_tx;
		dev->stop = adm5120_sw_stop;
		dev->get_stats = adm5120_sw_stats;
		dev->set_multicast_list = adm5120_set_multicast_list;
		dev->do_ioctl = adm5120_do_ioctl;
		dev->tx_timeout = adm5120_tx_timeout;
		dev->watchdog_timeo = ETH_TX_TIMEOUT;
		dev->set_mac_address = adm5120_sw_set_mac_address;
		/* HACK alert!!! The original ADMtek driver assumes that the
		 * MAC addresses can be read from flash, but Edimax decided
		 * to leave that space intentionally blank...
		 */
		memcpy(dev->dev_addr, "\x00\x50\xfc\x11\x22\x01", 6);
		dev->dev_addr[5] += i;
		adm5120_write_mac(dev);

		if ((err = register_netdev(dev))) {
			free_netdev(dev);
			goto out_int;
		}
		printk(KERN_INFO "%s: ADM5120 switch port%d\n", dev->name, i);
	}
	adm5120_set_reg(ADM5120_CPUP_CONF,
		ADM5120_CRC_PADDING | ADM5120_DISUNALL | ADM5120_DISMCALL);

	return 0;

out_int:
	/* Undo everything that did succeed */
	for (; i; i--) {
		unregister_netdev(adm5120_devs[i-1]);
		free_netdev(adm5120_devs[i-1]);
	}
	free_irq(ADM5120_IRQ_SWITCH, NULL);
out:
	printk(KERN_ERR "ADM5120 Ethernet switch init failed\n");
	return err;
}

static void __exit adm5120_sw_exit(void)
{
	int i;

	for (i = 0; i < adm5120_nrdevs; i++) {
		unregister_netdev(adm5120_devs[i]);
		free_netdev(adm5120_devs[i]);
	}

	free_irq(ADM5120_IRQ_SWITCH, NULL);

	for (i = 0; i < ADM5120_DMA_RXH; i++) {
		if (!adm5120_skb_rxh[i])
			break;
		kfree_skb(adm5120_skb_rxh[i]);
	}
	for (i = 0; i < ADM5120_DMA_RXL; i++) {
		if (!adm5120_skb_rxl[i])
			break;
		kfree_skb(adm5120_skb_rxl[i]);
	}
}

module_init(adm5120_sw_init);
module_exit(adm5120_sw_exit);