fix bug in ethernet irq handler
[openwrt/staging/dedeckeh.git] / openwrt / target / linux / aruba-2.6 / patches / 010-ar2313_enet.patch
1 diff -Nur linux-2.6.17/drivers/net/ar2313/ar2313.c linux-2.6.17-owrt/drivers/net/ar2313/ar2313.c
2 --- linux-2.6.17/drivers/net/ar2313/ar2313.c 1970-01-01 01:00:00.000000000 +0100
3 +++ linux-2.6.17-owrt/drivers/net/ar2313/ar2313.c 2006-06-19 12:57:27.000000000 +0200
4 @@ -0,0 +1,1649 @@
5 +/*
6 + * ar2313.c: Linux driver for the Atheros AR2313 Ethernet device.
7 + *
8 + * Copyright 2004 by Sameer Dekate, <sdekate@arubanetworks.com>.
9 + * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
10 + *
11 + * Thanks to Atheros for providing hardware and documentation
12 + * enabling me to write this driver.
13 + *
14 + * This program is free software; you can redistribute it and/or modify
15 + * it under the terms of the GNU General Public License as published by
16 + * the Free Software Foundation; either version 2 of the License, or
17 + * (at your option) any later version.
18 + *
19 + * Additional credits:
20 + * This code is taken from John Taylor's Sibyte driver and then
21 + * modified for the AR2313.
22 + */
23 +
24 +#include <linux/config.h>
25 +#include <linux/module.h>
26 +#include <linux/version.h>
27 +#include <linux/types.h>
28 +#include <linux/errno.h>
29 +#include <linux/ioport.h>
30 +#include <linux/pci.h>
31 +#include <linux/netdevice.h>
32 +#include <linux/etherdevice.h>
33 +#include <linux/skbuff.h>
34 +#include <linux/init.h>
35 +#include <linux/delay.h>
36 +#include <linux/mm.h>
37 +#include <linux/highmem.h>
38 +#include <linux/sockios.h>
39 +#include <linux/pkt_sched.h>
40 +#include <linux/compile.h>
41 +#include <linux/mii.h>
42 +#include <linux/ethtool.h>
43 +#include <linux/ctype.h>
44 +
45 +#include <net/sock.h>
46 +#include <net/ip.h>
47 +
48 +#include <asm/system.h>
49 +#include <asm/io.h>
50 +#include <asm/irq.h>
51 +#include <asm/byteorder.h>
52 +#include <asm/uaccess.h>
53 +#include <asm/bootinfo.h>
54 +
55 +extern char *getenv(char *e);
56 +
57 +
58 +#undef INDEX_DEBUG
59 +#define DEBUG 0
60 +#define DEBUG_TX 0
61 +#define DEBUG_RX 0
62 +#define DEBUG_INT 0
63 +#define DEBUG_MC 0
64 +#define DEBUG_ERR 1
65 +
66 +#ifndef __exit
67 +#define __exit
68 +#endif
69 +
70 +#ifndef min
71 +#define min(a,b) (((a)<(b))?(a):(b))
72 +#endif
73 +
74 +#ifndef SMP_CACHE_BYTES
75 +#define SMP_CACHE_BYTES L1_CACHE_BYTES
76 +#endif
77 +
78 +#ifndef SET_MODULE_OWNER
79 +#define SET_MODULE_OWNER(dev) {do{} while(0);}
80 +#define AR2313_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
81 +#define AR2313_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
82 +#else
83 +#define AR2313_MOD_INC_USE_COUNT {do{} while(0);}
84 +#define AR2313_MOD_DEC_USE_COUNT {do{} while(0);}
85 +#endif
86 +
87 +#define PHYSADDR(a) ((_ACAST32_ (a)) & 0x1fffffff)
88 +
89 +static char ethaddr[18] = "00:00:00:00:00:00";
90 +static char ifname[5] = "bond";
91 +
92 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,52)
93 +module_param_string(ethaddr, ethaddr, 18, 0);
94 +module_param_string(ifname, ifname, 5, 0);
95 +#else
96 +MODULE_PARM(ethaddr, "c18");
97 +MODULE_PARM(ifname, "c5");
98 +#endif
99 +
100 +#define AR2313_MBOX_SET_BIT 0x8
101 +
102 +#define BOARD_IDX_STATIC 0
103 +#define BOARD_IDX_OVERFLOW -1
104 +
105 +/* margot includes */
106 +#include <asm/idt-boards/rc32434/rc32434.h>
107 +
108 +#include "ar2313_msg.h"
109 +#include "platform.h"
110 +#include "dma.h"
111 +#include "ar2313.h"
112 +
113 +/*
114 + * New interrupt handler strategy:
115 + *
116 + * The old interrupt handler worked using the traditional method of
117 + * replacing an skbuff with a new one when a packet arrives. However
118 + * the rx rings do not need to contain a static number of buffer
119 + * descriptors, thus it makes sense to move the memory allocation out
120 + * of the main interrupt handler and do it in a bottom half handler
121 + * and only allocate new buffers when the number of buffers in the
122 + * ring is below a certain threshold. In order to avoid starving the
123 + * NIC under heavy load it is however necessary to force allocation
124 + * when hitting a minimum threshold. The strategy for allocation is as
125 + * follows:
126 + *
127 + * RX_LOW_THRES - allocate buffers in the bottom half
128 + * RX_PANIC_THRES - we are very low on buffers, allocate
129 + * the buffers in the interrupt handler
130 + * RX_RING_SIZE - maximum number of buffers in the rx ring
131 + *
132 + * One advantageous side effect of this allocation approach is that the
133 + * entire rx processing can be done without holding any spin lock
134 + * since the rx rings and registers are totally independent of the tx
135 + * ring and its registers. This of course includes the kmalloc's of
136 + * new skb's. Thus start_xmit can run in parallel with rx processing
137 + * and the memory allocation on SMP systems.
138 + *
139 + * Note that running the skb reallocation in a bottom half opens up
140 + * another can of races which needs to be handled properly. In
141 + * particular it can happen that the interrupt handler tries to run
142 + * the reallocation while the bottom half is either running on another
143 + * CPU or was interrupted on the same CPU. To get around this the
144 + * driver uses bitops to prevent the reallocation routines from being
145 + * reentered.
146 + *
147 + * TX handling can also be done without holding any spin lock,
148 + * since tx_csm is only written to by the interrupt
149 + * handler.
150 + */
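+/*
+ * A rough sketch of the allocation policy described above (illustrative
+ * only; the rx path in this driver, ar2313_rx_int(), simply replaces
+ * each received skb inline rather than tracking a fill level):
+ *
+ *   if (bufs_in_ring < RX_PANIC_THRES)     refill now, in irq context;
+ *   else if (bufs_in_ring < RX_LOW_THRES)  defer the refill to the tasklet;
+ *   else                                   leave the ring alone;
+ */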
151 +
152 +/*
153 + * Threshold values for RX buffer allocation - the low water marks for
154 + * when to start refilling the rings are set to 75% of the ring
155 + * sizes. It seems to make sense to refill the rings entirely from the
156 + * interrupt handler once it gets below the panic threshold, that way
157 + * we don't risk that the refilling is moved to another CPU when the
158 + * one running the interrupt handler just got the slab code hot in its
159 + * cache.
160 + */
161 +#define RX_RING_SIZE AR2313_DESCR_ENTRIES
162 +#define RX_PANIC_THRES (RX_RING_SIZE/4)
163 +#define RX_LOW_THRES ((3*RX_RING_SIZE)/4)
164 +#define CRC_LEN 4
165 +#define RX_OFFSET 2
166 +
167 +#define AR2313_BUFSIZE (AR2313_MTU + ETH_HLEN + CRC_LEN + RX_OFFSET)
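+/*
+ * For illustration, with the values used here (AR2313_DESCR_ENTRIES == 64
+ * and AR2313_MTU == 1692, see ar2313_msg.h) this works out to:
+ *   RX_RING_SIZE   = 64 descriptors
+ *   RX_PANIC_THRES = 16 (64/4)
+ *   RX_LOW_THRES   = 48 (3*64/4)
+ *   AR2313_BUFSIZE = 1692 + 14 + 4 + 2 = 1712 bytes per rx buffer
+ */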
168 +
169 +#ifdef MODULE
170 +MODULE_AUTHOR("Sameer Dekate <sdekate@arubanetworks.com>");
171 +MODULE_DESCRIPTION("AR2313 Ethernet driver");
172 +#endif
173 +
174 +#if DEBUG
175 +static char version[] __initdata =
176 + "ar2313.c: v0.02 2006/06/19 sdekate@arubanetworks.com\n";
177 +#endif /* DEBUG */
178 +
179 +#define virt_to_phys(x) ((u32)(x) & 0x1fffffff)
180 +
181 +// prototypes
182 +static short armiiread(short phy, short reg);
183 +static void armiiwrite(short phy, short reg, short data);
184 +#ifdef TX_TIMEOUT
185 +static void ar2313_tx_timeout(struct net_device *dev);
186 +#endif
187 +static void ar2313_halt(struct net_device *dev);
188 +static void rx_tasklet_func(unsigned long data);
189 +static void ar2313_multicast_list(struct net_device *dev);
190 +
191 +static struct net_device *root_dev;
192 +static int probed __initdata = 0;
193 +static unsigned long ar_eth_base;
194 +static unsigned long ar_dma_base;
195 +static unsigned long ar_int_base;
196 +static unsigned long ar_int_mac_mask;
197 +static unsigned long ar_int_phy_mask;
198 +
199 +#ifndef ERR
200 +#define ERR(fmt, args...) printk("%s: " fmt, __func__, ##args)
201 +#endif
202 +
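+/*
+ * Parse a colon-separated MAC address string (e.g. "00:0b:86:ba:db:ad",
+ * as returned by getenv("ethaddr")) into dev->dev_addr. Returns 0 on
+ * success, -EINVAL on a malformed string.
+ */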
203 +static int parse_mac_addr(struct net_device *dev, char* macstr){
204 + int i, j;
205 + unsigned char result, value;
206 +
207 + for (i=0; i<6; i++) {
208 + result = 0;
209 + if (i != 5 && *(macstr+2) != ':') {
210 + ERR("invalid mac address format: %d %c\n",
211 + i, *(macstr+2));
212 + return -EINVAL;
213 + }
214 + for (j=0; j<2; j++) {
215 + if (isxdigit(*macstr) && (value = isdigit(*macstr) ? *macstr-'0' :
216 + toupper(*macstr)-'A'+10) < 16)
217 + {
218 + result = result*16 + value;
219 + macstr++;
220 + }
221 + else {
222 + ERR("invalid mac address "
223 + "character: %c\n", *macstr);
224 + return -EINVAL;
225 + }
226 + }
227 +
228 + macstr++;
229 + dev->dev_addr[i] = result;
230 + }
231 +
232 + return 0;
233 +}
234 +
235 +
236 +int __init ar2313_probe(void)
237 +{
238 + struct net_device *dev;
239 + struct ar2313_private *sp;
240 + int version_disp;
241 + char name[64] ;
242 +
243 + if (probed)
244 + return -ENODEV;
245 + probed++;
246 +
247 + version_disp = 0;
248 + sprintf(name, "%s%%d", ifname) ;
249 + dev = alloc_etherdev(sizeof(struct ar2313_private));
250 +
251 + if (dev == NULL) {
252 + printk(KERN_ERR "ar2313: Unable to allocate net_device structure!\n");
253 + return -ENOMEM;
254 + }
255 +
256 + SET_MODULE_OWNER(dev);
257 +
258 + sp = dev->priv;
259 +
260 + sp->link = 0;
261 + switch (mips_machtype) {
262 + case MACH_ARUBA_AP60:
263 + ar_eth_base = 0xb8100000;
264 + ar_dma_base = ar_eth_base + 0x1000;
265 + ar_int_base = 0x1C003020;
266 + ar_int_mac_mask = RESET_ENET0|RESET_ENET1;
267 + ar_int_phy_mask = RESET_EPHY0|RESET_EPHY1;
268 + sp->mac = 1;
269 + sp->phy = 1;
270 + dev->irq = 4;
271 + break;
272 +
273 + case MACH_ARUBA_AP40:
274 + ar_eth_base = 0xb0500000;
275 + ar_dma_base = ar_eth_base + 0x1000;
276 + ar_int_base = 0x11000004;
277 + ar_int_mac_mask = 0x800;
278 + ar_int_phy_mask = 0x400;
279 + sp->mac = 0;
280 + sp->phy = 1;
281 + dev->irq = 4;
282 + break;
283 +
284 + case MACH_ARUBA_AP65:
285 + ar_eth_base = 0xb8100000;
286 + ar_dma_base = ar_eth_base + 0x1000;
287 + ar_int_base = 0x1C003020;
288 + ar_int_mac_mask = RESET_ENET0|RESET_ENET1;
289 + ar_int_phy_mask = RESET_EPHY0|RESET_EPHY1;
290 + sp->mac = 0;
291 +#if 0
292 + // commented out, for now
293 +
294 + if (mips_machtype == MACH_ARUBA_SAMSUNG) {
295 + sp->phy = 0x1f;
296 + } else {
297 + sp->phy = 1;
298 + }
299 +#else
300 + sp->phy = 1;
301 +#endif
302 + dev->irq = 3;
303 + break;
304 +
305 + default:
306 + printk("%s: unsupported mips_machtype=0x%lx\n",
307 + __FUNCTION__, mips_machtype) ;
308 + return -ENODEV;
309 + }
310 +
311 + spin_lock_init(&sp->lock);
312 +
313 + /* initialize func pointers */
314 + dev->open = &ar2313_open;
315 + dev->stop = &ar2313_close;
316 + dev->hard_start_xmit = &ar2313_start_xmit;
317 +
318 + dev->get_stats = &ar2313_get_stats;
319 + dev->set_multicast_list = &ar2313_multicast_list;
320 +#ifdef TX_TIMEOUT
321 + dev->tx_timeout = ar2313_tx_timeout;
322 + dev->watchdog_timeo = AR2313_TX_TIMEOUT;
323 +#endif
324 + dev->do_ioctl = &ar2313_ioctl;
325 +
326 + // SAMEER: do we need this?
327 + dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA;
328 +
329 + tasklet_init(&sp->rx_tasklet, rx_tasklet_func, (unsigned long) dev);
330 + tasklet_disable(&sp->rx_tasklet);
331 +
332 + /* display version info if adapter is found */
333 + if (!version_disp) {
334 + /* set display flag to TRUE so that */
335 + /* we only display this string ONCE */
336 + version_disp = 1;
337 +#if DEBUG
338 + printk(version);
339 +#endif /* DEBUG */
340 + }
341 +
342 + request_region(PHYSADDR(ETHERNET_BASE), ETHERNET_SIZE*ETHERNET_MACS,
343 + "AR2313ENET");
344 +
345 + sp->eth_regs = ioremap_nocache(PHYSADDR(ETHERNET_BASE + ETHERNET_SIZE*sp->mac),
346 + sizeof(*sp->eth_regs));
347 + if (!sp->eth_regs) {
348 + printk("Can't remap eth registers\n");
349 + return(-ENXIO);
350 + }
351 +
352 + sp->dma_regs = ioremap_nocache(PHYSADDR(DMA_BASE + DMA_SIZE*sp->mac),
353 + sizeof(*sp->dma_regs));
354 + dev->base_addr = (unsigned int) sp->dma_regs;
355 + if (!sp->dma_regs) {
356 + printk("Can't remap DMA registers\n");
357 + return(-ENXIO);
358 + }
359 +
360 + sp->int_regs = ioremap_nocache(PHYSADDR(INTERRUPT_BASE),
361 + sizeof(*sp->int_regs));
362 + if (!sp->int_regs) {
363 + printk("Can't remap INTERRUPT registers\n");
364 + return(-ENXIO);
365 + }
366 +
367 + strncpy(sp->name, "Atheros AR2313", sizeof (sp->name) - 1);
368 + sp->name [sizeof (sp->name) - 1] = '\0';
369 +
370 + {
371 + char mac[32];
372 + extern char *getenv(char *e);
373 + unsigned char def_mac[6] = {0, 0x0b, 0x86, 0xba, 0xdb, 0xad};
374 + memset(mac, 0, 32);
375 + memcpy(mac, getenv("ethaddr"), 17);
376 + if (parse_mac_addr(dev, mac)){
377 + printk("%s: MAC address not found, using default\n", __func__);
378 + memcpy(dev->dev_addr, def_mac, 6);
379 + }
380 + }
381 +
382 + sp->board_idx = BOARD_IDX_STATIC;
383 +
384 + if (ar2313_init(dev)) {
385 + /*
386 + * ar2313_init() calls ar2313_init_cleanup() on error.
387 + */
388 + free_netdev(dev);
389 + return -ENODEV;
390 + }
391 +
392 + if (register_netdev(dev)){
393 + printk("%s: register_netdev failed\n", __func__);
394 + return -1;
395 + }
396 +
397 + printk("%s: %s: %02x:%02x:%02x:%02x:%02x:%02x, irq %d\n",
398 + dev->name, sp->name,
399 + dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
400 + dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5],
401 + dev->irq);
402 +
403 + /* start link poll timer */
404 + ar2313_setup_timer(dev);
405 +
406 + /*
407 + * Register the device
408 + */
409 + root_dev = dev;
410 +
411 + return 0;
412 +}
413 +
414 +#if 0
415 +static void ar2313_dump_regs(struct net_device *dev)
416 +{
417 + unsigned int *ptr, i;
418 + struct ar2313_private *sp = (struct ar2313_private *)dev->priv;
419 +
420 + ptr = (unsigned int *)sp->eth_regs;
421 + for(i=0; i< (sizeof(ETHERNET_STRUCT)/ sizeof(unsigned int)); i++, ptr++) {
422 + printk("ENET: %08x = %08x\n", (int)ptr, *ptr);
423 + }
424 +
425 + ptr = (unsigned int *)sp->dma_regs;
426 + for(i=0; i< (sizeof(DMA)/ sizeof(unsigned int)); i++, ptr++) {
427 + printk("DMA: %08x = %08x\n", (int)ptr, *ptr);
428 + }
429 +
430 + ptr = (unsigned int *)sp->int_regs;
431 + for(i=0; i< (sizeof(INTERRUPT)/ sizeof(unsigned int)); i++, ptr++){
432 + printk("INT: %08x = %08x\n", (int)ptr, *ptr);
433 + }
434 +
435 + for (i = 0; i < AR2313_DESCR_ENTRIES; i++) {
436 + ar2313_descr_t *td = &sp->tx_ring[i];
437 + printk("Tx desc %2d: %08x %08x %08x %08x\n", i,
438 + td->status, td->devcs, td->addr, td->descr);
439 + }
440 +}
441 +#endif
442 +
443 +#ifdef TX_TIMEOUT
444 +static void
445 +ar2313_tx_timeout(struct net_device *dev)
446 +{
447 + struct ar2313_private *sp = (struct ar2313_private *)dev->priv;
448 + unsigned long flags;
449 +
450 +#if DEBUG_TX
451 + printk("Tx timeout\n");
452 +#endif
453 + spin_lock_irqsave(&sp->lock, flags);
454 + ar2313_restart(dev);
455 + spin_unlock_irqrestore(&sp->lock, flags);
456 +}
457 +#endif
458 +
459 +#if DEBUG_MC
460 +static void
461 +printMcList(struct net_device *dev)
462 +{
463 + struct dev_mc_list *list = dev->mc_list;
464 + int num=0, i;
465 + while(list){
466 + printk("%d MC ADDR ", num);
467 + for(i=0;i<list->dmi_addrlen;i++) {
468 + printk(":%02x", list->dmi_addr[i]);
469 + }
470 + list = list->next;
471 + printk("\n");
472 + }
473 +}
474 +#endif
475 +
476 +/*
477 + * Set or clear the multicast filter for this adaptor.
478 + * THIS IS ABSOLUTE CRAP, disabled
479 + */
480 +static void
481 +ar2313_multicast_list(struct net_device *dev)
482 +{
483 + /*
484 + * Always listen to broadcasts and
485 + * treat IFF bits independently
486 + */
487 + struct ar2313_private *sp = (struct ar2313_private *)dev->priv;
488 + unsigned int recognise;
489 +
490 + recognise = sp->eth_regs->mac_control;
491 +
492 + if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
493 + recognise |= MAC_CONTROL_PR;
494 + } else {
495 + recognise &= ~MAC_CONTROL_PR;
496 + }
497 +
498 + if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 15)) {
499 +#if DEBUG_MC
500 + printMcList(dev);
501 + printk("%s: all MULTICAST mc_count %d\n", __FUNCTION__, dev->mc_count);
502 +#endif
503 + recognise |= MAC_CONTROL_PM;/* all multicast */
504 + } else if (dev->mc_count > 0) {
505 +#if DEBUG_MC
506 + printMcList(dev);
507 + printk("%s: mc_count %d\n", __FUNCTION__, dev->mc_count);
508 +#endif
509 + recognise |= MAC_CONTROL_PM; /* for the time being */
510 + }
511 +#if DEBUG_MC
512 + printk("%s: setting %08x to %08x\n", __FUNCTION__, (int)sp->eth_regs, recognise);
513 +#endif
514 +
515 + sp->eth_regs->mac_control = recognise;
516 +}
517 +
518 +static void rx_tasklet_cleanup(struct net_device *dev)
519 +{
520 + struct ar2313_private *sp = dev->priv;
521 +
522 + /*
523 + * Tasklet may be scheduled. Need to get it removed from the list
524 + * since we're about to free the struct.
525 + */
526 +
527 + sp->unloading = 1;
528 + tasklet_enable(&sp->rx_tasklet);
529 + tasklet_kill(&sp->rx_tasklet);
530 +}
531 +
532 +static void __exit ar2313_module_cleanup(void)
533 +{
534 + rx_tasklet_cleanup(root_dev);
535 + ar2313_init_cleanup(root_dev);
536 + unregister_netdev(root_dev);
537 + free_netdev(root_dev);
538 + release_region(PHYSADDR(ETHERNET_BASE), ETHERNET_SIZE*ETHERNET_MACS);
539 +}
540 +
541 +
542 +/*
543 + * Restart the AR2313 ethernet controller.
544 + */
545 +static int ar2313_restart(struct net_device *dev)
546 +{
547 + /* disable interrupts */
548 + disable_irq(dev->irq);
549 +
550 + /* stop mac */
551 + ar2313_halt(dev);
552 +
553 + /* initialize */
554 + ar2313_init(dev);
555 +
556 + /* enable interrupts */
557 + enable_irq(dev->irq);
558 +
559 + return 0;
560 +}
561 +
562 +extern unsigned long mips_machtype;
563 +
564 +int __init ar2313_module_init(void)
565 +{
566 + int status=-1;
567 + switch (mips_machtype){
568 + case MACH_ARUBA_AP60:
569 + case MACH_ARUBA_AP65:
570 + case MACH_ARUBA_AP40:
571 + root_dev = NULL;
572 + status = ar2313_probe();
573 + break;
574 + }
575 + return status;
576 +}
577 +
578 +
579 +module_init(ar2313_module_init);
580 +module_exit(ar2313_module_cleanup);
581 +
582 +
583 +static void ar2313_free_descriptors(struct net_device *dev)
584 +{
585 + struct ar2313_private *sp = dev->priv;
586 + if (sp->rx_ring != NULL) {
587 + kfree((void*)KSEG0ADDR(sp->rx_ring));
588 + sp->rx_ring = NULL;
589 + sp->tx_ring = NULL;
590 + }
591 +}
592 +
593 +
594 +static int ar2313_allocate_descriptors(struct net_device *dev)
595 +{
596 + struct ar2313_private *sp = dev->priv;
597 + int size;
598 + int j;
599 + ar2313_descr_t *space;
600 +
601 + if(sp->rx_ring != NULL){
602 + printk("%s: already done.\n", __FUNCTION__);
603 + return 0;
604 + }
605 +
606 + size = (sizeof(ar2313_descr_t) * (AR2313_DESCR_ENTRIES * AR2313_QUEUES));
607 + space = kmalloc(size, GFP_KERNEL);
608 + if (space == NULL)
609 + return 1;
610 +
611 + /* invalidate caches */
612 + dma_cache_inv((unsigned int)space, size);
613 +
614 + /* now convert pointer to KSEG1 */
615 + space = (ar2313_descr_t *)KSEG1ADDR(space);
616 +
617 + memset((void *)space, 0, size);
618 +
619 + sp->rx_ring = space;
620 + space += AR2313_DESCR_ENTRIES;
621 +
622 + sp->tx_ring = space;
623 + space += AR2313_DESCR_ENTRIES;
624 +
625 + /* Initialize the transmit Descriptors */
626 + for (j = 0; j < AR2313_DESCR_ENTRIES; j++) {
627 + ar2313_descr_t *td = &sp->tx_ring[j];
628 + td->status = 0;
629 + td->devcs = DMA_TX1_CHAINED;
630 + td->addr = 0;
631 + td->descr = K1_TO_PHYS(&sp->tx_ring[(j+1) & (AR2313_DESCR_ENTRIES-1)]);
632 + }
633 +
634 + return 0;
635 +}
636 +
637 +
638 +/*
639 + * Generic cleanup handling data allocated during init. Used when the
640 + * module is unloaded or if an error occurs during initialization
641 + */
642 +static void ar2313_init_cleanup(struct net_device *dev)
643 +{
644 + struct ar2313_private *sp = dev->priv;
645 + struct sk_buff *skb;
646 + int j;
647 +
648 + ar2313_free_descriptors(dev);
649 +
650 + if (sp->eth_regs) iounmap((void*)sp->eth_regs);
651 + if (sp->dma_regs) iounmap((void*)sp->dma_regs);
652 +
653 + if (sp->rx_skb) {
654 + for (j = 0; j < AR2313_DESCR_ENTRIES; j++) {
655 + skb = sp->rx_skb[j];
656 + if (skb) {
657 + sp->rx_skb[j] = NULL;
658 + dev_kfree_skb(skb);
659 + }
660 + }
661 + kfree(sp->rx_skb);
662 + sp->rx_skb = NULL;
663 + }
664 +
665 + if (sp->tx_skb) {
666 + for (j = 0; j < AR2313_DESCR_ENTRIES; j++) {
667 + skb = sp->tx_skb[j];
668 + if (skb) {
669 + sp->tx_skb[j] = NULL;
670 + dev_kfree_skb(skb);
671 + }
672 + }
673 + kfree(sp->tx_skb);
674 + sp->tx_skb = NULL;
675 + }
676 +}
677 +
678 +static int ar2313_setup_timer(struct net_device *dev)
679 +{
680 + struct ar2313_private *sp = dev->priv;
681 +
682 + init_timer(&sp->link_timer);
683 +
684 + sp->link_timer.function = ar2313_link_timer_fn;
685 + sp->link_timer.data = (unsigned long) dev;
686 + sp->link_timer.expires = jiffies + HZ;
687 +
688 + add_timer(&sp->link_timer);
689 + return 0;
690 +
691 +}
692 +
693 +static void ar2313_link_timer_fn(unsigned long data)
694 +{
695 + struct net_device *dev = (struct net_device *) data;
696 + struct ar2313_private *sp = dev->priv;
697 +
698 + // see if the link status changed
699 + // This was needed to make sure we set the PHY to the
700 + // autonegotiated value of half or full duplex.
701 + ar2313_check_link(dev);
702 +
703 + // Loop faster when we don't have link.
704 + // This was needed to speed up the AP bootstrap time.
705 + if(sp->link == 0) {
706 + mod_timer(&sp->link_timer, jiffies + HZ/2);
707 + } else {
708 + mod_timer(&sp->link_timer, jiffies + LINK_TIMER);
709 + }
710 +}
711 +
712 +static void ar2313_check_link(struct net_device *dev)
713 +{
714 + struct ar2313_private *sp = dev->priv;
715 + u16 phyData;
716 +
717 + phyData = armiiread(sp->phy, MII_BMSR);
718 + if (sp->phyData != phyData) {
719 + if (phyData & BMSR_LSTATUS) {
720 + /* link is present, read link partner ability to determine duplex */
721 + int duplex = 0;
722 + u16 reg;
723 +
724 + sp->link = 1;
725 + reg = armiiread(sp->phy, MII_BMCR);
726 + if (reg & BMCR_ANENABLE) {
727 + /* auto neg enabled */
728 + reg = armiiread(sp->phy, MII_LPA);
729 + duplex = (reg & (LPA_100FULL|LPA_10FULL))? 1:0;
730 + } else {
731 + /* no auto neg, just read duplex config */
732 + duplex = (reg & BMCR_FULLDPLX)? 1:0;
733 + }
734 +
735 + printk(KERN_INFO "%s: Configuring MAC for %s duplex\n", dev->name,
736 + (duplex)? "full":"half");
737 +
738 + if (duplex) {
739 + /* full duplex */
740 + sp->eth_regs->mac_control = ((sp->eth_regs->mac_control | MAC_CONTROL_F) &
741 + ~MAC_CONTROL_DRO);
742 + } else {
743 + /* half duplex */
744 + sp->eth_regs->mac_control = ((sp->eth_regs->mac_control | MAC_CONTROL_DRO) &
745 + ~MAC_CONTROL_F);
746 + }
747 + } else {
748 + /* no link */
749 + sp->link = 0;
750 + }
751 + sp->phyData = phyData;
752 + }
753 +}
754 +
755 +static int
756 +ar2313_reset_reg(struct net_device *dev)
757 +{
758 + struct ar2313_private *sp = (struct ar2313_private *)dev->priv;
759 + unsigned int ethsal, ethsah;
760 + unsigned int flags;
761 +
762 + *sp->int_regs |= ar_int_mac_mask;
763 + mdelay(10);
764 + *sp->int_regs &= ~ar_int_mac_mask;
765 + mdelay(10);
766 + *sp->int_regs |= ar_int_phy_mask;
767 + mdelay(10);
768 + *sp->int_regs &= ~ar_int_phy_mask;
769 + mdelay(10);
770 +
771 + sp->dma_regs->bus_mode = (DMA_BUS_MODE_SWR);
772 + mdelay(10);
773 + sp->dma_regs->bus_mode = ((32 << DMA_BUS_MODE_PBL_SHIFT) | DMA_BUS_MODE_BLE);
774 +
775 + /* enable interrupts */
776 + sp->dma_regs->intr_ena = (DMA_STATUS_AIS |
777 + DMA_STATUS_NIS |
778 + DMA_STATUS_RI |
779 + DMA_STATUS_TI |
780 + DMA_STATUS_FBE);
781 + sp->dma_regs->xmt_base = K1_TO_PHYS(sp->tx_ring);
782 + sp->dma_regs->rcv_base = K1_TO_PHYS(sp->rx_ring);
783 + sp->dma_regs->control = (DMA_CONTROL_SR | DMA_CONTROL_ST | DMA_CONTROL_SF);
784 +
785 + sp->eth_regs->flow_control = (FLOW_CONTROL_FCE);
786 + sp->eth_regs->vlan_tag = (0x8100);
787 +
788 + /* Enable Ethernet Interface */
789 + flags = (MAC_CONTROL_TE | /* transmit enable */
790 + MAC_CONTROL_PM | /* pass mcast */
791 + MAC_CONTROL_F | /* full duplex */
792 + MAC_CONTROL_HBD); /* heart beat disabled */
793 +
794 + if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
795 + flags |= MAC_CONTROL_PR;
796 + }
797 + sp->eth_regs->mac_control = flags;
798 +
799 + /* Set all Ethernet station address registers to their initial values */
800 + ethsah = ((((u_int)(dev->dev_addr[5]) << 8) & (u_int)0x0000FF00) |
801 + (((u_int)(dev->dev_addr[4]) << 0) & (u_int)0x000000FF));
802 +
803 + ethsal = ((((u_int)(dev->dev_addr[3]) << 24) & (u_int)0xFF000000) |
804 + (((u_int)(dev->dev_addr[2]) << 16) & (u_int)0x00FF0000) |
805 + (((u_int)(dev->dev_addr[1]) << 8) & (u_int)0x0000FF00) |
806 + (((u_int)(dev->dev_addr[0]) << 0) & (u_int)0x000000FF) );
807 +
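+    /*
+     * For illustration, the default MAC 00:0b:86:ba:db:ad packs as
+     * ethsah = 0x0000addb (bytes 5 and 4) and
+     * ethsal = 0xba860b00 (bytes 3 down to 0).
+     */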
808 + sp->eth_regs->mac_addr[0] = ethsah;
809 + sp->eth_regs->mac_addr[1] = ethsal;
810 +
811 + mdelay(10);
812 +
813 + return(0);
814 +}
815 +
816 +
817 +static int ar2313_init(struct net_device *dev)
818 +{
819 + struct ar2313_private *sp = dev->priv;
820 + int ecode=0;
821 +
822 + /*
823 + * Allocate descriptors
824 + */
825 + if (ar2313_allocate_descriptors(dev)) {
826 + printk("%s: %s: ar2313_allocate_descriptors failed\n",
827 + dev->name, __FUNCTION__);
828 + ecode = -EAGAIN;
829 + goto init_error;
830 + }
831 +
832 + /*
833 + * Get the memory for the skb rings.
834 + */
835 + if(sp->rx_skb == NULL) {
836 + sp->rx_skb = kmalloc(sizeof(struct sk_buff *) * AR2313_DESCR_ENTRIES, GFP_KERNEL);
837 + if (!(sp->rx_skb)) {
838 + printk("%s: %s: rx_skb kmalloc failed\n",
839 + dev->name, __FUNCTION__);
840 + ecode = -EAGAIN;
841 + goto init_error;
842 + }
843 + }
844 + memset(sp->rx_skb, 0, sizeof(struct sk_buff *) * AR2313_DESCR_ENTRIES);
845 +
846 + if(sp->tx_skb == NULL) {
847 + sp->tx_skb = kmalloc(sizeof(struct sk_buff *) * AR2313_DESCR_ENTRIES, GFP_KERNEL);
848 + if (!(sp->tx_skb)) {
849 + printk("%s: %s: tx_skb kmalloc failed\n",
850 + dev->name, __FUNCTION__);
851 + ecode = -EAGAIN;
852 + goto init_error;
853 + }
854 + }
855 + memset(sp->tx_skb, 0, sizeof(struct sk_buff *) * AR2313_DESCR_ENTRIES);
856 +
857 + /*
858 + * Set tx_csm before we start receiving interrupts, otherwise
859 + * the interrupt handler might think it is supposed to process
860 + * tx ints before we are up and running, which may cause a null
861 + * pointer access in the int handler.
862 + */
863 + sp->rx_skbprd = 0;
864 + sp->cur_rx = 0;
865 + sp->tx_prd = 0;
866 + sp->tx_csm = 0;
867 +
868 + /*
869 + * Zero the stats before starting the interface
870 + */
871 + memset(&sp->stats, 0, sizeof(sp->stats));
872 +
873 + /*
874 + * We load the ring here as there seems to be no way to tell the
875 + * firmware to wipe the ring without re-initializing it.
876 + */
877 + ar2313_load_rx_ring(dev, RX_RING_SIZE);
878 +
879 + /*
880 + * Init hardware
881 + */
882 + ar2313_reset_reg(dev);
883 +
884 + /*
885 + * Get the IRQ
886 + */
887 + ecode = request_irq(dev->irq, &ar2313_interrupt, SA_SHIRQ | SA_INTERRUPT, dev->name, dev);
888 + if (ecode) {
889 + printk(KERN_WARNING "%s: %s: Requested IRQ %d is busy\n",
890 + dev->name, __FUNCTION__, dev->irq);
891 + goto init_error;
892 + }
893 +
894 +#if 0
895 + // commented out, for now
896 +
897 + if(mips_machtype == MACH_ARUBA_SAMSUNG) {
898 + int i;
899 + /* configure Marvell 88E6060 */
900 + /* reset chip */
901 + armiiwrite(0x1f, 0xa, 0xa130);
902 + do {
903 + udelay(1000);
904 + i = armiiread(sp->phy, 0xa);
905 + } while (i & 0x8000);
906 +
907 + /* configure MAC address */
908 + armiiwrite(sp->phy, 0x1, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
909 + armiiwrite(sp->phy, 0x2, dev->dev_addr[2] << 8 | dev->dev_addr[3]);
910 + armiiwrite(sp->phy, 0x3, dev->dev_addr[4] << 8 | dev->dev_addr[5]);
911 +
912 + /* set ports to forwarding */
913 + armiiwrite(0x18, 0x4, 0x3);
914 + armiiwrite(0x1c, 0x4, 0x3);
915 + armiiwrite(0x1d, 0x4, 0x3);
916 + }
917 +#endif
918 +
919 + tasklet_enable(&sp->rx_tasklet);
920 +
921 + return 0;
922 +
923 + init_error:
924 + ar2313_init_cleanup(dev);
925 + return ecode;
926 +}
927 +
928 +/*
929 + * Load the rx ring.
930 + *
931 + * Loading rings is safe without holding the spin lock since this is
932 + * done only before the device is enabled, so no interrupts are
933 + * generated and no races occur with the interrupt handler/tasklet.
934 + */
935 +static void ar2313_load_rx_ring(struct net_device *dev, int nr_bufs)
936 +{
937 +
938 + struct ar2313_private *sp = dev->priv;
939 + short i, idx;
940 +
941 + idx = sp->rx_skbprd;
942 +
943 + for (i = 0; i < nr_bufs; i++) {
944 + struct sk_buff *skb;
945 + ar2313_descr_t *rd;
946 +
947 + if (sp->rx_skb[idx]) {
948 +#if DEBUG_RX
949 + printk(KERN_INFO "ar2313 rx refill full\n");
950 +#endif /* DEBUG */
951 + break;
952 + }
953 +
954 + // partha: create additional room for the second GRE fragment
955 + skb = alloc_skb(AR2313_BUFSIZE+128, GFP_ATOMIC);
956 + if (!skb) {
957 + printk("\n\n\n\n %s: No memory in system\n\n\n\n", __FUNCTION__);
958 + break;
959 + }
960 + // partha: create additional room in the front for tx pkt capture
961 + skb_reserve(skb, 32);
962 +
963 + /*
964 + * Make sure IP header starts on a fresh cache line.
965 + */
966 + skb->dev = dev;
967 + skb_reserve(skb, RX_OFFSET);
968 + sp->rx_skb[idx] = skb;
969 +
970 + rd = (ar2313_descr_t *) &sp->rx_ring[idx];
971 +
972 + /* initialize dma descriptor */
973 + rd->devcs = ((AR2313_BUFSIZE << DMA_RX1_BSIZE_SHIFT) |
974 + DMA_RX1_CHAINED);
975 + rd->addr = virt_to_phys(skb->data);
976 + rd->descr = virt_to_phys(&sp->rx_ring[(idx+1) & (AR2313_DESCR_ENTRIES-1)]);
977 + rd->status = DMA_RX_OWN;
978 +
979 + idx = DSC_NEXT(idx);
980 + }
981 +
982 + if (!i) {
983 +#if DEBUG_ERR
984 + printk(KERN_INFO "Out of memory when allocating standard receive buffers\n");
985 +#endif /* DEBUG */
986 + } else {
987 + sp->rx_skbprd = idx;
988 + }
989 +
990 + return;
991 +}
992 +
993 +#define AR2313_MAX_PKTS_PER_CALL 64
994 +
995 +static int ar2313_rx_int(struct net_device *dev)
996 +{
997 + struct ar2313_private *sp = dev->priv;
998 + struct sk_buff *skb, *skb_new;
999 + ar2313_descr_t *rxdesc;
1000 + unsigned int status;
1001 + u32 idx;
1002 + int pkts = 0;
1003 + int rval;
1004 +
1005 + idx = sp->cur_rx;
1006 +
1007 + /* process at most the entire ring and then wait for another interrupt */
1008 + while(1) {
1009 +
1010 + rxdesc = &sp->rx_ring[idx];
1011 + status = rxdesc->status;
1012 + if (status & DMA_RX_OWN) {
1013 + /* DMA engine owns descriptor or descr not yet filled in */
1014 + rval = 0;
1015 + break;
1016 + }
1017 +
1018 + if (++pkts > AR2313_MAX_PKTS_PER_CALL) {
1019 + rval = 1;
1020 + break;
1021 + }
1022 +
1023 +#if DEBUG_RX
1024 + printk("index %d\n", idx);
1025 + printk("RX status %08x\n", rxdesc->status);
1026 + printk("RX devcs %08x\n", rxdesc->devcs );
1027 + printk("RX addr %08x\n", rxdesc->addr );
1028 + printk("RX descr %08x\n", rxdesc->descr );
1029 +#endif
1030 +
1031 + if ((status & (DMA_RX_ERROR|DMA_RX_ERR_LENGTH)) &&
1032 + (!(status & DMA_RX_LONG))){
1033 +#if DEBUG_RX
1034 + printk("%s: rx ERROR %08x\n", __FUNCTION__, status);
1035 +#endif
1036 + sp->stats.rx_errors++;
1037 + sp->stats.rx_dropped++;
1038 +
1039 + /* add statistics counters */
1040 + if (status & DMA_RX_ERR_CRC) sp->stats.rx_crc_errors++;
1041 + if (status & DMA_RX_ERR_COL) sp->stats.rx_over_errors++;
1042 + if (status & DMA_RX_ERR_LENGTH)
1043 + sp->stats.rx_length_errors++;
1044 + if (status & DMA_RX_ERR_RUNT) sp->stats.rx_over_errors++;
1045 + if (status & DMA_RX_ERR_DESC) sp->stats.rx_over_errors++;
1046 +
1047 + } else {
1048 + /* alloc new buffer. */
1049 + skb_new = dev_alloc_skb(AR2313_BUFSIZE + RX_OFFSET + 128);
1050 + if (skb_new != NULL) {
1051 +
1052 + skb = sp->rx_skb[idx];
1053 + /* set skb */
1054 + skb_put(skb, ((status >> DMA_RX_LEN_SHIFT) & 0x3fff) - CRC_LEN);
1055 +
1056 +#ifdef CONFIG_MERLOT
1057 + if ((dev->am_pkt_handler == NULL) ||
1058 + (dev->am_pkt_handler(skb, dev) == 0)) {
1059 +#endif
1060 + sp->stats.rx_bytes += skb->len;
1061 + skb->protocol = eth_type_trans(skb, dev);
1062 + /* pass the packet to upper layers */
1063 +
1064 +#ifdef CONFIG_MERLOT
1065 + if (dev->asap_netif_rx)
1066 + dev->asap_netif_rx(skb);
1067 + else
1068 +#endif
1069 + netif_rx(skb);
1070 +#ifdef CONFIG_MERLOT
1071 + }
1072 +#endif
1073 + skb_new->dev = dev;
1074 + /* 16 bit align */
1075 + skb_reserve(skb_new, RX_OFFSET+32);
1076 + /* reset descriptor's curr_addr */
1077 + rxdesc->addr = virt_to_phys(skb_new->data);
1078 +
1079 + sp->stats.rx_packets++;
1080 + sp->rx_skb[idx] = skb_new;
1081 +
1082 + } else {
1083 + sp->stats.rx_dropped++;
1084 + }
1085 + }
1086 +
1087 + rxdesc->devcs = ((AR2313_BUFSIZE << DMA_RX1_BSIZE_SHIFT) |
1088 + DMA_RX1_CHAINED);
1089 + rxdesc->status = DMA_RX_OWN;
1090 +
1091 + idx = DSC_NEXT(idx);
1092 + }
1093 +
1094 + sp->cur_rx = idx;
1095 +
1096 + return rval;
1097 +}
1098 +
1099 +
1100 +static void ar2313_tx_int(struct net_device *dev)
1101 +{
1102 + struct ar2313_private *sp = dev->priv;
1103 + u32 idx;
1104 + struct sk_buff *skb;
1105 + ar2313_descr_t *txdesc;
1106 + unsigned int status=0;
1107 +
1108 + idx = sp->tx_csm;
1109 +
1110 + while (idx != sp->tx_prd) {
1111 +
1112 + txdesc = &sp->tx_ring[idx];
1113 +
1114 +#if DEBUG_TX
1115 + printk("%s: TXINT: csm=%d idx=%d prd=%d status=%x devcs=%x addr=%08x descr=%x\n",
1116 + dev->name, sp->tx_csm, idx, sp->tx_prd,
1117 + txdesc->status, txdesc->devcs, txdesc->addr, txdesc->descr);
1118 +#endif /* DEBUG */
1119 +
1120 + if ((status = txdesc->status) & DMA_TX_OWN) {
1121 + /* ar2313 dma still owns descr */
1122 + break;
1123 + }
1124 + /* done with this descriptor */
1125 + txdesc->status = 0;
1126 +
1127 + if (status & DMA_TX_ERROR){
1128 + sp->stats.tx_errors++;
1129 + sp->stats.tx_dropped++;
1130 + if(status & DMA_TX_ERR_UNDER)
1131 + sp->stats.tx_fifo_errors++;
1132 + if(status & DMA_TX_ERR_HB)
1133 + sp->stats.tx_heartbeat_errors++;
1134 + if(status & (DMA_TX_ERR_LOSS |
1135 + DMA_TX_ERR_LINK))
1136 + sp->stats.tx_carrier_errors++;
1137 + if (status & (DMA_TX_ERR_LATE|
1138 + DMA_TX_ERR_COL |
1139 + DMA_TX_ERR_JABBER |
1140 + DMA_TX_ERR_DEFER))
1141 + sp->stats.tx_aborted_errors++;
1142 + } else {
1143 + /* transmit OK */
1144 + sp->stats.tx_packets++;
1145 + }
1146 +
1147 + skb = sp->tx_skb[idx];
1148 + sp->tx_skb[idx] = NULL;
1149 + idx = DSC_NEXT(idx);
1150 + sp->stats.tx_bytes += skb->len;
1151 + dev_kfree_skb_irq(skb);
1152 + }
1153 +
1154 + sp->tx_csm = idx;
1155 +
1156 + return;
1157 +}
1158 +
1159 +
1160 +static void
1161 +rx_tasklet_func(unsigned long data)
1162 +{
1163 + struct net_device *dev = (struct net_device *) data;
1164 + struct ar2313_private *sp = dev->priv;
1165 +
1166 + if (sp->unloading) {
1167 + return;
1168 + }
1169 +
1170 + if (ar2313_rx_int(dev)) {
1171 + tasklet_hi_schedule(&sp->rx_tasklet);
1172 + }
1173 + else {
1174 + unsigned long flags;
1175 + spin_lock_irqsave(&sp->lock, flags);
1176 + sp->dma_regs->intr_ena |= DMA_STATUS_RI;
1177 + spin_unlock_irqrestore(&sp->lock, flags);
1178 + }
1179 +}
1180 +
1181 +static void
1182 +rx_schedule(struct net_device *dev)
1183 +{
1184 + struct ar2313_private *sp = dev->priv;
1185 +
1186 + sp->dma_regs->intr_ena &= ~DMA_STATUS_RI;
1187 +
1188 + tasklet_hi_schedule(&sp->rx_tasklet);
1189 +}
1190 +
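+/*
+ * Interrupt handler. Status bits are acknowledged masked by intr_ena,
+ * so an RI (receive) event that arrives while receive interrupts are
+ * disabled by rx_schedule() is not cleared here and is presumably still
+ * pending when rx_tasklet_func() re-enables DMA_STATUS_RI.
+ */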
1191 +static irqreturn_t ar2313_interrupt(int irq, void *dev_id, struct pt_regs *ptregs)
1192 +{
1193 + struct net_device *dev = (struct net_device *)dev_id;
1194 + struct ar2313_private *sp = dev->priv;
1195 + unsigned int status, enabled;
1196 +
1197 + /* clear interrupt */
1198 + /*
1199 + * Don't clear RI bit if currently disabled.
1200 + */
1201 + status = sp->dma_regs->status;
1202 + enabled = sp->dma_regs->intr_ena;
1203 + sp->dma_regs->status = status & enabled;
1204 +
1205 + if (status & DMA_STATUS_NIS) {
1206 + /* normal status */
1207 + /*
1208 + * Don't schedule rx processing if interrupt
1209 + * is already disabled.
1210 + */
1211 + if (status & enabled & DMA_STATUS_RI) {
1212 + /* receive interrupt */
1213 + rx_schedule(dev);
1214 + }
1215 + if (status & DMA_STATUS_TI) {
1216 + /* transmit interrupt */
1217 + ar2313_tx_int(dev);
1218 + }
1219 + }
1220 +
1221 + if (status & DMA_STATUS_AIS) {
1222 +#if DEBUG_INT
1223 + printk("%s: AIS set %08x & %x\n", __FUNCTION__,
1224 + status, (DMA_STATUS_FBE | DMA_STATUS_TPS));
1225 +#endif
1226 + /* abnormal status */
1227 + if (status & (DMA_STATUS_FBE | DMA_STATUS_TPS)) {
1228 + ar2313_restart(dev);
1229 + }
1230 + }
1231 + return IRQ_HANDLED;
1232 +}
1233 +
1234 +
1235 +static int ar2313_open(struct net_device *dev)
1236 +{
1237 + struct ar2313_private *sp;
1238 +
1239 + sp = dev->priv;
1240 +
1241 + dev->mtu = 1500;
1242 + netif_start_queue(dev);
1243 +
1244 + sp->eth_regs->mac_control |= MAC_CONTROL_RE;
1245 +
1246 + AR2313_MOD_INC_USE_COUNT;
1247 +
1248 + return 0;
1249 +}
1250 +
1251 +static void ar2313_halt(struct net_device *dev)
1252 +{
1253 + struct ar2313_private *sp = dev->priv;
1254 + int j;
1255 +
1256 + tasklet_disable(&sp->rx_tasklet);
1257 +
1258 + /* kill the MAC */
1259 + sp->eth_regs->mac_control &= ~(MAC_CONTROL_RE | /* disable Receives */
1260 + MAC_CONTROL_TE); /* disable Transmits */
1261 + /* stop dma */
1262 + sp->dma_regs->control = 0;
1263 + sp->dma_regs->bus_mode = DMA_BUS_MODE_SWR;
1264 +
1265 + /* place phy and MAC in reset */
1266 + *sp->int_regs |= (ar_int_mac_mask | ar_int_phy_mask);
1267 +
1268 + /* free buffers on tx ring */
1269 + for (j = 0; j < AR2313_DESCR_ENTRIES; j++) {
1270 + struct sk_buff *skb;
1271 + ar2313_descr_t *txdesc;
1272 +
1273 + txdesc = &sp->tx_ring[j];
1274 + txdesc->descr = 0;
1275 +
1276 + skb = sp->tx_skb[j];
1277 + if (skb) {
1278 + dev_kfree_skb(skb);
1279 + sp->tx_skb[j] = NULL;
1280 + }
1281 + }
1282 +}
1283 +
1284 +/*
1285 + * close should do nothing. Here's why. It's called when
1286 + * 'ifconfig bond0 down' is run. If it calls free_irq then
1287 + * the irq is gone forever! When bond0 is made 'up' again,
1288 + * ar2313_open() does not call request_irq(). Worse,
1289 + * the call to ar2313_halt() generates a WDOG reset due to
1290 + * the write to 'sp->int_regs' and the box reboots.
1291 + * Commenting this out is good since it allows the
1292 + * system to resume when bond0 is made up again.
1293 + */
1294 +static int ar2313_close(struct net_device *dev)
1295 +{
1296 +#if 0
1297 + /*
1298 + * Disable interrupts
1299 + */
1300 + disable_irq(dev->irq);
1301 +
1302 + /*
1303 + * Without (or before) releasing irq and stopping hardware, this
1304 + * is an absolute non-sense, by the way. It will be reset instantly
1305 + * by the first irq.
1306 + */
1307 + netif_stop_queue(dev);
1308 +
1309 + /* stop the MAC and DMA engines */
1310 + ar2313_halt(dev);
1311 +
1312 + /* release the interrupt */
1313 + free_irq(dev->irq, dev);
1314 +
1315 +#endif
1316 + AR2313_MOD_DEC_USE_COUNT;
1317 + return 0;
1318 +}
1319 +
1320 +static int ar2313_start_xmit(struct sk_buff *skb, struct net_device *dev)
1321 +{
1322 + struct ar2313_private *sp = dev->priv;
1323 + ar2313_descr_t *td;
1324 + u32 idx;
1325 +
1326 + idx = sp->tx_prd;
1327 + td = &sp->tx_ring[idx];
1328 +
1329 + if (td->status & DMA_TX_OWN) {
1330 +#if DEBUG_TX
1331 + printk("%s: No space left to Tx\n", __FUNCTION__);
1332 +#endif
1333 + /* free skbuf and lie to the caller that we sent it out */
1334 + sp->stats.tx_dropped++;
1335 + dev_kfree_skb(skb);
1336 +
1337 + /* restart transmitter in case locked */
1338 + sp->dma_regs->xmt_poll = 0;
1339 + return 0;
1340 + }
1341 +
1342 + /* Setup the transmit descriptor. */
1343 + td->devcs = ((skb->len << DMA_TX1_BSIZE_SHIFT) |
1344 + (DMA_TX1_LS|DMA_TX1_IC|DMA_TX1_CHAINED));
1345 + td->addr = virt_to_phys(skb->data);
1346 + td->status = DMA_TX_OWN;
1347 +
1348 + /* kick transmitter last */
1349 + sp->dma_regs->xmt_poll = 0;
1350 +
1351 +#if DEBUG_TX
1352 + printk("index %d\n", idx);
1353 + printk("TX status %08x\n", td->status);
1354 + printk("TX devcs %08x\n", td->devcs );
1355 + printk("TX addr %08x\n", td->addr );
1356 + printk("TX descr %08x\n", td->descr );
1357 +#endif
1358 +
1359 + sp->tx_skb[idx] = skb;
1360 + idx = DSC_NEXT(idx);
1361 + sp->tx_prd = idx;
1362 +
1363 + //dev->trans_start = jiffies;
1364 +
1365 + return 0;
1366 +}
1367 +
1368 +static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
1369 +{
1370 + struct ar2313_private *np = dev->priv;
1371 + u32 tmp;
1372 +
1373 + ecmd->supported =
1374 + (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
1375 + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1376 + SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
1377 +
1378 + ecmd->port = PORT_TP;
1379 + /* only supports internal transceiver */
1380 + ecmd->transceiver = XCVR_INTERNAL;
1381 + /* not sure what this is for */
1382 + ecmd->phy_address = 1;
1383 +
1384 + ecmd->advertising = ADVERTISED_MII;
1385 + tmp = armiiread(np->phy, MII_ADVERTISE);
1386 + if (tmp & ADVERTISE_10HALF)
1387 + ecmd->advertising |= ADVERTISED_10baseT_Half;
1388 + if (tmp & ADVERTISE_10FULL)
1389 + ecmd->advertising |= ADVERTISED_10baseT_Full;
1390 + if (tmp & ADVERTISE_100HALF)
1391 + ecmd->advertising |= ADVERTISED_100baseT_Half;
1392 + if (tmp & ADVERTISE_100FULL)
1393 + ecmd->advertising |= ADVERTISED_100baseT_Full;
1394 +
1395 + tmp = armiiread(np->phy, MII_BMCR);
1396 + if (tmp & BMCR_ANENABLE) {
1397 + ecmd->advertising |= ADVERTISED_Autoneg;
1398 + ecmd->autoneg = AUTONEG_ENABLE;
1399 + } else {
1400 + ecmd->autoneg = AUTONEG_DISABLE;
1401 + }
1402 +
1403 + if (ecmd->autoneg == AUTONEG_ENABLE) {
1404 + tmp = armiiread(np->phy, MII_LPA);
1405 + if (tmp & (LPA_100FULL|LPA_10FULL)) {
1406 + ecmd->duplex = DUPLEX_FULL;
1407 + } else {
1408 + ecmd->duplex = DUPLEX_HALF;
1409 + }
1410 + if (tmp & (LPA_100FULL|LPA_100HALF)) {
1411 + ecmd->speed = SPEED_100;
1412 + } else {
1413 + ecmd->speed = SPEED_10;
1414 + }
1415 + } else {
1416 + if (tmp & BMCR_FULLDPLX) {
1417 + ecmd->duplex = DUPLEX_FULL;
1418 + } else {
1419 + ecmd->duplex = DUPLEX_HALF;
1420 + }
1421 + if (tmp & BMCR_SPEED100) {
1422 + ecmd->speed = SPEED_100;
1423 + } else {
1424 + ecmd->speed = SPEED_10;
1425 + }
1426 + }
1427 +
1428 + /* ignore maxtxpkt, maxrxpkt for now */
1429 +
1430 + return 0;
1431 +}
1432 +
1433 +static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
1434 +{
1435 + struct ar2313_private *np = dev->priv;
1436 + u32 tmp;
1437 +
1438 + if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
1439 + return -EINVAL;
1440 + if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
1441 + return -EINVAL;
1442 + if (ecmd->port != PORT_TP)
1443 + return -EINVAL;
1444 + if (ecmd->transceiver != XCVR_INTERNAL)
1445 + return -EINVAL;
1446 + if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
1447 + return -EINVAL;
1448 + /* ignore phy_address, maxtxpkt, maxrxpkt for now */
1449 +
1450 + /* WHEW! now lets bang some bits */
1451 +
1452 + tmp = armiiread(np->phy, MII_BMCR);
1453 + if (ecmd->autoneg == AUTONEG_ENABLE) {
1454 + /* turn on autonegotiation */
1455 + tmp |= BMCR_ANENABLE;
1456 + printk("%s: Enabling auto-neg\n", dev->name);
1457 + } else {
1458 + /* turn off auto negotiation, set speed and duplexity */
1459 + tmp &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
1460 + if (ecmd->speed == SPEED_100)
1461 + tmp |= BMCR_SPEED100;
1462 + if (ecmd->duplex == DUPLEX_FULL)
1463 + tmp |= BMCR_FULLDPLX;
1464 + printk("%s: Hard coding %d/%s\n", dev->name,
1465 + (ecmd->speed == SPEED_100)? 100:10,
1466 + (ecmd->duplex == DUPLEX_FULL)? "full":"half");
1467 + }
1468 + armiiwrite(np->phy, MII_BMCR, tmp);
1469 + np->phyData = 0;
1470 + return 0;
1471 +}
1472 +
1473 +static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
1474 +{
1475 + struct ar2313_private *np = dev->priv;
1476 + u32 cmd;
1477 +
1478 + if (get_user(cmd, (u32 *)useraddr))
1479 + return -EFAULT;
1480 +
1481 + switch (cmd) {
1482 + /* get settings */
1483 + case ETHTOOL_GSET: {
1484 + struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1485 + spin_lock_irq(&np->lock);
1486 + netdev_get_ecmd(dev, &ecmd);
1487 + spin_unlock_irq(&np->lock);
1488 + if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
1489 + return -EFAULT;
1490 + return 0;
1491 + }
1492 + /* set settings */
1493 + case ETHTOOL_SSET: {
1494 + struct ethtool_cmd ecmd;
1495 + int r;
1496 + if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
1497 + return -EFAULT;
1498 + spin_lock_irq(&np->lock);
1499 + r = netdev_set_ecmd(dev, &ecmd);
1500 + spin_unlock_irq(&np->lock);
1501 + return r;
1502 + }
1503 + /* restart autonegotiation */
1504 + case ETHTOOL_NWAY_RST: {
1505 + int tmp;
1506 + int r = -EINVAL;
1507 + /* if autoneg is off, it's an error */
1508 + tmp = armiiread(np->phy, MII_BMCR);
1509 + if (tmp & BMCR_ANENABLE) {
1510 + tmp |= (BMCR_ANRESTART);
1511 + armiiwrite(np->phy, MII_BMCR, tmp);
1512 + r = 0;
1513 + }
1514 + return r;
1515 + }
1516 + /* get link status */
1517 + case ETHTOOL_GLINK: {
1518 + struct ethtool_value edata = {ETHTOOL_GLINK};
1519 + edata.data = (armiiread(np->phy, MII_BMSR)&BMSR_LSTATUS) ? 1:0;
1520 + if (copy_to_user(useraddr, &edata, sizeof(edata)))
1521 + return -EFAULT;
1522 + return 0;
1523 + }
1524 + }
1525 +
1526 + return -EOPNOTSUPP;
1527 +}
1528 +
1529 +static int ar2313_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1530 +{
1531 + struct mii_ioctl_data *data = (struct mii_ioctl_data *)&ifr->ifr_data;
1532 +
1533 + switch (cmd) {
1534 + case SIOCDEVPRIVATE: {
1535 + struct ar2313_cmd scmd;
1536 +
1537 + if (copy_from_user(&scmd, ifr->ifr_data, sizeof(scmd)))
1538 + return -EFAULT;
1539 +
1540 +#if DEBUG
1541 + printk("%s: ioctl devprivate c=%d a=%x l=%d m=%d d=%x,%x\n",
1542 + dev->name, scmd.cmd,
1543 + scmd.address, scmd.length,
1544 + scmd.mailbox, scmd.data[0], scmd.data[1]);
1545 +#endif /* DEBUG */
1546 +
1547 + switch (scmd.cmd) {
1548 + case AR2313_READ_DATA:
1549 + if(scmd.length==4){
1550 + scmd.data[0] = *((u32*)scmd.address);
1551 + } else if(scmd.length==2) {
1552 + scmd.data[0] = *((u16*)scmd.address);
1553 + } else if (scmd.length==1) {
1554 + scmd.data[0] = *((u8*)scmd.address);
1555 + } else {
1556 + return -EOPNOTSUPP;
1557 + }
1558 + if(copy_to_user(ifr->ifr_data, &scmd, sizeof(scmd)))
1559 + return -EFAULT;
1560 + break;
1561 +
1562 + case AR2313_WRITE_DATA:
1563 + if(scmd.length==4){
1564 + *((u32*)scmd.address) = scmd.data[0];
1565 + } else if(scmd.length==2) {
1566 + *((u16*)scmd.address) = scmd.data[0];
1567 + } else if (scmd.length==1) {
1568 + *((u8*)scmd.address) = scmd.data[0];
1569 + } else {
1570 + return -EOPNOTSUPP;
1571 + }
1572 + break;
1573 +
1574 + case AR2313_GET_VERSION:
1575 + // SAMEER: sprintf((char*) &scmd, "%s", ARUBA_VERSION);
1576 + if(copy_to_user(ifr->ifr_data, &scmd, sizeof(scmd)))
1577 + return -EFAULT;
1578 + break;
1579 +
1580 + default:
1581 + return -EOPNOTSUPP;
1582 + }
1583 + return 0;
1584 + }
1585 +
1586 + case SIOCETHTOOL:
1587 + return netdev_ethtool_ioctl(dev, (void *) ifr->ifr_data);
1588 +
1589 + case SIOCGMIIPHY: /* Get address of MII PHY in use. */
1590 + data->phy_id = 1;
1591 + /* Fall Through */
1592 +
1593 + case SIOCGMIIREG: /* Read MII PHY register. */
1594 + case SIOCDEVPRIVATE+1: /* for binary compat, remove in 2.5 */
1595 + data->val_out = armiiread(data->phy_id & 0x1f,
1596 + data->reg_num & 0x1f);
1597 + return 0;
1598 + case SIOCSMIIREG: /* Write MII PHY register. */
1599 + case SIOCDEVPRIVATE+2: /* for binary compat, remove in 2.5 */
1600 + if (!capable(CAP_NET_ADMIN))
1601 + return -EPERM;
1602 + armiiwrite(data->phy_id & 0x1f,
1603 + data->reg_num & 0x1f, data->val_in);
1604 + return 0;
1605 +
1606 + case SIOCSIFHWADDR:
1607 + if (copy_from_user(dev->dev_addr, ifr->ifr_data, sizeof(dev->dev_addr)))
1608 + return -EFAULT;
1609 + return 0;
1610 +
1611 + case SIOCGIFHWADDR:
1612 + if (copy_to_user(ifr->ifr_data, dev->dev_addr, sizeof(dev->dev_addr)))
1613 + return -EFAULT;
1614 + return 0;
1615 +
1616 + default:
1617 + break;
1618 + }
1619 +
1620 + return -EOPNOTSUPP;
1621 +}
1622 +
1623 +static struct net_device_stats *ar2313_get_stats(struct net_device *dev)
1624 +{
1625 + struct ar2313_private *sp = dev->priv;
1626 + return &sp->stats;
1627 +}
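+/*
+ * MII management helpers. Both always go through MAC 0's MII registers
+ * (ETHERNET_BASE), whichever MAC this device instance uses, and busy-wait
+ * on MII_ADDR_BUSY; armiiread() returns the data field shifted down by
+ * MII_DATA_SHIFT.
+ */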
1628 +
1629 +static short
1630 +armiiread(short phy, short reg)
1631 +{
1632 + volatile ETHERNET_STRUCT * ethernet;
1633 +
1634 + ethernet = (volatile ETHERNET_STRUCT *)ETHERNET_BASE; /* always MAC 0 */
1635 + ethernet->mii_addr = ((reg << MII_ADDR_REG_SHIFT) |
1636 + (phy << MII_ADDR_PHY_SHIFT));
1637 + while (ethernet->mii_addr & MII_ADDR_BUSY);
1638 + return (ethernet->mii_data >> MII_DATA_SHIFT);
1639 +}
1640 +
1641 +static void
1642 +armiiwrite(short phy, short reg, short data)
1643 +{
1644 + volatile ETHERNET_STRUCT * ethernet;
1645 +
1646 + ethernet = (volatile ETHERNET_STRUCT *)ETHERNET_BASE; /* always MAC 0 */
1647 + while (ethernet->mii_addr & MII_ADDR_BUSY);
1648 + ethernet->mii_data = data << MII_DATA_SHIFT;
1649 + ethernet->mii_addr = ((reg << MII_ADDR_REG_SHIFT) |
1650 + (phy << MII_ADDR_PHY_SHIFT) |
1651 + MII_ADDR_WRITE);
1652 +}
1653 +
1654 diff -Nur linux-2.6.17/drivers/net/ar2313/ar2313.h linux-2.6.17-owrt/drivers/net/ar2313/ar2313.h
1655 --- linux-2.6.17/drivers/net/ar2313/ar2313.h 1970-01-01 01:00:00.000000000 +0100
1656 +++ linux-2.6.17-owrt/drivers/net/ar2313/ar2313.h 2006-06-19 12:05:29.000000000 +0200
1657 @@ -0,0 +1,190 @@
1658 +#ifndef _AR2313_H_
1659 +#define _AR2313_H_
1660 +
1661 +#include <linux/config.h>
1662 +#include <asm/bootinfo.h>
1663 +#include "platform.h"
1664 +
1665 +extern unsigned long mips_machtype;
1666 +
1667 +#undef ETHERNET_BASE
1668 +#define ETHERNET_BASE ar_eth_base
1669 +#define ETHERNET_SIZE 0x00100000
1670 +#define ETHERNET_MACS 2
1671 +
1672 +#undef DMA_BASE
1673 +#define DMA_BASE ar_dma_base
1674 +#define DMA_SIZE 0x00100000
1675 +
1676 +
1677 +/*
1678 + * probe link timer - 5 secs
1679 + */
1680 +#define LINK_TIMER (5*HZ)
1681 +
1682 +/*
1683 + * Interrupt register base address
1684 + */
1685 +#define INTERRUPT_BASE PHYS_TO_K1(ar_int_base)
1686 +
1687 +/*
1688 + * Reset Register
1689 + */
1690 +#define AR531X_RESET (AR531X_RESETTMR + 0x0020)
1691 +#define RESET_SYSTEM 0x00000001 /* cold reset full system */
1692 +#define RESET_PROC 0x00000002 /* cold reset MIPS core */
1693 +#define RESET_WLAN0 0x00000004 /* cold reset WLAN MAC and BB */
1694 +#define RESET_EPHY0 0x00000008 /* cold reset ENET0 phy */
1695 +#define RESET_EPHY1 0x00000010 /* cold reset ENET1 phy */
1696 +#define RESET_ENET0 0x00000020 /* cold reset ENET0 mac */
1697 +#define RESET_ENET1 0x00000040 /* cold reset ENET1 mac */
1698 +
1699 +#define IS_DMA_TX_INT(X) (((X) & (DMA_STATUS_TI)) != 0)
1700 +#define IS_DMA_RX_INT(X) (((X) & (DMA_STATUS_RI)) != 0)
1701 +#define IS_DRIVER_OWNED(X) (((X) & (DMA_TX_OWN)) == 0)
1702 +
1703 +#ifndef K1_TO_PHYS
1704 +// hack
1705 +#define K1_TO_PHYS(x) (((unsigned int)(x)) & 0x1FFFFFFF) /* kseg1 to physical */
1706 +#endif
1707 +
1708 +#ifndef PHYS_TO_K1
1709 +// hack
1710 +#define PHYS_TO_K1(x) (((unsigned int)(x)) | 0xA0000000) /* physical to kseg1 */
1711 +#endif
1712 +
1713 +#define AR2313_TX_TIMEOUT (HZ/4)
1714 +
1715 +/*
1716 + * Rings
1717 + */
1718 +#define DSC_RING_ENTRIES_SIZE (AR2313_DESCR_ENTRIES * sizeof(struct desc))
1719 +#define DSC_NEXT(idx) ((idx + 1) & (AR2313_DESCR_ENTRIES - 1))
1720 +
1721 +static inline int tx_space (u32 csm, u32 prd)
1722 +{
1723 + return (csm - prd - 1) & (AR2313_DESCR_ENTRIES - 1);
1724 +}
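+/*
+ * Worked example, with AR2313_DESCR_ENTRIES == 64 (ar2313_msg.h):
+ * tx_space(10, 5) == (10 - 5 - 1) & 63 == 4 free slots; with wrap-around,
+ * tx_space(2, 60) == (2 - 60 - 1) & 63 == 5.
+ */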
1725 +
1726 +#if MAX_SKB_FRAGS
1727 +#define TX_RESERVED (MAX_SKB_FRAGS+1) /* +1 for message header */
1728 +#define tx_ring_full(csm, prd) (tx_space(csm, prd) <= TX_RESERVED)
1729 +#else
1730 +#define tx_ring_full 0
1731 +#endif
1732 +
1733 +#define AR2313_MBGET 2
1734 +#define AR2313_MBSET 3
1735 +#define AR2313_PCI_RECONFIG 4
1736 +#define AR2313_PCI_DUMP 5
1737 +#define AR2313_TEST_PANIC 6
1738 +#define AR2313_TEST_NULLPTR 7
1739 +#define AR2313_READ_DATA 8
1740 +#define AR2313_WRITE_DATA 9
1741 +#define AR2313_GET_VERSION 10
1742 +#define AR2313_TEST_HANG 11
1743 +#define AR2313_SYNC 12
1744 +
1745 +
1746 +struct ar2313_cmd {
1747 + u32 cmd;
1748 + u32 address; /* virtual address of image */
1749 + u32 length; /* size of image to download */
1750 + u32 mailbox; /* mailbox to get/set */
1751 + u32 data[2]; /* contents of mailbox to read/write */
1752 +};
1753 +
1754 +
1755 +/*
1756 + * Private data structure for the AR2313.
1757 + *
1758 + * Elements are grouped so variables used by the tx handling goes
1759 + * together, and will go into the same cache lines etc. in order to
1760 + * avoid cache line contention between the rx and tx handling on SMP.
1761 + *
1762 + * Frequently accessed variables are put at the beginning of the
1763 + * struct to help the compiler generate better/shorter code.
1764 + */
1765 +struct ar2313_private
1766 +{
1767 + int version;
1768 + u32 mb[2];
1769 +
1770 + volatile ETHERNET_STRUCT *eth_regs;
1771 + volatile DMA *dma_regs;
1772 + volatile u32 *int_regs;
1773 +
1774 + spinlock_t lock; /* Serialise access to device */
1775 +
1776 + /*
1777 + * RX and TX descriptors, must be adjacent
1778 + */
1779 + ar2313_descr_t *rx_ring;
1780 + ar2313_descr_t *tx_ring;
1781 +
1782 +
1783 + struct sk_buff **rx_skb;
1784 + struct sk_buff **tx_skb;
1785 +
1786 + /*
1787 + * RX elements
1788 + */
1789 + u32 rx_skbprd;
1790 + u32 cur_rx;
1791 +
1792 + /*
1793 + * TX elements
1794 + */
1795 + u32 tx_prd;
1796 + u32 tx_csm;
1797 +
1798 + /*
1799 + * Misc elements
1800 + */
1801 + int board_idx;
1802 + char name[48];
1803 + struct net_device_stats stats;
1804 + struct {
1805 + u32 address;
1806 + u32 length;
1807 + char *mapping;
1808 + } desc;
1809 +
1810 +
1811 + struct timer_list link_timer;
1812 + unsigned short phy; /* merlot phy = 1, samsung phy = 0x1f */
1813 + unsigned short mac;
1814 + unsigned short link; /* 0 - link down, 1 - link up */
1815 + u16 phyData;
1816 +
1817 + struct tasklet_struct rx_tasklet;
1818 + int unloading;
1819 +};
1820 +
1821 +
1822 +/*
1823 + * Prototypes
1824 + */
1825 +static int ar2313_init(struct net_device *dev);
1826 +#ifdef TX_TIMEOUT
1827 +static void ar2313_tx_timeout(struct net_device *dev);
1828 +#endif
1829 +#if 0
1830 +static void ar2313_multicast_list(struct net_device *dev);
1831 +#endif
1832 +static int ar2313_restart(struct net_device *dev);
1833 +#if DEBUG
1834 +static void ar2313_dump_regs(struct net_device *dev);
1835 +#endif
1836 +static void ar2313_load_rx_ring(struct net_device *dev, int bufs);
1837 +static irqreturn_t ar2313_interrupt(int irq, void *dev_id, struct pt_regs *regs);
1838 +static int ar2313_open(struct net_device *dev);
1839 +static int ar2313_start_xmit(struct sk_buff *skb, struct net_device *dev);
1840 +static int ar2313_close(struct net_device *dev);
1841 +static int ar2313_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
1842 +static void ar2313_init_cleanup(struct net_device *dev);
1843 +static int ar2313_setup_timer(struct net_device *dev);
1844 +static void ar2313_link_timer_fn(unsigned long data);
1845 +static void ar2313_check_link(struct net_device *dev);
1846 +static struct net_device_stats *ar2313_get_stats(struct net_device *dev);
1847 +#endif /* _AR2313_H_ */
1848 diff -Nur linux-2.6.17/drivers/net/ar2313/ar2313_msg.h linux-2.6.17-owrt/drivers/net/ar2313/ar2313_msg.h
1849 --- linux-2.6.17/drivers/net/ar2313/ar2313_msg.h 1970-01-01 01:00:00.000000000 +0100
1850 +++ linux-2.6.17-owrt/drivers/net/ar2313/ar2313_msg.h 2006-06-19 12:05:29.000000000 +0200
1851 @@ -0,0 +1,17 @@
1852 +#ifndef _AR2313_MSG_H_
1853 +#define _AR2313_MSG_H_
1854 +
1855 +#define AR2313_MTU 1692
1856 +#define AR2313_PRIOS 1
1857 +#define AR2313_QUEUES (2*AR2313_PRIOS)
1858 +
1859 +#define AR2313_DESCR_ENTRIES 64
1860 +
1861 +typedef struct {
1862 + volatile unsigned int status; // OWN, Device control and status.
1863 + volatile unsigned int devcs; // pkt Control bits + Length
1864 + volatile unsigned int addr; // Current Address.
1865 + volatile unsigned int descr; // Next descriptor in chain.
1866 +} ar2313_descr_t;
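+/*
+ * Each descriptor is shared with the DMA engine: bit 31 of 'status'
+ * (DMA_RX_OWN / DMA_TX_OWN, see dma.h) hands the descriptor to the
+ * hardware, 'addr' holds the physical buffer address and 'descr' the
+ * physical address of the next descriptor in the chain.
+ */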
1867 +
1868 +#endif /* _AR2313_MSG_H_ */
1869 diff -Nur linux-2.6.17/drivers/net/ar2313/dma.h linux-2.6.17-owrt/drivers/net/ar2313/dma.h
1870 --- linux-2.6.17/drivers/net/ar2313/dma.h 1970-01-01 01:00:00.000000000 +0100
1871 +++ linux-2.6.17-owrt/drivers/net/ar2313/dma.h 2006-06-19 12:05:29.000000000 +0200
1872 @@ -0,0 +1,135 @@
1873 +#ifndef __ARUBA_DMA_H__
1874 +#define __ARUBA_DMA_H__
1875 +
1876 +/*******************************************************************************
1877 + *
1878 + * Copyright 2002 Integrated Device Technology, Inc.
1879 + * All rights reserved.
1880 + *
1881 + * DMA register definition.
1882 + *
1883 + * File : $Id: dma.h,v 1.3 2002/06/06 18:34:03 astichte Exp $
1884 + *
1885 + * Author : ryan.holmQVist@idt.com
1886 + * Date : 20011005
1887 + * Update :
1888 + * $Log: dma.h,v $
1889 + * Revision 1.3 2002/06/06 18:34:03 astichte
1890 + * Added XXX_PhysicalAddress and XXX_VirtualAddress
1891 + *
1892 + * Revision 1.2 2002/06/05 18:30:46 astichte
1893 + * Removed IDTField
1894 + *
1895 + * Revision 1.1 2002/05/29 17:33:21 sysarch
1896 + * jba File moved from vcode/include/idt/acacia
1897 + *
1898 + *
1899 + ******************************************************************************/
1900 +
1901 +#define AR_BIT(x) (1 << (x))
1902 +#define DMA_RX_ERR_CRC AR_BIT(1)
1903 +#define DMA_RX_ERR_DRIB AR_BIT(2)
1904 +#define DMA_RX_ERR_MII AR_BIT(3)
1905 +#define DMA_RX_EV2 AR_BIT(5)
1906 +#define DMA_RX_ERR_COL AR_BIT(6)
1907 +#define DMA_RX_LONG AR_BIT(7)
1908 +#define DMA_RX_LS AR_BIT(8) /* last descriptor */
1909 +#define DMA_RX_FS AR_BIT(9) /* first descriptor */
1910 +#define DMA_RX_MF AR_BIT(10) /* multicast frame */
1911 +#define DMA_RX_ERR_RUNT AR_BIT(11) /* runt frame */
1912 +#define DMA_RX_ERR_LENGTH AR_BIT(12) /* length error */
1913 +#define DMA_RX_ERR_DESC AR_BIT(14) /* descriptor error */
1914 +#define DMA_RX_ERROR AR_BIT(15) /* error summary */
1915 +#define DMA_RX_LEN_MASK 0x3fff0000
1916 +#define DMA_RX_LEN_SHIFT 16
1917 +#define DMA_RX_FILT AR_BIT(30)
1918 +#define DMA_RX_OWN AR_BIT(31) /* desc owned by DMA controller */
1919 +
1920 +#define DMA_RX1_BSIZE_MASK 0x000007ff
1921 +#define DMA_RX1_BSIZE_SHIFT 0
1922 +#define DMA_RX1_CHAINED AR_BIT(24)
1923 +#define DMA_RX1_RER AR_BIT(25)
1924 +
1925 +#define DMA_TX_ERR_UNDER AR_BIT(1) /* underflow error */
1926 +#define DMA_TX_ERR_DEFER AR_BIT(2) /* excessive deferral */
1927 +#define DMA_TX_COL_MASK 0x78
1928 +#define DMA_TX_COL_SHIFT 3
1929 +#define DMA_TX_ERR_HB AR_BIT(7) /* heartbeat failure */
1930 +#define DMA_TX_ERR_COL AR_BIT(8) /* excessive collisions */
1931 +#define DMA_TX_ERR_LATE AR_BIT(9) /* late collision */
1932 +#define DMA_TX_ERR_LINK AR_BIT(10) /* no carrier */
1933 +#define DMA_TX_ERR_LOSS AR_BIT(11) /* loss of carrier */
1934 +#define DMA_TX_ERR_JABBER AR_BIT(14) /* transmit jabber timeout */
1935 +#define DMA_TX_ERROR AR_BIT(15) /* frame aborted */
1936 +#define DMA_TX_OWN AR_BIT(31) /* descr owned by DMA controller */
1937 +
1938 +#define DMA_TX1_BSIZE_MASK 0x000007ff
1939 +#define DMA_TX1_BSIZE_SHIFT 0
1940 +#define DMA_TX1_CHAINED AR_BIT(24) /* chained descriptors */
1941 +#define DMA_TX1_TER AR_BIT(25) /* transmit end of ring */
1942 +#define DMA_TX1_FS AR_BIT(29) /* first segment */
1943 +#define DMA_TX1_LS AR_BIT(30) /* last segment */
1944 +#define DMA_TX1_IC AR_BIT(31) /* interrupt on completion */
1945 +
1946 +#define RCVPKT_LENGTH(X) ((X) >> 16) /* received packet length */
1947 +
1948 +#define MAC_CONTROL_RE AR_BIT(2) /* receive enable */
1949 +#define MAC_CONTROL_TE AR_BIT(3) /* transmit enable */
1950 +#define MAC_CONTROL_DC AR_BIT(5) /* deferral check */
1951 +#define MAC_CONTROL_ASTP AR_BIT(8) /* Auto pad strip */
1952 +#define MAC_CONTROL_DRTY AR_BIT(10) /* Disable retry */
1953 +#define MAC_CONTROL_DBF AR_BIT(11) /* Disable bcast frames */
1954 +#define MAC_CONTROL_LCC AR_BIT(12) /* late collision ctrl */
1955 +#define MAC_CONTROL_HP AR_BIT(13) /* Hash Perfect filtering */
1956 +#define MAC_CONTROL_HASH AR_BIT(14) /* Unicast hash filtering */
1957 +#define MAC_CONTROL_HO AR_BIT(15) /* Hash only filtering */
1958 +#define MAC_CONTROL_PB AR_BIT(16) /* Pass Bad frames */
1959 +#define MAC_CONTROL_IF AR_BIT(17) /* Inverse filtering */
1960 +#define MAC_CONTROL_PR AR_BIT(18) /* promiscuous mode (valid frames only) */
1961 +#define MAC_CONTROL_PM AR_BIT(19) /* pass multicast */
1962 +#define MAC_CONTROL_F AR_BIT(20) /* full-duplex */
1963 +#define MAC_CONTROL_DRO AR_BIT(23) /* Disable Receive Own */
1964 +#define MAC_CONTROL_HBD AR_BIT(28) /* heart-beat disabled (MUST BE SET) */
1965 +#define MAC_CONTROL_BLE AR_BIT(30) /* big endian mode */
1966 +#define MAC_CONTROL_RA AR_BIT(31) /* receive all (valid and invalid frames) */
1967 +
1968 +#define MII_ADDR_BUSY AR_BIT(0)
1969 +#define MII_ADDR_WRITE AR_BIT(1)
1970 +#define MII_ADDR_REG_SHIFT 6
1971 +#define MII_ADDR_PHY_SHIFT 11
1972 +#define MII_DATA_SHIFT 0
1973 +
1974 +#define FLOW_CONTROL_FCE AR_BIT(1)
1975 +
1976 +#define DMA_BUS_MODE_SWR AR_BIT(0) /* software reset */
1977 +#define DMA_BUS_MODE_BLE AR_BIT(7) /* big endian mode */
1978 +#define DMA_BUS_MODE_PBL_SHIFT 8 /* programmable burst length 32 */
1979 +#define DMA_BUS_MODE_DBO AR_BIT(20) /* big-endian descriptors */
1980 +
1981 +#define DMA_STATUS_TI AR_BIT(0) /* transmit interrupt */
1982 +#define DMA_STATUS_TPS AR_BIT(1) /* transmit process stopped */
1983 +#define DMA_STATUS_TU AR_BIT(2) /* transmit buffer unavailable */
1984 +#define DMA_STATUS_TJT AR_BIT(3) /* transmit jabber timeout */
1985 +#define DMA_STATUS_UNF AR_BIT(5) /* transmit underflow */
1986 +#define DMA_STATUS_RI AR_BIT(6) /* receive interrupt */
1987 +#define DMA_STATUS_RU AR_BIT(7) /* receive buffer unavailable */
1988 +#define DMA_STATUS_RPS AR_BIT(8) /* receive process stopped */
1989 +#define DMA_STATUS_ETI AR_BIT(10) /* early transmit interrupt */
1990 +#define DMA_STATUS_FBE AR_BIT(13) /* fatal bus error */
1991 +#define DMA_STATUS_ERI AR_BIT(14) /* early receive interrupt */
1992 +#define DMA_STATUS_AIS AR_BIT(15) /* abnormal interrupt summary */
1993 +#define DMA_STATUS_NIS AR_BIT(16) /* normal interrupt summary */
1994 +#define DMA_STATUS_RS_SHIFT 17 /* receive process state */
1995 +#define DMA_STATUS_TS_SHIFT 20 /* transmit process state */
1996 +#define DMA_STATUS_EB_SHIFT 23 /* error bits */
1997 +
1998 +#define DMA_CONTROL_SR AR_BIT(1) /* start receive */
1999 +#define DMA_CONTROL_ST AR_BIT(13) /* start transmit */
2000 +#define DMA_CONTROL_SF AR_BIT(21) /* store and forward */
2001 +
2002 +#endif // __ARUBA_DMA_H__
2003 +
2004 +
2005 +
2006 +
2007 +
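
Taken together, the definitions above cover the three register words the driver touches most: the per-descriptor status/control bits, the MAC control and MII access registers, and the DMA status/control CSRs. A small sketch of how a handler might turn one CSR5 snapshot into pending work, and how an MII read address word is composed (the EXAMPLE_WORK_* mask names are invented for the example; the real interrupt logic is in ar2313.c):

#include <linux/types.h>

#define EXAMPLE_WORK_RX      0x1   /* invented names, illustration only */
#define EXAMPLE_WORK_TX      0x2
#define EXAMPLE_WORK_RESTART 0x4

/* Map one snapshot of the DMA status register (CSR5) to pending work. */
static unsigned int example_status_to_work(u32 status)
{
	unsigned int work = 0;

	if (status & (DMA_STATUS_RI | DMA_STATUS_RU))
		work |= EXAMPLE_WORK_RX;       /* frames arrived or RX starved */
	if (status & (DMA_STATUS_TI | DMA_STATUS_TU))
		work |= EXAMPLE_WORK_TX;       /* frames sent or TX ring drained */
	if (status & (DMA_STATUS_FBE | DMA_STATUS_TPS | DMA_STATUS_RPS))
		work |= EXAMPLE_WORK_RESTART;  /* bus error or a stopped engine */
	return work;
}

/* Compose the MII address word for reading (phy, reg); the hardware keeps
 * MII_ADDR_BUSY set while the transaction is in flight. */
static inline u32 example_mii_read_addr(unsigned int phy, unsigned int reg)
{
	return (phy << MII_ADDR_PHY_SHIFT) | (reg << MII_ADDR_REG_SHIFT);
}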
2008 diff -Nur linux-2.6.17/drivers/net/ar2313/Makefile linux-2.6.17-owrt/drivers/net/ar2313/Makefile
2009 --- linux-2.6.17/drivers/net/ar2313/Makefile 1970-01-01 01:00:00.000000000 +0100
2010 +++ linux-2.6.17-owrt/drivers/net/ar2313/Makefile 2006-06-19 12:25:58.000000000 +0200
2011 @@ -0,0 +1,5 @@
2012 +#
2013 +# Makefile for the AR2313 ethernet driver
2014 +#
2015 +
2016 +obj-$(CONFIG_AR2313) += ar2313.o
2017 diff -Nur linux-2.6.17/drivers/net/ar2313/platform.h linux-2.6.17-owrt/drivers/net/ar2313/platform.h
2018 --- linux-2.6.17/drivers/net/ar2313/platform.h 1970-01-01 01:00:00.000000000 +0100
2019 +++ linux-2.6.17-owrt/drivers/net/ar2313/platform.h 2006-06-19 12:05:29.000000000 +0200
2020 @@ -0,0 +1,128 @@
2021 +/********************************************************************************
2022 + Title: $Source: platform.h,v $
2023 +
2024 + Author: Dan Steinberg
2025 + Copyright Integrated Device Technology 2001
2026 +
2027 + Purpose: AR2313 Register/Bit Definitions
2028 +
2029 + Update:
2030 + $Log: platform.h,v $
2031 +
2032 + Notes: See the Merlot architecture spec for complete details. Note that all
2033 + addresses are virtual addresses in kseg1 (Uncached, Unmapped).
2034 +
2035 +********************************************************************************/
2036 +
2037 +#ifndef PLATFORM_H
2038 +#define PLATFORM_H
2039 +
2040 +#define BIT(x) (1 << (x))
2041 +
2042 +#define RESET_BASE 0xBC003020
2043 +#define RESET_VALUE 0x00000001
2044 +
2045 +/********************************************************************
2046 + * Device controller
2047 + ********************************************************************/
2048 +typedef struct {
2049 + volatile unsigned int flash0;
2050 +} DEVICE;
2051 +
2052 +#define device (*((volatile DEVICE *) DEV_CTL_BASE))
2053 +
2054 +// Device controller (flash0) register bits
2055 +#define DEV_WP (1<<26)
2056 +
2057 +/********************************************************************
2058 + * DDR controller
2059 + ********************************************************************/
2060 +typedef struct {
2061 + volatile unsigned int ddrc0;
2062 + volatile unsigned int ddrc1;
2063 + volatile unsigned int ddrrefresh;
2064 +} DDR;
2065 +
2066 +#define ddr (*((volatile DDR *) DDR_BASE))
2067 +
2068 +// DDRC register
2069 +#define DDRC_CS(i) ((i&0x3)<<0)
2070 +#define DDRC_WE (1<<2)
2071 +
2072 +/********************************************************************
2073 + * Ethernet interfaces
2074 + ********************************************************************/
2075 +#define ETHERNET_BASE 0xB8200000
2076 +
2077 +//
2078 +// Combined register layout shared by eth0 and eth1
2079 +//
2080 +typedef struct {
2081 + volatile unsigned int mac_control; /* 0x00 */
2082 + volatile unsigned int mac_addr[2]; /* 0x04 - 0x08 */
2083 + volatile unsigned int mcast_table[2]; /* 0x0c - 0x10 */
2084 + volatile unsigned int mii_addr; /* 0x14 */
2085 + volatile unsigned int mii_data; /* 0x18 */
2086 + volatile unsigned int flow_control; /* 0x1c */
2087 + volatile unsigned int vlan_tag; /* 0x20 */
2088 + volatile unsigned int pad[7]; /* 0x24 - 0x3c */
2089 + volatile unsigned int ucast_table[8]; /* 0x40-0x5c */
2090 +
2091 +} ETHERNET_STRUCT;
2092 +
2093 +/********************************************************************
2094 + * Interrupt controller
2095 + ********************************************************************/
2096 +
2097 +typedef struct {
2098 + volatile unsigned int wdog_control; /* 0x08 */
2099 + volatile unsigned int wdog_timer; /* 0x0c */
2100 + volatile unsigned int misc_status; /* 0x10 */
2101 + volatile unsigned int misc_mask; /* 0x14 */
2102 + volatile unsigned int global_status; /* 0x18 */
2103 + volatile unsigned int reserved; /* 0x1c */
2104 + volatile unsigned int reset_control; /* 0x20 */
2105 +} INTERRUPT;
2106 +
2107 +#define interrupt (*((volatile INTERRUPT *) INTERRUPT_BASE))
2108 +
2109 +#define INTERRUPT_MISC_TIMER BIT(0)
2110 +#define INTERRUPT_MISC_AHBPROC BIT(1)
2111 +#define INTERRUPT_MISC_AHBDMA BIT(2)
2112 +#define INTERRUPT_MISC_GPIO BIT(3)
2113 +#define INTERRUPT_MISC_UART BIT(4)
2114 +#define INTERRUPT_MISC_UARTDMA BIT(5)
2115 +#define INTERRUPT_MISC_WATCHDOG BIT(6)
2116 +#define INTERRUPT_MISC_LOCAL BIT(7)
2117 +
2118 +#define INTERRUPT_GLOBAL_ETH BIT(2)
2119 +#define INTERRUPT_GLOBAL_WLAN BIT(3)
2120 +#define INTERRUPT_GLOBAL_MISC BIT(4)
2121 +#define INTERRUPT_GLOBAL_ITIMER BIT(5)
2122 +
2123 +/********************************************************************
2124 + * DMA controller
2125 + ********************************************************************/
2126 +#define DMA_BASE 0xB8201000
2127 +
2128 +typedef struct {
2129 + volatile unsigned int bus_mode; /* 0x00 (CSR0) */
2130 + volatile unsigned int xmt_poll; /* 0x04 (CSR1) */
2131 + volatile unsigned int rcv_poll; /* 0x08 (CSR2) */
2132 + volatile unsigned int rcv_base; /* 0x0c (CSR3) */
2133 + volatile unsigned int xmt_base; /* 0x10 (CSR4) */
2134 + volatile unsigned int status; /* 0x14 (CSR5) */
2135 + volatile unsigned int control; /* 0x18 (CSR6) */
2136 + volatile unsigned int intr_ena; /* 0x1c (CSR7) */
2137 + volatile unsigned int rcv_missed; /* 0x20 (CSR8) */
2138 + volatile unsigned int reserved[11]; /* 0x24-0x4c (CSR9-19) */
2139 + volatile unsigned int cur_tx_buf_addr; /* 0x50 (CSR20) */
2140 + volatile unsigned int cur_rx_buf_addr; /* 0x54 (CSR21) */
2141 +} DMA;
2142 +
2143 +#define dma (*((volatile DMA *) DMA_BASE))
2144 +
2145 +// macro to convert from virtual to physical address
2146 +#define phys_addr(x) (x & 0x1fffffff)
2147 +
2148 +#endif /* PLATFORM_H */
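
The structures above are overlaid directly on the memory-mapped register blocks, so register access is plain structure access through the dma/interrupt/device macros. A minimal sketch of pointing the DMA engine at a receive ring and starting reception this way (the ring symbol and the bring-up order are assumptions for illustration; the driver's real reset and start sequence is in ar2313.c):

/* Illustration only: reset the DMA block, load the RX ring base (CSR3),
 * start the receive engine (CSR6), then issue a receive poll demand. */
static void example_start_rx(ar2313_descr_t *rx_ring)
{
	dma.bus_mode = DMA_BUS_MODE_SWR;               /* software reset */
	while (dma.bus_mode & DMA_BUS_MODE_SWR)
		;                                      /* wait for the bit to clear */

	dma.rcv_base = phys_addr((unsigned int) rx_ring);
	dma.control |= DMA_CONTROL_SR;                 /* start receive */
	dma.rcv_poll = 0;                              /* CSR2: receive poll demand */
}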
2149 diff -Nur linux-2.6.17/drivers/net/Kconfig linux-2.6.17-owrt/drivers/net/Kconfig
2150 --- linux-2.6.17/drivers/net/Kconfig 2006-06-19 12:05:01.000000000 +0200
2151 +++ linux-2.6.17-owrt/drivers/net/Kconfig 2006-06-19 12:26:35.000000000 +0200
2152 @@ -310,6 +310,12 @@
2153
2154 source "drivers/net/arm/Kconfig"
2155
2156 +config AR2313
2157 + tristate "AR2313 Ethernet support"
2158 + depends on NET_ETHERNET && MACH_ARUBA
2159 + help
2160 + Support for the AR2313 Ethernet controller found on the Aruba AP60/61 access points.
2161 +
2162 config IDT_RC32434_ETH
2163 tristate "IDT RC32434 Local Ethernet support"
2164 depends on NET_ETHERNET
2165 diff -Nur linux-2.6.17/drivers/net/Makefile linux-2.6.17-owrt/drivers/net/Makefile
2166 --- linux-2.6.17/drivers/net/Makefile 2006-06-19 12:05:01.000000000 +0200
2167 +++ linux-2.6.17-owrt/drivers/net/Makefile 2006-06-19 12:27:02.000000000 +0200
2168 @@ -12,6 +12,7 @@
2169 obj-$(CONFIG_CHELSIO_T1) += chelsio/
2170 obj-$(CONFIG_BONDING) += bonding/
2171 obj-$(CONFIG_GIANFAR) += gianfar_driver.o
2172 +obj-$(CONFIG_AR2313) += ar2313/
2173
2174 gianfar_driver-objs := gianfar.o \
2175 gianfar_ethtool.o \