Add support for Atheros SoC (used in Fonera, Meraki) - Merry Christmas, everybody :)
[openwrt/openwrt.git] / target / linux / atheros-2.6 / patches / 130-ar2313_ethernet.patch
1 diff -urN linux.old/drivers/net/ar2313/ar2313.c linux.eth/drivers/net/ar2313/ar2313.c
2 --- linux.old/drivers/net/ar2313/ar2313.c 1970-01-01 01:00:00.000000000 +0100
3 +++ linux.eth/drivers/net/ar2313/ar2313.c 2006-12-16 04:30:44.000000000 +0100
4 @@ -0,0 +1,1545 @@
5 +/*
6 + * ar2313.c: Linux driver for the Atheros AR231z Ethernet device.
7 + *
8 + * Copyright (C) 2004 by Sameer Dekate <sdekate@arubanetworks.com>
9 + * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
10 + * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
11 + *
12 + * Thanks to Atheros for providing hardware and documentation
13 + * enabling me to write this driver.
14 + *
15 + * This program is free software; you can redistribute it and/or modify
16 + * it under the terms of the GNU General Public License as published by
17 + * the Free Software Foundation; either version 2 of the License, or
18 + * (at your option) any later version.
19 + *
20 + * Additional credits:
21 + * This code is taken from John Taylor's Sibyte driver and then
22 + * modified for the AR2313.
23 + */
24 +
25 +#include <linux/autoconf.h>
26 +#include <linux/module.h>
27 +#include <linux/version.h>
28 +#include <linux/types.h>
29 +#include <linux/errno.h>
30 +#include <linux/ioport.h>
31 +#include <linux/pci.h>
32 +#include <linux/netdevice.h>
33 +#include <linux/etherdevice.h>
34 +#include <linux/skbuff.h>
35 +#include <linux/init.h>
36 +#include <linux/delay.h>
37 +#include <linux/mm.h>
38 +#include <linux/highmem.h>
39 +#include <linux/sockios.h>
40 +#include <linux/pkt_sched.h>
41 +#include <linux/compile.h>
42 +#include <linux/mii.h>
43 +#include <linux/ethtool.h>
44 +#include <linux/ctype.h>
45 +#include <linux/platform_device.h>
46 +
47 +#include <net/sock.h>
48 +#include <net/ip.h>
49 +
50 +#include <asm/system.h>
51 +#include <asm/io.h>
52 +#include <asm/irq.h>
53 +#include <asm/byteorder.h>
54 +#include <asm/uaccess.h>
55 +#include <asm/bootinfo.h>
56 +
57 +#include <ar531x_platform.h>
58 +
59 +#undef INDEX_DEBUG
60 +#define DEBUG 0
61 +#define DEBUG_TX 0
62 +#define DEBUG_RX 0
63 +#define DEBUG_INT 0
64 +#define DEBUG_MC 0
65 +#define DEBUG_ERR 1
66 +
67 +#ifndef min
68 +#define min(a,b) (((a)<(b))?(a):(b))
69 +#endif
70 +
71 +#ifndef SMP_CACHE_BYTES
72 +#define SMP_CACHE_BYTES L1_CACHE_BYTES
73 +#endif
74 +
75 +#ifndef SET_MODULE_OWNER
76 +#define SET_MODULE_OWNER(dev) {do{} while(0);}
77 +#define AR2313_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
78 +#define AR2313_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
79 +#else
80 +#define AR2313_MOD_INC_USE_COUNT {do{} while(0);}
81 +#define AR2313_MOD_DEC_USE_COUNT {do{} while(0);}
82 +#endif
83 +
84 +#define PHYSADDR(a) ((_ACAST32_ (a)) & 0x1fffffff)
85 +
86 +static char ifname[5] = "bond";
87 +
88 +module_param_string(ifname, ifname, 5, 0);
89 +
90 +#define AR2313_MBOX_SET_BIT 0x8
91 +
92 +#define BOARD_IDX_STATIC 0
93 +#define BOARD_IDX_OVERFLOW -1
94 +
95 +#include "ar2313_msg.h"
96 +#include "platform.h"
97 +#include "dma.h"
98 +#include "ar2313.h"
99 +
100 +/*
101 + * New interrupt handler strategy:
102 + *
103 + * The old interrupt handler worked using the traditional method of
104 + * replacing an skbuff with a new one when a packet arrives. However,
105 + * the rx rings do not need to contain a static number of buffer
106 + * descriptors, thus it makes sense to move the memory allocation out
107 + * of the main interrupt handler and do it in a bottom half handler
108 + * and only allocate new buffers when the number of buffers in the
109 + * ring is below a certain threshold. In order to avoid starving the
110 + * NIC under heavy load it is however necessary to force allocation
111 + * when hitting a minimum threshold. The strategy for allocation is as
112 + * follows:
113 + *
114 + * RX_LOW_THRES - allocate buffers in the bottom half
115 + * RX_PANIC_THRES - we are very low on buffers, allocate
116 + * the buffers in the interrupt handler
117 + * RX_RING_SIZE - maximum number of buffers in the rx ring
118 + *
119 + * One advantageous side effect of this allocation approach is that the
120 + * entire rx processing can be done without holding any spin lock
121 + * since the rx rings and registers are totally independent of the tx
122 + * ring and its registers. This of course includes the kmalloc's of
123 + * new skb's. Thus start_xmit can run in parallel with rx processing
124 + * and the memory allocation on SMP systems.
125 + *
126 + * Note that running the skb reallocation in a bottom half opens up
127 + * another can of races which needs to be handled properly. In
128 + * particular it can happen that the interrupt handler tries to run
129 + * the reallocation while the bottom half is either running on another
130 + * CPU or was interrupted on the same CPU. To get around this the
131 + * driver uses bitops to prevent the reallocation routines from being
132 + * reentered.
133 + *
134 + * TX handling can also be done without holding any spin lock,
135 + * since tx_csm is only written to by the interrupt
136 + * handler.
137 + */
138 +
139 +/*
140 + * Threshold values for RX buffer allocation - the low water marks for
141 + * when to start refilling the rings are set to 75% of the ring
142 + * sizes. It seems to make sense to refill the rings entirely from the
144 + * interrupt handler once it gets below the panic threshold, that way
144 + * we don't risk that the refilling is moved to another CPU when the
145 + * one running the interrupt handler just got the slab code hot in its
146 + * cache.
147 + */
148 +#define RX_RING_SIZE AR2313_DESCR_ENTRIES
149 +#define RX_PANIC_THRES (RX_RING_SIZE/4)
150 +#define RX_LOW_THRES ((3*RX_RING_SIZE)/4)
151 +#define CRC_LEN 4
152 +#define RX_OFFSET 2
153 +
154 +#define AR2313_BUFSIZE (AR2313_MTU + ETH_HLEN + CRC_LEN + RX_OFFSET)
155 +
156 +#ifdef MODULE
157 +MODULE_AUTHOR("Sameer Dekate <sdekate@arubanetworks.com>, Imre Kaloz <kaloz@openwrt.org>, Felix Fietkau <nbd@openwrt.org>");
158 +MODULE_DESCRIPTION("AR2313 Ethernet driver");
159 +#endif
160 +
161 +#if DEBUG
162 +static char version[] __initdata =
163 + "ar2313.c: v0.03 2006/07/12 sdekate@arubanetworks.com\n";
164 +#endif /* DEBUG */
165 +
166 +#define virt_to_phys(x) ((u32)(x) & 0x1fffffff)
167 +
168 +// prototypes
169 +static short armiiread(short phy, short reg);
170 +static void armiiwrite(short phy, short reg, short data);
171 +#ifdef TX_TIMEOUT
172 +static void ar2313_tx_timeout(struct net_device *dev);
173 +#endif
174 +static void ar2313_halt(struct net_device *dev);
175 +static void rx_tasklet_func(unsigned long data);
176 +static void ar2313_multicast_list(struct net_device *dev);
177 +
178 +static int probed __initdata = 0;
179 +static unsigned long ar_eth_base;
180 +static unsigned long ar_dma_base;
181 +static unsigned long ar_int_base;
182 +static unsigned long ar_int_mac_mask;
183 +static unsigned long ar_int_phy_mask;
184 +
185 +#ifndef ERR
186 +#define ERR(fmt, args...) printk("%s: " fmt, __func__, ##args)
187 +#endif
188 +
189 +
190 +int __init ar2313_probe(struct platform_device *pdev)
191 +{
192 + struct net_device *dev;
193 + struct ar2313_private *sp;
194 + struct ar531x_eth *cfg;
195 + struct resource *res;
196 + int version_disp;
197 + char name[64];
198 +
199 + if (probed)
200 + return -ENODEV;
201 + probed++;
202 +
203 + version_disp = 0;
204 + sprintf(name, "%s%%d", ifname);
205 + dev = alloc_etherdev(sizeof(struct ar2313_private));
206 +
207 + if (dev == NULL) {
208 + printk(KERN_ERR "ar2313: Unable to allocate net_device structure!\n");
209 + return -ENOMEM;
210 + }
211 +
212 + SET_MODULE_OWNER(dev);
213 + platform_set_drvdata(pdev, dev);
214 +
215 + sp = dev->priv;
216 + sp->dev = dev;
217 + cfg = pdev->dev.platform_data;
218 +
219 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "eth_membase");
220 + if (!res)
221 + return -ENODEV;
222 +
223 + sp->link = 0;
224 + ar_eth_base = res->start;
225 + ar_dma_base = ar_eth_base + 0x1000;
226 + ar_int_base = cfg->reset_base;
227 + ar_int_mac_mask = cfg->reset_mac;
228 + ar_int_phy_mask = cfg->reset_phy;
229 + sp->phy = cfg->phy;
230 +
231 + dev->irq = platform_get_irq_byname(pdev, "eth_irq");
232 +
233 + spin_lock_init(&sp->lock);
234 +
235 + /* initialize func pointers */
236 + dev->open = &ar2313_open;
237 + dev->stop = &ar2313_close;
238 + dev->hard_start_xmit = &ar2313_start_xmit;
239 +
240 + dev->get_stats = &ar2313_get_stats;
241 + dev->set_multicast_list = &ar2313_multicast_list;
242 +#ifdef TX_TIMEOUT
243 + dev->tx_timeout = ar2313_tx_timeout;
244 + dev->watchdog_timeo = AR2313_TX_TIMEOUT;
245 +#endif
246 + dev->do_ioctl = &ar2313_ioctl;
247 +
248 + // SAMEER: do we need this?
249 + dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA;
250 +
251 + tasklet_init(&sp->rx_tasklet, rx_tasklet_func, (unsigned long) dev);
252 + tasklet_disable(&sp->rx_tasklet);
253 +
254 + /* display version info if adapter is found */
255 + if (!version_disp) {
256 + /* set display flag to TRUE so that */
257 + /* we only display this string ONCE */
258 + version_disp = 1;
259 +#if DEBUG
260 + printk(version);
261 +#endif /* DEBUG */
262 + }
263 +
264 +#if 0
265 + request_region(PHYSADDR(ar_eth_base), ETHERNET_SIZE*ETHERNET_MACS,
266 + "AR2313ENET");
267 +#endif
268 +
269 + sp->eth_regs = ioremap_nocache(PHYSADDR(ar_eth_base), sizeof(*sp->eth_regs));
270 + if (!sp->eth_regs) {
271 + printk("Can't remap eth registers\n");
272 + return(-ENXIO);
273 + }
274 +
275 + sp->dma_regs = ioremap_nocache(PHYSADDR(ar_eth_base + 0x1000), sizeof(*sp->dma_regs));
276 + dev->base_addr = (unsigned int) sp->dma_regs;
277 + if (!sp->dma_regs) {
278 + printk("Can't remap DMA registers\n");
279 + return(-ENXIO);
280 + }
281 +
282 + sp->int_regs = ioremap_nocache(PHYSADDR(ar_int_base), 4);
283 + if (!sp->int_regs) {
284 + printk("Can't remap INTERRUPT registers\n");
285 + return(-ENXIO);
286 + }
287 +
288 + strncpy(sp->name, "Atheros AR2313", sizeof (sp->name) - 1);
289 + sp->name [sizeof (sp->name) - 1] = '\0';
290 +
291 + {
292 + /* XXX: Will have to rewrite this part later */
293 + char *configstart;
294 + unsigned char def_mac[6] = {0, 0xaa, 0xbb, 0xcc, 0xdd, 0xee};
295 +
296 + configstart = (char *) cfg->board_config;
297 +
298 + if (!configstart) {
299 + printk("no valid mac found, using defaults");
300 + memcpy(dev->dev_addr, def_mac, 6);
301 + } else {
302 + memcpy(dev->dev_addr, ((u8 *)configstart)+102, 6);
303 + }
304 + }
305 +
306 + sp->board_idx = BOARD_IDX_STATIC;
307 +
308 + if (ar2313_init(dev)) {
309 + /*
310 + * ar2313_init() calls ar2313_init_cleanup() on error.
311 + */
312 + kfree(dev);
313 + return -ENODEV;
314 + }
315 +
316 + if (register_netdev(dev)){
317 + printk("%s: register_netdev failed\n", __func__);
318 + return -1;
319 + }
320 +
321 + printk("%s: %s: %02x:%02x:%02x:%02x:%02x:%02x, irq %d\n",
322 + dev->name, sp->name,
323 + dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
324 + dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5],
325 + dev->irq);
326 +
327 + /* start link poll timer */
328 + ar2313_setup_timer(dev);
329 +
330 + return 0;
331 +}
332 +
333 +#if 0
334 +static void ar2313_dump_regs(struct net_device *dev)
335 +{
336 + unsigned int *ptr, i;
337 + struct ar2313_private *sp = (struct ar2313_private *)dev->priv;
338 +
339 + ptr = (unsigned int *)sp->eth_regs;
340 + for(i=0; i< (sizeof(ETHERNET_STRUCT)/ sizeof(unsigned int)); i++, ptr++) {
341 + printk("ENET: %08x = %08x\n", (int)ptr, *ptr);
342 + }
343 +
344 + ptr = (unsigned int *)sp->dma_regs;
345 + for(i=0; i< (sizeof(DMA)/ sizeof(unsigned int)); i++, ptr++) {
346 + printk("DMA: %08x = %08x\n", (int)ptr, *ptr);
347 + }
348 +
349 + ptr = (unsigned int *)sp->int_regs;
350 + for(i=0; i< (sizeof(INTERRUPT)/ sizeof(unsigned int)); i++, ptr++){
351 + printk("INT: %08x = %08x\n", (int)ptr, *ptr);
352 + }
353 +
354 + for (i = 0; i < AR2313_DESCR_ENTRIES; i++) {
355 + ar2313_descr_t *td = &sp->tx_ring[i];
356 + printk("Tx desc %2d: %08x %08x %08x %08x\n", i,
357 + td->status, td->devcs, td->addr, td->descr);
358 + }
359 +}
360 +#endif
361 +
362 +#ifdef TX_TIMEOUT
363 +static void
364 +ar2313_tx_timeout(struct net_device *dev)
365 +{
366 + struct ar2313_private *sp = (struct ar2313_private *)dev->priv;
367 + unsigned long flags;
368 +
369 +#if DEBUG_TX
370 + printk("Tx timeout\n");
371 +#endif
372 + spin_lock_irqsave(&sp->lock, flags);
373 + ar2313_restart(dev);
374 + spin_unlock_irqrestore(&sp->lock, flags);
375 +}
376 +#endif
377 +
378 +#if DEBUG_MC
379 +static void
380 +printMcList(struct net_device *dev)
381 +{
382 + struct dev_mc_list *list = dev->mc_list;
383 + int num=0, i;
384 + while(list){
385 + printk("%d MC ADDR ", num);
386 + for(i=0;i<list->dmi_addrlen;i++) {
387 + printk(":%02x", list->dmi_addr[i]);
388 + }
389 + list = list->next;
390 + printk("\n");
391 + }
392 +}
393 +#endif
394 +
395 +/*
396 + * Set or clear the multicast filter for this adaptor.
397 + * THIS IS ABSOLUTE CRAP, disabled
398 + */
399 +static void
400 +ar2313_multicast_list(struct net_device *dev)
401 +{
402 + /*
403 + * Always listen to broadcasts and
404 + * treat IFF bits independently
405 + */
406 + struct ar2313_private *sp = (struct ar2313_private *)dev->priv;
407 + unsigned int recognise;
408 +
409 + recognise = sp->eth_regs->mac_control;
410 +
411 + if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
412 + recognise |= MAC_CONTROL_PR;
413 + } else {
414 + recognise &= ~MAC_CONTROL_PR;
415 + }
416 +
417 + if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 15)) {
418 +#if DEBUG_MC
419 + printMcList(dev);
420 + printk("%s: all MULTICAST mc_count %d\n", __FUNCTION__, dev->mc_count);
421 +#endif
422 + recognise |= MAC_CONTROL_PM;/* all multicast */
423 + } else if (dev->mc_count > 0) {
424 +#if DEBUG_MC
425 + printMcList(dev);
426 + printk("%s: mc_count %d\n", __FUNCTION__, dev->mc_count);
427 +#endif
428 + recognise |= MAC_CONTROL_PM; /* for the time being */
429 + }
430 +#if DEBUG_MC
431 + printk("%s: setting %08x to %08x\n", __FUNCTION__, (int)sp->eth_regs, recognise);
432 +#endif
433 +
434 + sp->eth_regs->mac_control = recognise;
435 +}
436 +
437 +static void rx_tasklet_cleanup(struct net_device *dev)
438 +{
439 + struct ar2313_private *sp = dev->priv;
440 +
441 + /*
442 + * Tasklet may be scheduled. Need to get it removed from the list
443 + * since we're about to free the struct.
444 + */
445 +
446 + sp->unloading = 1;
447 + tasklet_enable(&sp->rx_tasklet);
448 + tasklet_kill(&sp->rx_tasklet);
449 +}
450 +
451 +static int __exit ar2313_remove(struct platform_device *pdev)
452 +{
453 + struct net_device *dev = platform_get_drvdata(pdev);
454 + rx_tasklet_cleanup(dev);
455 + ar2313_init_cleanup(dev);
456 + unregister_netdev(dev);
457 + kfree(dev);
458 + return 0;
459 +}
460 +
461 +
462 +/*
463 + * Restart the AR2313 ethernet controller.
464 + */
465 +static int ar2313_restart(struct net_device *dev)
466 +{
467 + /* disable interrupts */
468 + disable_irq(dev->irq);
469 +
470 + /* stop mac */
471 + ar2313_halt(dev);
472 +
473 + /* initialize */
474 + ar2313_init(dev);
475 +
476 + /* enable interrupts */
477 + enable_irq(dev->irq);
478 +
479 + return 0;
480 +}
481 +
482 +static struct platform_driver ar2313_driver = {
483 + .driver.name = "ar531x-eth",
484 + .probe = ar2313_probe,
485 + .remove = ar2313_remove,
486 +};
487 +
488 +int __init ar2313_module_init(void)
489 +{
490 + return platform_driver_register(&ar2313_driver);
491 +}
492 +
493 +void __exit ar2313_module_cleanup(void)
494 +{
495 + platform_driver_unregister(&ar2313_driver);
496 +}
497 +
498 +module_init(ar2313_module_init);
499 +module_exit(ar2313_module_cleanup);
500 +
501 +
502 +static void ar2313_free_descriptors(struct net_device *dev)
503 +{
504 + struct ar2313_private *sp = dev->priv;
505 + if (sp->rx_ring != NULL) {
506 + kfree((void*)KSEG0ADDR(sp->rx_ring));
507 + sp->rx_ring = NULL;
508 + sp->tx_ring = NULL;
509 + }
510 +}
511 +
512 +
513 +static int ar2313_allocate_descriptors(struct net_device *dev)
514 +{
515 + struct ar2313_private *sp = dev->priv;
516 + int size;
517 + int j;
518 + ar2313_descr_t *space;
519 +
520 + if(sp->rx_ring != NULL){
521 + printk("%s: already done.\n", __FUNCTION__);
522 + return 0;
523 + }
524 +
525 + size = (sizeof(ar2313_descr_t) * (AR2313_DESCR_ENTRIES * AR2313_QUEUES));
526 + space = kmalloc(size, GFP_KERNEL);
527 + if (space == NULL)
528 + return 1;
529 +
530 + /* invalidate caches */
531 + dma_cache_inv((unsigned int)space, size);
532 +
533 + /* now convert pointer to KSEG1 */
534 + space = (ar2313_descr_t *)KSEG1ADDR(space);
535 +
536 + memset((void *)space, 0, size);
537 +
538 + sp->rx_ring = space;
539 + space += AR2313_DESCR_ENTRIES;
540 +
541 + sp->tx_ring = space;
542 + space += AR2313_DESCR_ENTRIES;
543 +
544 + /* Initialize the transmit Descriptors */
545 + for (j = 0; j < AR2313_DESCR_ENTRIES; j++) {
546 + ar2313_descr_t *td = &sp->tx_ring[j];
547 + td->status = 0;
548 + td->devcs = DMA_TX1_CHAINED;
549 + td->addr = 0;
550 + td->descr = K1_TO_PHYS(&sp->tx_ring[(j+1) & (AR2313_DESCR_ENTRIES-1)]);
551 + }
552 +
553 + return 0;
554 +}
555 +
556 +
557 +/*
558 + * Generic cleanup handling data allocated during init. Used when the
559 + * module is unloaded or if an error occurs during initialization
560 + */
561 +static void ar2313_init_cleanup(struct net_device *dev)
562 +{
563 + struct ar2313_private *sp = dev->priv;
564 + struct sk_buff *skb;
565 + int j;
566 +
567 + ar2313_free_descriptors(dev);
568 +
569 + if (sp->eth_regs) iounmap((void*)sp->eth_regs);
570 + if (sp->dma_regs) iounmap((void*)sp->dma_regs);
571 +
572 + if (sp->rx_skb) {
573 + for (j = 0; j < AR2313_DESCR_ENTRIES; j++) {
574 + skb = sp->rx_skb[j];
575 + if (skb) {
576 + sp->rx_skb[j] = NULL;
577 + dev_kfree_skb(skb);
578 + }
579 + }
580 + kfree(sp->rx_skb);
581 + sp->rx_skb = NULL;
582 + }
583 +
584 + if (sp->tx_skb) {
585 + for (j = 0; j < AR2313_DESCR_ENTRIES; j++) {
586 + skb = sp->tx_skb[j];
587 + if (skb) {
588 + sp->tx_skb[j] = NULL;
589 + dev_kfree_skb(skb);
590 + }
591 + }
592 + kfree(sp->tx_skb);
593 + sp->tx_skb = NULL;
594 + }
595 +}
596 +
597 +static int ar2313_setup_timer(struct net_device *dev)
598 +{
599 + struct ar2313_private *sp = dev->priv;
600 +
601 + init_timer(&sp->link_timer);
602 +
603 + sp->link_timer.function = ar2313_link_timer_fn;
604 + sp->link_timer.data = (int) dev;
605 + sp->link_timer.expires = jiffies + HZ;
606 +
607 + add_timer(&sp->link_timer);
608 + return 0;
609 +
610 +}
611 +
612 +static void ar2313_link_timer_fn(unsigned long data)
613 +{
614 + struct net_device *dev = (struct net_device *) data;
615 + struct ar2313_private *sp = dev->priv;
616 +
617 + // see if the link status changed
618 + // This was needed to make sure we set the PHY to the
619 + // autonegotiated value of half or full duplex.
620 + ar2313_check_link(dev);
621 +
622 + // Loop faster when we don't have link.
623 + // This was needed to speed up the AP bootstrap time.
624 + if(sp->link == 0) {
625 + mod_timer(&sp->link_timer, jiffies + HZ/2);
626 + } else {
627 + mod_timer(&sp->link_timer, jiffies + LINK_TIMER);
628 + }
629 +}
630 +
631 +static void ar2313_check_link(struct net_device *dev)
632 +{
633 + struct ar2313_private *sp = dev->priv;
634 + u16 phyData;
635 +
636 + phyData = armiiread(sp->phy, MII_BMSR);
637 + if (sp->phyData != phyData) {
638 + if (phyData & BMSR_LSTATUS) {
639 + /* link is present, read link partner ability to determine duplex */
640 + int duplex = 0;
641 + u16 reg;
642 +
643 + sp->link = 1;
644 + reg = armiiread(sp->phy, MII_BMCR);
645 + if (reg & BMCR_ANENABLE) {
646 + /* auto neg enabled */
647 + reg = armiiread(sp->phy, MII_LPA);
648 + duplex = (reg & (LPA_100FULL|LPA_10FULL))? 1:0;
649 + } else {
650 + /* no auto neg, just read duplex config */
651 + duplex = (reg & BMCR_FULLDPLX)? 1:0;
652 + }
653 +
654 + printk(KERN_INFO "%s: Configuring MAC for %s duplex\n", dev->name,
655 + (duplex)? "full":"half");
656 +
657 + if (duplex) {
658 + /* full duplex */
659 + sp->eth_regs->mac_control = ((sp->eth_regs->mac_control | MAC_CONTROL_F) &
660 + ~MAC_CONTROL_DRO);
661 + } else {
662 + /* half duplex */
663 + sp->eth_regs->mac_control = ((sp->eth_regs->mac_control | MAC_CONTROL_DRO) &
664 + ~MAC_CONTROL_F);
665 + }
666 + } else {
667 + /* no link */
668 + sp->link = 0;
669 + }
670 + sp->phyData = phyData;
671 + }
672 +}
673 +
674 +static int
675 +ar2313_reset_reg(struct net_device *dev)
676 +{
677 + struct ar2313_private *sp = (struct ar2313_private *)dev->priv;
678 + unsigned int ethsal, ethsah;
679 + unsigned int flags;
680 +
681 + *sp->int_regs |= ar_int_mac_mask;
682 + mdelay(10);
683 + *sp->int_regs &= ~ar_int_mac_mask;
684 + mdelay(10);
685 + *sp->int_regs |= ar_int_phy_mask;
686 + mdelay(10);
687 + *sp->int_regs &= ~ar_int_phy_mask;
688 + mdelay(10);
689 +
690 + sp->dma_regs->bus_mode = (DMA_BUS_MODE_SWR);
691 + mdelay(10);
692 + sp->dma_regs->bus_mode = ((32 << DMA_BUS_MODE_PBL_SHIFT) | DMA_BUS_MODE_BLE);
693 +
694 + /* enable interrupts */
695 + sp->dma_regs->intr_ena = (DMA_STATUS_AIS |
696 + DMA_STATUS_NIS |
697 + DMA_STATUS_RI |
698 + DMA_STATUS_TI |
699 + DMA_STATUS_FBE);
700 + sp->dma_regs->xmt_base = K1_TO_PHYS(sp->tx_ring);
701 + sp->dma_regs->rcv_base = K1_TO_PHYS(sp->rx_ring);
702 + sp->dma_regs->control = (DMA_CONTROL_SR | DMA_CONTROL_ST | DMA_CONTROL_SF);
703 +
704 + sp->eth_regs->flow_control = (FLOW_CONTROL_FCE);
705 + sp->eth_regs->vlan_tag = (0x8100);
706 +
707 + /* Enable Ethernet Interface */
708 + flags = (MAC_CONTROL_TE | /* transmit enable */
709 + MAC_CONTROL_PM | /* pass mcast */
710 + MAC_CONTROL_F | /* full duplex */
711 + MAC_CONTROL_HBD); /* heart beat disabled */
712 +
713 + if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
714 + flags |= MAC_CONTROL_PR;
715 + }
716 + sp->eth_regs->mac_control = flags;
717 +
718 + /* Set all Ethernet station address registers to their initial values */
719 + ethsah = ((((u_int)(dev->dev_addr[5]) << 8) & (u_int)0x0000FF00) |
720 + (((u_int)(dev->dev_addr[4]) << 0) & (u_int)0x000000FF));
721 +
722 + ethsal = ((((u_int)(dev->dev_addr[3]) << 24) & (u_int)0xFF000000) |
723 + (((u_int)(dev->dev_addr[2]) << 16) & (u_int)0x00FF0000) |
724 + (((u_int)(dev->dev_addr[1]) << 8) & (u_int)0x0000FF00) |
725 + (((u_int)(dev->dev_addr[0]) << 0) & (u_int)0x000000FF) );
726 +
727 + sp->eth_regs->mac_addr[0] = ethsah;
728 + sp->eth_regs->mac_addr[1] = ethsal;
729 +
730 + mdelay(10);
731 +
732 + return(0);
733 +}
734 +
735 +
736 +static int ar2313_init(struct net_device *dev)
737 +{
738 + struct ar2313_private *sp = dev->priv;
739 + int ecode=0;
740 +
741 + /*
742 + * Allocate descriptors
743 + */
744 + if (ar2313_allocate_descriptors(dev)) {
745 + printk("%s: %s: ar2313_allocate_descriptors failed\n",
746 + dev->name, __FUNCTION__);
747 + ecode = -EAGAIN;
748 + goto init_error;
749 + }
750 +
751 + /*
752 + * Get the memory for the skb rings.
753 + */
754 + if(sp->rx_skb == NULL) {
755 + sp->rx_skb = kmalloc(sizeof(struct sk_buff *) * AR2313_DESCR_ENTRIES, GFP_KERNEL);
756 + if (!(sp->rx_skb)) {
757 + printk("%s: %s: rx_skb kmalloc failed\n",
758 + dev->name, __FUNCTION__);
759 + ecode = -EAGAIN;
760 + goto init_error;
761 + }
762 + }
763 + memset(sp->rx_skb, 0, sizeof(struct sk_buff *) * AR2313_DESCR_ENTRIES);
764 +
765 + if(sp->tx_skb == NULL) {
766 + sp->tx_skb = kmalloc(sizeof(struct sk_buff *) * AR2313_DESCR_ENTRIES, GFP_KERNEL);
767 + if (!(sp->tx_skb)) {
768 + printk("%s: %s: tx_skb kmalloc failed\n",
769 + dev->name, __FUNCTION__);
770 + ecode = -EAGAIN;
771 + goto init_error;
772 + }
773 + }
774 + memset(sp->tx_skb, 0, sizeof(struct sk_buff *) * AR2313_DESCR_ENTRIES);
775 +
776 + /*
777 + * Set tx_csm before we start receiving interrupts, otherwise
778 + * the interrupt handler might think it is supposed to process
779 + * tx ints before we are up and running, which may cause a null
780 + * pointer access in the int handler.
781 + */
782 + sp->rx_skbprd = 0;
783 + sp->cur_rx = 0;
784 + sp->tx_prd = 0;
785 + sp->tx_csm = 0;
786 +
787 + /*
788 + * Zero the stats before starting the interface
789 + */
790 + memset(&sp->stats, 0, sizeof(sp->stats));
791 +
792 + /*
793 + * We load the ring here as there seems to be no way to tell the
794 + * firmware to wipe the ring without re-initializing it.
795 + */
796 + ar2313_load_rx_ring(dev, RX_RING_SIZE);
797 +
798 + /*
799 + * Init hardware
800 + */
801 + ar2313_reset_reg(dev);
802 +
803 + /*
804 + * Get the IRQ
805 + */
806 + ecode = request_irq(dev->irq, &ar2313_interrupt, IRQF_SHARED | IRQF_DISABLED | IRQF_SAMPLE_RANDOM, dev->name, dev);
807 + if (ecode) {
808 + printk(KERN_WARNING "%s: %s: Requested IRQ %d is busy\n",
809 + dev->name, __FUNCTION__, dev->irq);
810 + goto init_error;
811 + }
812 +
813 +
814 + tasklet_enable(&sp->rx_tasklet);
815 +
816 + return 0;
817 +
818 + init_error:
819 + ar2313_init_cleanup(dev);
820 + return ecode;
821 +}
822 +
823 +/*
824 + * Load the rx ring.
825 + *
826 + * Loading rings is safe without holding the spin lock since this is
827 + * done only before the device is enabled, thus no interrupts are
829 + * generated and the ring is not accessed by the interrupt handler/tasklet handler.
829 + */
830 +static void ar2313_load_rx_ring(struct net_device *dev, int nr_bufs)
831 +{
832 +
833 + struct ar2313_private *sp = ((struct net_device *)dev)->priv;
834 + short i, idx;
835 +
836 + idx = sp->rx_skbprd;
837 +
838 + for (i = 0; i < nr_bufs; i++) {
839 + struct sk_buff *skb;
840 + ar2313_descr_t *rd;
841 +
842 + if (sp->rx_skb[idx]) {
843 +#if DEBUG_RX
844 + printk(KERN_INFO "ar2313 rx refill full\n");
845 +#endif /* DEBUG */
846 + break;
847 + }
848 +
849 + // partha: create additional room for the second GRE fragment
850 + skb = alloc_skb(AR2313_BUFSIZE+128, GFP_ATOMIC);
851 + if (!skb) {
852 + printk("\n\n\n\n %s: No memory in system\n\n\n\n", __FUNCTION__);
853 + break;
854 + }
855 + // partha: create additional room in the front for tx pkt capture
856 + skb_reserve(skb, 32);
857 +
858 + /*
859 + * Make sure IP header starts on a fresh cache line.
860 + */
861 + skb->dev = dev;
862 + skb_reserve(skb, RX_OFFSET);
863 + sp->rx_skb[idx] = skb;
864 +
865 + rd = (ar2313_descr_t *) &sp->rx_ring[idx];
866 +
867 + /* initialize dma descriptor */
868 + rd->devcs = ((AR2313_BUFSIZE << DMA_RX1_BSIZE_SHIFT) |
869 + DMA_RX1_CHAINED);
870 + rd->addr = virt_to_phys(skb->data);
871 + rd->descr = virt_to_phys(&sp->rx_ring[(idx+1) & (AR2313_DESCR_ENTRIES-1)]);
872 + rd->status = DMA_RX_OWN;
873 +
874 + idx = DSC_NEXT(idx);
875 + }
876 +
877 + if (!i) {
878 +#if DEBUG_ERR
879 + printk(KERN_INFO "Out of memory when allocating standard receive buffers\n");
880 +#endif /* DEBUG */
881 + } else {
882 + sp->rx_skbprd = idx;
883 + }
884 +
885 + return;
886 +}
887 +
888 +#define AR2313_MAX_PKTS_PER_CALL 64
889 +
890 +static int ar2313_rx_int(struct net_device *dev)
891 +{
892 + struct ar2313_private *sp = dev->priv;
893 + struct sk_buff *skb, *skb_new;
894 + ar2313_descr_t *rxdesc;
895 + unsigned int status;
896 + u32 idx;
897 + int pkts = 0;
898 + int rval;
899 +
900 + idx = sp->cur_rx;
901 +
902 + /* process at most the entire ring and then wait for another interrupt */
903 + while(1) {
904 +
905 + rxdesc = &sp->rx_ring[idx];
906 + status = rxdesc->status;
907 + if (status & DMA_RX_OWN) {
908 + /* DMA controller owns descriptor or descr not yet filled in */
909 + rval = 0;
910 + break;
911 + }
912 +
913 + if (++pkts > AR2313_MAX_PKTS_PER_CALL) {
914 + rval = 1;
915 + break;
916 + }
917 +
918 +#if DEBUG_RX
919 + printk("index %d\n", idx);
920 + printk("RX status %08x\n", rxdesc->status);
921 + printk("RX devcs %08x\n", rxdesc->devcs );
922 + printk("RX addr %08x\n", rxdesc->addr );
923 + printk("RX descr %08x\n", rxdesc->descr );
924 +#endif
925 +
926 + if ((status & (DMA_RX_ERROR|DMA_RX_ERR_LENGTH)) &&
927 + (!(status & DMA_RX_LONG))){
928 +#if DEBUG_RX
929 + printk("%s: rx ERROR %08x\n", __FUNCTION__, status);
930 +#endif
931 + sp->stats.rx_errors++;
932 + sp->stats.rx_dropped++;
933 +
934 + /* add statistics counters */
935 + if (status & DMA_RX_ERR_CRC) sp->stats.rx_crc_errors++;
936 + if (status & DMA_RX_ERR_COL) sp->stats.rx_over_errors++;
937 + if (status & DMA_RX_ERR_LENGTH)
938 + sp->stats.rx_length_errors++;
939 + if (status & DMA_RX_ERR_RUNT) sp->stats.rx_over_errors++;
940 + if (status & DMA_RX_ERR_DESC) sp->stats.rx_over_errors++;
941 +
942 + } else {
943 + /* alloc new buffer. */
944 + skb_new = dev_alloc_skb(AR2313_BUFSIZE + RX_OFFSET + 128);
945 + if (skb_new != NULL) {
946 +
947 + skb = sp->rx_skb[idx];
948 + /* set skb */
949 + skb_put(skb, ((status >> DMA_RX_LEN_SHIFT) & 0x3fff) - CRC_LEN);
950 +
951 +#ifdef CONFIG_MERLOT
952 + if ((dev->am_pkt_handler == NULL) ||
953 + (dev->am_pkt_handler(skb, dev) == 0)) {
954 +#endif
955 + sp->stats.rx_bytes += skb->len;
956 + skb->protocol = eth_type_trans(skb, dev);
957 + /* pass the packet to upper layers */
958 +
959 +#ifdef CONFIG_MERLOT
960 + if (dev->asap_netif_rx)
961 + dev->asap_netif_rx(skb);
962 + else
963 +#endif
964 + netif_rx(skb);
965 +#ifdef CONFIG_MERLOT
966 + }
967 +#endif
968 + skb_new->dev = dev;
969 + /* 16 bit align */
970 + skb_reserve(skb_new, RX_OFFSET+32);
971 + /* reset descriptor's curr_addr */
972 + rxdesc->addr = virt_to_phys(skb_new->data);
973 +
974 + sp->stats.rx_packets++;
975 + sp->rx_skb[idx] = skb_new;
976 +
977 + } else {
978 + sp->stats.rx_dropped++;
979 + }
980 + }
981 +
982 + rxdesc->devcs = ((AR2313_BUFSIZE << DMA_RX1_BSIZE_SHIFT) |
983 + DMA_RX1_CHAINED);
984 + rxdesc->status = DMA_RX_OWN;
985 +
986 + idx = DSC_NEXT(idx);
987 + }
988 +
989 + sp->cur_rx = idx;
990 +
991 + return rval;
992 +}
993 +
994 +
995 +static void ar2313_tx_int(struct net_device *dev)
996 +{
997 + struct ar2313_private *sp = dev->priv;
998 + u32 idx;
999 + struct sk_buff *skb;
1000 + ar2313_descr_t *txdesc;
1001 + unsigned int status=0;
1002 +
1003 + idx = sp->tx_csm;
1004 +
1005 + while (idx != sp->tx_prd) {
1006 +
1007 + txdesc = &sp->tx_ring[idx];
1008 +
1009 +#if DEBUG_TX
1010 + printk("%s: TXINT: csm=%d idx=%d prd=%d status=%x devcs=%x addr=%08x descr=%x\n",
1011 + dev->name, sp->tx_csm, idx, sp->tx_prd,
1012 + txdesc->status, txdesc->devcs, txdesc->addr, txdesc->descr);
1013 +#endif /* DEBUG */
1014 +
1015 + if ((status = txdesc->status) & DMA_TX_OWN) {
1016 + /* ar2313 dma still owns descr */
1017 + break;
1018 + }
1019 + /* done with this descriptor */
1020 + dma_unmap_single(NULL, txdesc->addr, txdesc->devcs & DMA_TX1_BSIZE_MASK, DMA_TO_DEVICE);
1021 + txdesc->status = 0;
1022 +
1023 + if (status & DMA_TX_ERROR){
1024 + sp->stats.tx_errors++;
1025 + sp->stats.tx_dropped++;
1026 + if(status & DMA_TX_ERR_UNDER)
1027 + sp->stats.tx_fifo_errors++;
1028 + if(status & DMA_TX_ERR_HB)
1029 + sp->stats.tx_heartbeat_errors++;
1030 + if(status & (DMA_TX_ERR_LOSS |
1031 + DMA_TX_ERR_LINK))
1032 + sp->stats.tx_carrier_errors++;
1033 + if (status & (DMA_TX_ERR_LATE|
1034 + DMA_TX_ERR_COL |
1035 + DMA_TX_ERR_JABBER |
1036 + DMA_TX_ERR_DEFER))
1037 + sp->stats.tx_aborted_errors++;
1038 + } else {
1039 + /* transmit OK */
1040 + sp->stats.tx_packets++;
1041 + }
1042 +
1043 + skb = sp->tx_skb[idx];
1044 + sp->tx_skb[idx] = NULL;
1045 + idx = DSC_NEXT(idx);
1046 + sp->stats.tx_bytes += skb->len;
1047 + dev_kfree_skb_irq(skb);
1048 + }
1049 +
1050 + sp->tx_csm = idx;
1051 +
1052 + return;
1053 +}
1054 +
1055 +
1056 +static void
1057 +rx_tasklet_func(unsigned long data)
1058 +{
1059 + struct net_device *dev = (struct net_device *) data;
1060 + struct ar2313_private *sp = dev->priv;
1061 +
1062 + if (sp->unloading) {
1063 + return;
1064 + }
1065 +
1066 + if (ar2313_rx_int(dev)) {
1067 + tasklet_hi_schedule(&sp->rx_tasklet);
1068 + }
1069 + else {
1070 + unsigned long flags;
1071 + spin_lock_irqsave(&sp->lock, flags);
1072 + sp->dma_regs->intr_ena |= DMA_STATUS_RI;
1073 + spin_unlock_irqrestore(&sp->lock, flags);
1074 + }
1075 +}
1076 +
1077 +static void
1078 +rx_schedule(struct net_device *dev)
1079 +{
1080 + struct ar2313_private *sp = dev->priv;
1081 +
1082 + sp->dma_regs->intr_ena &= ~DMA_STATUS_RI;
1083 +
1084 + tasklet_hi_schedule(&sp->rx_tasklet);
1085 +}
1086 +
1087 +static irqreturn_t ar2313_interrupt(int irq, void *dev_id)
1088 +{
1089 + struct net_device *dev = (struct net_device *)dev_id;
1090 + struct ar2313_private *sp = dev->priv;
1091 + unsigned int status, enabled;
1092 +
1093 + /* clear interrupt */
1094 + /*
1095 + * Don't clear RI bit if currently disabled.
1096 + */
1097 + status = sp->dma_regs->status;
1098 + enabled = sp->dma_regs->intr_ena;
1099 + sp->dma_regs->status = status & enabled;
1100 +
1101 + if (status & DMA_STATUS_NIS) {
1102 + /* normal status */
1103 + /*
1104 + * Don't schedule rx processing if interrupt
1105 + * is already disabled.
1106 + */
1107 + if (status & enabled & DMA_STATUS_RI) {
1108 + /* receive interrupt */
1109 + rx_schedule(dev);
1110 + }
1111 + if (status & DMA_STATUS_TI) {
1112 + /* transmit interrupt */
1113 + ar2313_tx_int(dev);
1114 + }
1115 + }
1116 +
1117 + if (status & DMA_STATUS_AIS) {
1118 +#if DEBUG_INT
1119 + printk("%s: AIS set %08x & %x\n", __FUNCTION__,
1120 + status, (DMA_STATUS_FBE | DMA_STATUS_TPS));
1121 +#endif
1122 + /* abnormal status */
1123 + if (status & (DMA_STATUS_FBE | DMA_STATUS_TPS)) {
1124 + ar2313_restart(dev);
1125 + }
1126 + }
1127 + return IRQ_HANDLED;
1128 +}
1129 +
1130 +
1131 +static int ar2313_open(struct net_device *dev)
1132 +{
1133 + struct ar2313_private *sp;
1134 +
1135 + sp = dev->priv;
1136 +
1137 + dev->mtu = 1500;
1138 + netif_start_queue(dev);
1139 +
1140 + sp->eth_regs->mac_control |= MAC_CONTROL_RE;
1141 +
1142 + AR2313_MOD_INC_USE_COUNT;
1143 +
1144 + return 0;
1145 +}
1146 +
1147 +static void ar2313_halt(struct net_device *dev)
1148 +{
1149 + struct ar2313_private *sp = dev->priv;
1150 + int j;
1151 +
1152 + tasklet_disable(&sp->rx_tasklet);
1153 +
1154 + /* kill the MAC */
1155 + sp->eth_regs->mac_control &= ~(MAC_CONTROL_RE | /* disable Receives */
1156 + MAC_CONTROL_TE); /* disable Transmits */
1157 + /* stop dma */
1158 + sp->dma_regs->control = 0;
1159 + sp->dma_regs->bus_mode = DMA_BUS_MODE_SWR;
1160 +
1161 + /* place phy and MAC in reset */
1162 + *sp->int_regs |= (ar_int_mac_mask | ar_int_phy_mask);
1163 +
1164 + /* free buffers on tx ring */
1165 + for (j = 0; j < AR2313_DESCR_ENTRIES; j++) {
1166 + struct sk_buff *skb;
1167 + ar2313_descr_t *txdesc;
1168 +
1169 + txdesc = &sp->tx_ring[j];
1170 + txdesc->descr = 0;
1171 +
1172 + skb = sp->tx_skb[j];
1173 + if (skb) {
1174 + dev_kfree_skb(skb);
1175 + sp->tx_skb[j] = NULL;
1176 + }
1177 + }
1178 +}
1179 +
1180 +/*
1181 + * close should do nothing. Here's why. It's called when
1182 + * 'ifconfig bond0 down' is run. If it calls free_irq then
1183 + * the irq is gone forever! When bond0 is made 'up' again,
1184 + * ar2313_open() does not call request_irq(). Worse,
1185 + * the call to ar2313_halt() generates a WDOG reset due to
1186 + * the write to 'sp->int_regs' and the box reboots.
1187 + * Commenting this out is good since it allows the
1188 + * system to resume when bond0 is made up again.
1189 + */
1190 +static int ar2313_close(struct net_device *dev)
1191 +{
1192 +#if 0
1193 + /*
1194 + * Disable interrupts
1195 + */
1196 + disable_irq(dev->irq);
1197 +
1198 + /*
1199 + * Without (or before) releasing irq and stopping hardware, this
1200 + * is an absolute non-sense, by the way. It will be reset instantly
1201 + * by the first irq.
1202 + */
1203 + netif_stop_queue(dev);
1204 +
1205 + /* stop the MAC and DMA engines */
1206 + ar2313_halt(dev);
1207 +
1208 + /* release the interrupt */
1209 + free_irq(dev->irq, dev);
1210 +
1211 +#endif
1212 + AR2313_MOD_DEC_USE_COUNT;
1213 + return 0;
1214 +}
1215 +
1216 +static int ar2313_start_xmit(struct sk_buff *skb, struct net_device *dev)
1217 +{
1218 + struct ar2313_private *sp = dev->priv;
1219 + ar2313_descr_t *td;
1220 + u32 idx;
1221 +
1222 + idx = sp->tx_prd;
1223 + td = &sp->tx_ring[idx];
1224 +
1225 + if (td->status & DMA_TX_OWN) {
1226 +#if DEBUG_TX
1227 + printk("%s: No space left to Tx\n", __FUNCTION__);
1228 +#endif
1229 + /* free skbuf and lie to the caller that we sent it out */
1230 + sp->stats.tx_dropped++;
1231 + dev_kfree_skb(skb);
1232 +
1233 + /* restart transmitter in case locked */
1234 + sp->dma_regs->xmt_poll = 0;
1235 + return 0;
1236 + }
1237 +
1238 + /* Setup the transmit descriptor. */
1239 + td->devcs = ((skb->len << DMA_TX1_BSIZE_SHIFT) |
1240 + (DMA_TX1_LS|DMA_TX1_IC|DMA_TX1_CHAINED));
1241 + td->addr = dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE);
1242 + td->status = DMA_TX_OWN;
1243 +
1244 + /* kick transmitter last */
1245 + sp->dma_regs->xmt_poll = 0;
1246 +
1247 +#if DEBUG_TX
1248 + printk("index %d\n", idx);
1249 + printk("TX status %08x\n", td->status);
1250 + printk("TX devcs %08x\n", td->devcs );
1251 + printk("TX addr %08x\n", td->addr );
1252 + printk("TX descr %08x\n", td->descr );
1253 +#endif
1254 +
1255 + sp->tx_skb[idx] = skb;
1256 + idx = DSC_NEXT(idx);
1257 + sp->tx_prd = idx;
1258 +
1259 + //dev->trans_start = jiffies;
1260 +
1261 + return 0;
1262 +}
1263 +
1264 +static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
1265 +{
1266 + struct ar2313_private *np = dev->priv;
1267 + u32 tmp;
1268 +
1269 + ecmd->supported =
1270 + (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
1271 + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1272 + SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
1273 +
1274 + ecmd->port = PORT_TP;
1275 + /* only supports internal transceiver */
1276 + ecmd->transceiver = XCVR_INTERNAL;
1277 + /* not sure what this is for */
1278 + ecmd->phy_address = 1;
1279 +
1280 + ecmd->advertising = ADVERTISED_MII;
1281 + tmp = armiiread(np->phy, MII_ADVERTISE);
1282 + if (tmp & ADVERTISE_10HALF)
1283 + ecmd->advertising |= ADVERTISED_10baseT_Half;
1284 + if (tmp & ADVERTISE_10FULL)
1285 + ecmd->advertising |= ADVERTISED_10baseT_Full;
1286 + if (tmp & ADVERTISE_100HALF)
1287 + ecmd->advertising |= ADVERTISED_100baseT_Half;
1288 + if (tmp & ADVERTISE_100FULL)
1289 + ecmd->advertising |= ADVERTISED_100baseT_Full;
1290 +
1291 + tmp = armiiread(np->phy, MII_BMCR);
1292 + if (tmp & BMCR_ANENABLE) {
1293 + ecmd->advertising |= ADVERTISED_Autoneg;
1294 + ecmd->autoneg = AUTONEG_ENABLE;
1295 + } else {
1296 + ecmd->autoneg = AUTONEG_DISABLE;
1297 + }
1298 +
1299 + if (ecmd->autoneg == AUTONEG_ENABLE) {
1300 + tmp = armiiread(np->phy, MII_LPA);
1301 + if (tmp & (LPA_100FULL|LPA_10FULL)) {
1302 + ecmd->duplex = DUPLEX_FULL;
1303 + } else {
1304 + ecmd->duplex = DUPLEX_HALF;
1305 + }
1306 + if (tmp & (LPA_100FULL|LPA_100HALF)) {
1307 + ecmd->speed = SPEED_100;
1308 + } else {
1309 + ecmd->speed = SPEED_10;
1310 + }
1311 + } else {
1312 + if (tmp & BMCR_FULLDPLX) {
1313 + ecmd->duplex = DUPLEX_FULL;
1314 + } else {
1315 + ecmd->duplex = DUPLEX_HALF;
1316 + }
1317 + if (tmp & BMCR_SPEED100) {
1318 + ecmd->speed = SPEED_100;
1319 + } else {
1320 + ecmd->speed = SPEED_10;
1321 + }
1322 + }
1323 +
1324 + /* ignore maxtxpkt, maxrxpkt for now */
1325 +
1326 + return 0;
1327 +}
1328 +
1329 +static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
1330 +{
1331 + struct ar2313_private *np = dev->priv;
1332 + u32 tmp;
1333 +
1334 + if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
1335 + return -EINVAL;
1336 + if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
1337 + return -EINVAL;
1338 + if (ecmd->port != PORT_TP)
1339 + return -EINVAL;
1340 + if (ecmd->transceiver != XCVR_INTERNAL)
1341 + return -EINVAL;
1342 + if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
1343 + return -EINVAL;
1344 + /* ignore phy_address, maxtxpkt, maxrxpkt for now */
1345 +
1346 + /* WHEW! now let's bang some bits */
1347 +
1348 + tmp = armiiread(np->phy, MII_BMCR);
1349 + if (ecmd->autoneg == AUTONEG_ENABLE) {
1350 + /* turn on autonegotiation */
1351 + tmp |= BMCR_ANENABLE;
1352 + printk("%s: Enabling auto-neg\n", dev->name);
1353 + } else {
1354 + /* turn off auto negotiation, set speed and duplex */
1355 + tmp &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
1356 + if (ecmd->speed == SPEED_100)
1357 + tmp |= BMCR_SPEED100;
1358 + if (ecmd->duplex == DUPLEX_FULL)
1359 + tmp |= BMCR_FULLDPLX;
1360 + printk("%s: Hard coding %d/%s\n", dev->name,
1361 + (ecmd->speed == SPEED_100)? 100:10,
1362 + (ecmd->duplex == DUPLEX_FULL)? "full":"half");
1363 + }
1364 + armiiwrite(np->phy, MII_BMCR, tmp);
1365 + np->phyData = 0;
1366 + return 0;
1367 +}
1368 +
1369 +static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
1370 +{
1371 + struct ar2313_private *np = dev->priv;
1372 + u32 cmd;
1373 +
1374 + if (get_user(cmd, (u32 *)useraddr))
1375 + return -EFAULT;
1376 +
1377 + switch (cmd) {
1378 + /* get settings */
1379 + case ETHTOOL_GSET: {
1380 + struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1381 + spin_lock_irq(&np->lock);
1382 + netdev_get_ecmd(dev, &ecmd);
1383 + spin_unlock_irq(&np->lock);
1384 + if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
1385 + return -EFAULT;
1386 + return 0;
1387 + }
1388 + /* set settings */
1389 + case ETHTOOL_SSET: {
1390 + struct ethtool_cmd ecmd;
1391 + int r;
1392 + if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
1393 + return -EFAULT;
1394 + spin_lock_irq(&np->lock);
1395 + r = netdev_set_ecmd(dev, &ecmd);
1396 + spin_unlock_irq(&np->lock);
1397 + return r;
1398 + }
1399 + /* restart autonegotiation */
1400 + case ETHTOOL_NWAY_RST: {
1401 + int tmp;
1402 + int r = -EINVAL;
1403 + /* if autoneg is off, it's an error */
1404 + tmp = armiiread(np->phy, MII_BMCR);
1405 + if (tmp & BMCR_ANENABLE) {
1406 + tmp |= (BMCR_ANRESTART);
1407 + armiiwrite(np->phy, MII_BMCR, tmp);
1408 + r = 0;
1409 + }
1410 + return r;
1411 + }
1412 + /* get link status */
1413 + case ETHTOOL_GLINK: {
1414 + struct ethtool_value edata = {ETHTOOL_GLINK};
1415 + edata.data = (armiiread(np->phy, MII_BMSR)&BMSR_LSTATUS) ? 1:0;
1416 + if (copy_to_user(useraddr, &edata, sizeof(edata)))
1417 + return -EFAULT;
1418 + return 0;
1419 + }
1420 + }
1421 +
1422 + return -EOPNOTSUPP;
1423 +}
1424 +
1425 +static int ar2313_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1426 +{
1427 + struct mii_ioctl_data *data = (struct mii_ioctl_data *)&ifr->ifr_data;
1428 +
1429 + switch (cmd) {
1430 + case SIOCDEVPRIVATE: {
1431 + struct ar2313_cmd scmd;
1432 +
1433 + if (copy_from_user(&scmd, ifr->ifr_data, sizeof(scmd)))
1434 + return -EFAULT;
1435 +
1436 +#if DEBUG
1437 + printk("%s: ioctl devprivate c=%d a=%x l=%d m=%d d=%x,%x\n",
1438 + dev->name, scmd.cmd,
1439 + scmd.address, scmd.length,
1440 + scmd.mailbox, scmd.data[0], scmd.data[1]);
1441 +#endif /* DEBUG */
1442 +
1443 + switch (scmd.cmd) {
1444 + case AR2313_READ_DATA:
1445 + if(scmd.length==4){
1446 + scmd.data[0] = *((u32*)scmd.address);
1447 + } else if(scmd.length==2) {
1448 + scmd.data[0] = *((u16*)scmd.address);
1449 + } else if (scmd.length==1) {
1450 + scmd.data[0] = *((u8*)scmd.address);
1451 + } else {
1452 + return -EOPNOTSUPP;
1453 + }
1454 + if(copy_to_user(ifr->ifr_data, &scmd, sizeof(scmd)))
1455 + return -EFAULT;
1456 + break;
1457 +
1458 + case AR2313_WRITE_DATA:
1459 + if(scmd.length==4){
1460 + *((u32*)scmd.address) = scmd.data[0];
1461 + } else if(scmd.length==2) {
1462 + *((u16*)scmd.address) = scmd.data[0];
1463 + } else if (scmd.length==1) {
1464 + *((u8*)scmd.address) = scmd.data[0];
1465 + } else {
1466 + return -EOPNOTSUPP;
1467 + }
1468 + break;
1469 +
1470 + case AR2313_GET_VERSION:
1471 + // SAMEER: sprintf((char*) &scmd, "%s", ARUBA_VERSION);
1472 + if(copy_to_user(ifr->ifr_data, &scmd, sizeof(scmd)))
1473 + return -EFAULT;
1474 + break;
1475 +
1476 + default:
1477 + return -EOPNOTSUPP;
1478 + }
1479 + return 0;
1480 + }
1481 +
1482 + case SIOCETHTOOL:
1483 + return netdev_ethtool_ioctl(dev, (void *) ifr->ifr_data);
1484 +
1485 + case SIOCGMIIPHY: /* Get address of MII PHY in use. */
1486 + data->phy_id = 1;
1487 + /* Fall Through */
1488 +
1489 + case SIOCGMIIREG: /* Read MII PHY register. */
1490 + case SIOCDEVPRIVATE+1: /* for binary compat, remove in 2.5 */
1491 + data->val_out = armiiread(data->phy_id & 0x1f,
1492 + data->reg_num & 0x1f);
1493 + return 0;
1494 + case SIOCSMIIREG: /* Write MII PHY register. */
1495 + case SIOCDEVPRIVATE+2: /* for binary compat, remove in 2.5 */
1496 + if (!capable(CAP_NET_ADMIN))
1497 + return -EPERM;
1498 + armiiwrite(data->phy_id & 0x1f,
1499 + data->reg_num & 0x1f, data->val_in);
1500 + return 0;
1501 +
1502 + case SIOCSIFHWADDR:
1503 + if (copy_from_user(dev->dev_addr, ifr->ifr_data, sizeof(dev->dev_addr)))
1504 + return -EFAULT;
1505 + return 0;
1506 +
1507 + case SIOCGIFHWADDR:
1508 + if (copy_to_user(ifr->ifr_data, dev->dev_addr, sizeof(dev->dev_addr)))
1509 + return -EFAULT;
1510 + return 0;
1511 +
1512 + default:
1513 + break;
1514 + }
1515 +
1516 + return -EOPNOTSUPP;
1517 +}
1518 +
1519 +static struct net_device_stats *ar2313_get_stats(struct net_device *dev)
1520 +{
1521 + struct ar2313_private *sp = dev->priv;
1522 + return &sp->stats;
1523 +}
1524 +
1525 +static short
1526 +armiiread(short phy, short reg)
1527 +{
1528 + volatile ETHERNET_STRUCT * ethernet;
1529 +
1530 + ethernet = (volatile ETHERNET_STRUCT *)(ar_eth_base); /* always MAC 0 */
1531 + ethernet->mii_addr = ((reg << MII_ADDR_REG_SHIFT) |
1532 + (phy << MII_ADDR_PHY_SHIFT));
1533 + while (ethernet->mii_addr & MII_ADDR_BUSY);
1534 + return (ethernet->mii_data >> MII_DATA_SHIFT);
1535 +}
1536 +
1537 +static void
1538 +armiiwrite(short phy, short reg, short data)
1539 +{
1540 + volatile ETHERNET_STRUCT * ethernet;
1541 +
1542 + ethernet = (volatile ETHERNET_STRUCT *)(ar_eth_base); /* always MAC 0 */
1543 + while (ethernet->mii_addr & MII_ADDR_BUSY);
1544 + ethernet->mii_data = data << MII_DATA_SHIFT;
1545 + ethernet->mii_addr = ((reg << MII_ADDR_REG_SHIFT) |
1546 + (phy << MII_ADDR_PHY_SHIFT) |
1547 + MII_ADDR_WRITE);
1548 +}
1549 +
1550 diff -urN linux.old/drivers/net/ar2313/ar2313.h linux.eth/drivers/net/ar2313/ar2313.h
1551 --- linux.old/drivers/net/ar2313/ar2313.h 1970-01-01 01:00:00.000000000 +0100
1552 +++ linux.eth/drivers/net/ar2313/ar2313.h 2006-12-16 04:30:11.000000000 +0100
1553 @@ -0,0 +1,191 @@
1554 +#ifndef _AR2313_H_
1555 +#define _AR2313_H_
1556 +
1557 +#include <linux/autoconf.h>
1558 +#include <asm/bootinfo.h>
1559 +#include "platform.h"
1560 +
1561 +extern unsigned long mips_machtype;
1562 +
1563 +#undef ETHERNET_BASE
1564 +#define ETHERNET_BASE ar_eth_base
1565 +#define ETHERNET_SIZE 0x00100000
1566 +#define ETHERNET_MACS 2
1567 +
1568 +#undef DMA_BASE
1569 +#define DMA_BASE ar_dma_base
1570 +#define DMA_SIZE 0x00100000
1571 +
1572 +
1573 +/*
1574 + * probe link timer - 5 secs
1575 + */
1576 +#define LINK_TIMER (5*HZ)
1577 +
1578 +/*
1579 + * Interrupt register base address
1580 + */
1581 +#define INTERRUPT_BASE PHYS_TO_K1(ar_int_base)
1582 +
1583 +/*
1584 + * Reset Register
1585 + */
1586 +#define AR531X_RESET (AR531X_RESETTMR + 0x0020)
1587 +#define RESET_SYSTEM 0x00000001 /* cold reset full system */
1588 +#define RESET_PROC 0x00000002 /* cold reset MIPS core */
1589 +#define RESET_WLAN0 0x00000004 /* cold reset WLAN MAC and BB */
1590 +#define RESET_EPHY0 0x00000008 /* cold reset ENET0 phy */
1591 +#define RESET_EPHY1 0x00000010 /* cold reset ENET1 phy */
1592 +#define RESET_ENET0 0x00000020 /* cold reset ENET0 mac */
1593 +#define RESET_ENET1 0x00000040 /* cold reset ENET1 mac */
1594 +
1595 +#define IS_DMA_TX_INT(X) (((X) & (DMA_STATUS_TI)) != 0)
1596 +#define IS_DMA_RX_INT(X) (((X) & (DMA_STATUS_RI)) != 0)
1597 +#define IS_DRIVER_OWNED(X) (((X) & (DMA_TX_OWN)) == 0)
1598 +
1599 +#ifndef K1_TO_PHYS
1600 +// hack
1601 +#define K1_TO_PHYS(x) (((unsigned int)(x)) & 0x1FFFFFFF) /* kseg1 to physical */
1602 +#endif
1603 +
1604 +#ifndef PHYS_TO_K1
1605 +// hack
1606 +#define PHYS_TO_K1(x) (((unsigned int)(x)) | 0xA0000000) /* physical to kseg1 */
1607 +#endif
1608 +
1609 +#define AR2313_TX_TIMEOUT (HZ/4)
1610 +
1611 +/*
1612 + * Rings
1613 + */
1614 +#define DSC_RING_ENTRIES_SIZE (AR2313_DESCR_ENTRIES * sizeof(struct desc))
1615 +#define DSC_NEXT(idx) ((idx + 1) & (AR2313_DESCR_ENTRIES - 1))
1616 +
1617 +static inline int tx_space (u32 csm, u32 prd)
1618 +{
1619 + return (csm - prd - 1) & (AR2313_DESCR_ENTRIES - 1);
1620 +}
1621 +
1622 +#if MAX_SKB_FRAGS
1623 +#define TX_RESERVED (MAX_SKB_FRAGS+1) /* +1 for message header */
1624 +#define tx_ring_full(csm, prd) (tx_space(csm, prd) <= TX_RESERVED)
1625 +#else
1626 +#define tx_ring_full 0
1627 +#endif
1628 +
1629 +#define AR2313_MBGET 2
1630 +#define AR2313_MBSET 3
1631 +#define AR2313_PCI_RECONFIG 4
1632 +#define AR2313_PCI_DUMP 5
1633 +#define AR2313_TEST_PANIC 6
1634 +#define AR2313_TEST_NULLPTR 7
1635 +#define AR2313_READ_DATA 8
1636 +#define AR2313_WRITE_DATA 9
1637 +#define AR2313_GET_VERSION 10
1638 +#define AR2313_TEST_HANG 11
1639 +#define AR2313_SYNC 12
1640 +
1641 +
1642 +struct ar2313_cmd {
1643 + u32 cmd;
1644 + u32 address; /* virtual address of image */
1645 + u32 length; /* size of image to download */
1646 + u32 mailbox; /* mailbox to get/set */
1647 + u32 data[2]; /* contents of mailbox to read/write */
1648 +};
1649 +
1650 +
1651 +/*
1652 + * Struct private for the AR2313.
1653 + *
1654 + * Elements are grouped so variables used by the tx handling goes
1655 + * together, and will go into the same cache lines etc. in order to
1656 + * avoid cache line contention between the rx and tx handling on SMP.
1657 + *
1658 + * Frequently accessed variables are put at the beginning of the
1659 + * struct to help the compiler generate better/shorter code.
1660 + */
1661 +struct ar2313_private
1662 +{
1663 + struct net_device *dev;
1664 + int version;
1665 + u32 mb[2];
1666 +
1667 + volatile ETHERNET_STRUCT *eth_regs;
1668 + volatile DMA *dma_regs;
1669 + volatile u32 *int_regs;
1670 +
1671 + spinlock_t lock; /* Serialise access to device */
1672 +
1673 + /*
1674 + * RX and TX descriptors, must be adjacent
1675 + */
1676 + ar2313_descr_t *rx_ring;
1677 + ar2313_descr_t *tx_ring;
1678 +
1679 +
1680 + struct sk_buff **rx_skb;
1681 + struct sk_buff **tx_skb;
1682 +
1683 + /*
1684 + * RX elements
1685 + */
1686 + u32 rx_skbprd;
1687 + u32 cur_rx;
1688 +
1689 + /*
1690 + * TX elements
1691 + */
1692 + u32 tx_prd;
1693 + u32 tx_csm;
1694 +
1695 + /*
1696 + * Misc elements
1697 + */
1698 + int board_idx;
1699 + char name[48];
1700 + struct net_device_stats stats;
1701 + struct {
1702 + u32 address;
1703 + u32 length;
1704 + char *mapping;
1705 + } desc;
1706 +
1707 +
1708 + struct timer_list link_timer;
1709 + unsigned short phy; /* merlot phy = 1, samsung phy = 0x1f */
1710 + unsigned short mac;
1711 + unsigned short link; /* 0 - link down, 1 - link up */
1712 + u16 phyData;
1713 +
1714 + struct tasklet_struct rx_tasklet;
1715 + int unloading;
1716 +};
1717 +
1718 +
1719 +/*
1720 + * Prototypes
1721 + */
1722 +static int ar2313_init(struct net_device *dev);
1723 +#ifdef TX_TIMEOUT
1724 +static void ar2313_tx_timeout(struct net_device *dev);
1725 +#endif
1726 +#if 0
1727 +static void ar2313_multicast_list(struct net_device *dev);
1728 +#endif
1729 +static int ar2313_restart(struct net_device *dev);
1730 +#if DEBUG
1731 +static void ar2313_dump_regs(struct net_device *dev);
1732 +#endif
1733 +static void ar2313_load_rx_ring(struct net_device *dev, int bufs);
1734 +static irqreturn_t ar2313_interrupt(int irq, void *dev_id);
1735 +static int ar2313_open(struct net_device *dev);
1736 +static int ar2313_start_xmit(struct sk_buff *skb, struct net_device *dev);
1737 +static int ar2313_close(struct net_device *dev);
1738 +static int ar2313_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
1739 +static void ar2313_init_cleanup(struct net_device *dev);
1740 +static int ar2313_setup_timer(struct net_device *dev);
1741 +static void ar2313_link_timer_fn(unsigned long data);
1742 +static void ar2313_check_link(struct net_device *dev);
1743 +static struct net_device_stats *ar2313_get_stats(struct net_device *dev);
1744 +#endif /* _AR2313_H_ */
1745 diff -urN linux.old/drivers/net/ar2313/ar2313_msg.h linux.eth/drivers/net/ar2313/ar2313_msg.h
1746 --- linux.old/drivers/net/ar2313/ar2313_msg.h 1970-01-01 01:00:00.000000000 +0100
1747 +++ linux.eth/drivers/net/ar2313/ar2313_msg.h 2006-12-16 04:30:11.000000000 +0100
1748 @@ -0,0 +1,17 @@
1749 +#ifndef _AR2313_MSG_H_
1750 +#define _AR2313_MSG_H_
1751 +
1752 +#define AR2313_MTU 1692
1753 +#define AR2313_PRIOS 1
1754 +#define AR2313_QUEUES (2*AR2313_PRIOS)
1755 +
1756 +#define AR2313_DESCR_ENTRIES 64
1757 +
1758 +typedef struct {
1759 + volatile unsigned int status; // OWN, Device control and status.
1760 + volatile unsigned int devcs; // pkt Control bits + Length
1761 + volatile unsigned int addr; // Current Address.
1762 + volatile unsigned int descr; // Next descriptor in chain.
1763 +} ar2313_descr_t;
1764 +
1765 +#endif /* _AR2313_MSG_H_ */
1766 diff -urN linux.old/drivers/net/ar2313/dma.h linux.eth/drivers/net/ar2313/dma.h
1767 --- linux.old/drivers/net/ar2313/dma.h 1970-01-01 01:00:00.000000000 +0100
1768 +++ linux.eth/drivers/net/ar2313/dma.h 2006-12-16 04:30:11.000000000 +0100
1769 @@ -0,0 +1,135 @@
1770 +#ifndef __ARUBA_DMA_H__
1771 +#define __ARUBA_DMA_H__
1772 +
1773 +/*******************************************************************************
1774 + *
1775 + * Copyright 2002 Integrated Device Technology, Inc.
1776 + * All rights reserved.
1777 + *
1778 + * DMA register definition.
1779 + *
1780 + * File : $Id: dma.h,v 1.3 2002/06/06 18:34:03 astichte Exp $
1781 + *
1782 + * Author : ryan.holmQVist@idt.com
1783 + * Date : 20011005
1784 + * Update :
1785 + * $Log: dma.h,v $
1786 + * Revision 1.3 2002/06/06 18:34:03 astichte
1787 + * Added XXX_PhysicalAddress and XXX_VirtualAddress
1788 + *
1789 + * Revision 1.2 2002/06/05 18:30:46 astichte
1790 + * Removed IDTField
1791 + *
1792 + * Revision 1.1 2002/05/29 17:33:21 sysarch
1793 + * jba File moved from vcode/include/idt/acacia
1794 + *
1795 + *
1796 + ******************************************************************************/
1797 +
1798 +#define AR_BIT(x) (1 << (x))
1799 +#define DMA_RX_ERR_CRC AR_BIT(1)
1800 +#define DMA_RX_ERR_DRIB AR_BIT(2)
1801 +#define DMA_RX_ERR_MII AR_BIT(3)
1802 +#define DMA_RX_EV2 AR_BIT(5)
1803 +#define DMA_RX_ERR_COL AR_BIT(6)
1804 +#define DMA_RX_LONG AR_BIT(7)
1805 +#define DMA_RX_LS AR_BIT(8) /* last descriptor */
1806 +#define DMA_RX_FS AR_BIT(9) /* first descriptor */
1807 +#define DMA_RX_MF AR_BIT(10) /* multicast frame */
1808 +#define DMA_RX_ERR_RUNT AR_BIT(11) /* runt frame */
1809 +#define DMA_RX_ERR_LENGTH AR_BIT(12) /* length error */
1810 +#define DMA_RX_ERR_DESC AR_BIT(14) /* descriptor error */
1811 +#define DMA_RX_ERROR AR_BIT(15) /* error summary */
1812 +#define DMA_RX_LEN_MASK 0x3fff0000
1813 +#define DMA_RX_LEN_SHIFT 16
1814 +#define DMA_RX_FILT AR_BIT(30)
1815 +#define DMA_RX_OWN AR_BIT(31) /* desc owned by DMA controller */
1816 +
1817 +#define DMA_RX1_BSIZE_MASK 0x000007ff
1818 +#define DMA_RX1_BSIZE_SHIFT 0
1819 +#define DMA_RX1_CHAINED AR_BIT(24)
1820 +#define DMA_RX1_RER AR_BIT(25)
1821 +
1822 +#define DMA_TX_ERR_UNDER AR_BIT(1) /* underflow error */
1823 +#define DMA_TX_ERR_DEFER AR_BIT(2) /* excessive deferral */
1824 +#define DMA_TX_COL_MASK 0x78
1825 +#define DMA_TX_COL_SHIFT 3
1826 +#define DMA_TX_ERR_HB AR_BIT(7) /* heartbeat failure */
1827 +#define DMA_TX_ERR_COL AR_BIT(8) /* excessive collisions */
1828 +#define DMA_TX_ERR_LATE AR_BIT(9) /* late collision */
1829 +#define DMA_TX_ERR_LINK AR_BIT(10) /* no carrier */
1830 +#define DMA_TX_ERR_LOSS AR_BIT(11) /* loss of carrier */
1831 +#define DMA_TX_ERR_JABBER AR_BIT(14) /* transmit jabber timeout */
1832 +#define DMA_TX_ERROR AR_BIT(15) /* frame aborted */
1833 +#define DMA_TX_OWN AR_BIT(31) /* descr owned by DMA controller */
1834 +
1835 +#define DMA_TX1_BSIZE_MASK 0x000007ff
1836 +#define DMA_TX1_BSIZE_SHIFT 0
1837 +#define DMA_TX1_CHAINED AR_BIT(24) /* chained descriptors */
1838 +#define DMA_TX1_TER AR_BIT(25) /* transmit end of ring */
1839 +#define DMA_TX1_FS AR_BIT(29) /* first segment */
1840 +#define DMA_TX1_LS AR_BIT(30) /* last segment */
1841 +#define DMA_TX1_IC AR_BIT(31) /* interrupt on completion */
1842 +
1843 +#define RCVPKT_LENGTH(X) (X >> 16) /* Received pkt Length */
1844 +
1845 +#define MAC_CONTROL_RE AR_BIT(2) /* receive enable */
1846 +#define MAC_CONTROL_TE AR_BIT(3) /* transmit enable */
1847 +#define MAC_CONTROL_DC AR_BIT(5) /* Deferral check */
1848 +#define MAC_CONTROL_ASTP AR_BIT(8) /* Auto pad strip */
1849 +#define MAC_CONTROL_DRTY AR_BIT(10) /* Disable retry */
1850 +#define MAC_CONTROL_DBF AR_BIT(11) /* Disable bcast frames */
1851 +#define MAC_CONTROL_LCC AR_BIT(12) /* late collision ctrl */
1852 +#define MAC_CONTROL_HP AR_BIT(13) /* Hash Perfect filtering */
1853 +#define MAC_CONTROL_HASH AR_BIT(14) /* Unicast hash filtering */
1854 +#define MAC_CONTROL_HO AR_BIT(15) /* Hash only filtering */
1855 +#define MAC_CONTROL_PB AR_BIT(16) /* Pass Bad frames */
1856 +#define MAC_CONTROL_IF AR_BIT(17) /* Inverse filtering */
1857 +#define MAC_CONTROL_PR AR_BIT(18) /* promiscuous mode (valid frames only) */
1858 +#define MAC_CONTROL_PM AR_BIT(19) /* pass multicast */
1859 +#define MAC_CONTROL_F AR_BIT(20) /* full-duplex */
1860 +#define MAC_CONTROL_DRO AR_BIT(23) /* Disable Receive Own */
1861 +#define MAC_CONTROL_HBD AR_BIT(28) /* heart-beat disabled (MUST BE SET) */
1862 +#define MAC_CONTROL_BLE AR_BIT(30) /* big endian mode */
1863 +#define MAC_CONTROL_RA AR_BIT(31) /* receive all (valid and invalid frames) */
1864 +
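/*
 * A plausible bring-up value for mac_control based on the comments above:
 * receive and transmit enabled, automatic pad stripping, and the heart-beat
 * check disabled as required.  The exact value the driver programs may
 * differ; this is only a sketch.
 */
#define AR2313_MAC_CONTROL_INIT \
	(MAC_CONTROL_RE | MAC_CONTROL_TE | MAC_CONTROL_ASTP | MAC_CONTROL_HBD)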
1865 +#define MII_ADDR_BUSY AR_BIT(0)
1866 +#define MII_ADDR_WRITE AR_BIT(1)
1867 +#define MII_ADDR_REG_SHIFT 6
1868 +#define MII_ADDR_PHY_SHIFT 11
1869 +#define MII_DATA_SHIFT 0
1870 +
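/*
 * Sketch of a PHY register read through the MII address/data pair.  "eth"
 * is assumed to point at the ETHERNET_STRUCT register overlay from
 * platform.h; the function name is illustrative.
 */
static unsigned short ar2313_mdio_read(volatile ETHERNET_STRUCT *eth,
				       int phy, int reg)
{
	eth->mii_addr = (phy << MII_ADDR_PHY_SHIFT) |
			(reg << MII_ADDR_REG_SHIFT) | MII_ADDR_BUSY;
	while (eth->mii_addr & MII_ADDR_BUSY)
		;	/* poll until the MAC has completed the cycle */
	return eth->mii_data & 0xffff;
}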
1871 +#define FLOW_CONTROL_FCE AR_BIT(1)
1872 +
1873 +#define DMA_BUS_MODE_SWR AR_BIT(0) /* software reset */
1874 +#define DMA_BUS_MODE_BLE AR_BIT(7) /* big endian mode */
1875 +#define DMA_BUS_MODE_PBL_SHIFT 8 /* programmable burst length 32 */
1876 +#define DMA_BUS_MODE_DBO AR_BIT(20) /* big-endian descriptors */
1877 +
1878 +#define DMA_STATUS_TI AR_BIT(0) /* transmit interrupt */
1879 +#define DMA_STATUS_TPS AR_BIT(1) /* transmit process stopped */
1880 +#define DMA_STATUS_TU AR_BIT(2) /* transmit buffer unavailable */
1881 +#define DMA_STATUS_TJT AR_BIT(3) /* transmit jabber timeout */
1882 +#define DMA_STATUS_UNF AR_BIT(5) /* transmit underflow */
1883 +#define DMA_STATUS_RI AR_BIT(6) /* receive interrupt */
1884 +#define DMA_STATUS_RU AR_BIT(7) /* receive buffer unavailable */
1885 +#define DMA_STATUS_RPS AR_BIT(8) /* receive process stopped */
1886 +#define DMA_STATUS_ETI AR_BIT(10) /* early transmit interrupt */
1887 +#define DMA_STATUS_FBE AR_BIT(13) /* fatal bus error */
1888 +#define DMA_STATUS_ERI AR_BIT(14) /* early receive interrupt */
1889 +#define DMA_STATUS_AIS AR_BIT(15) /* abnormal interrupt summary */
1890 +#define DMA_STATUS_NIS AR_BIT(16) /* normal interrupt summary */
1891 +#define DMA_STATUS_RS_SHIFT 17 /* receive process state */
1892 +#define DMA_STATUS_TS_SHIFT 20 /* transmit process state */
1893 +#define DMA_STATUS_EB_SHIFT 23 /* error bits */
1894 +
1895 +#define DMA_CONTROL_SR AR_BIT(1) /* start receive */
1896 +#define DMA_CONTROL_ST AR_BIT(13) /* start transmit */
1897 +#define DMA_CONTROL_SF AR_BIT(21) /* store and forward */
1898 +
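/*
 * Sketch of the usual DMA bring-up order: software reset through bus_mode,
 * wait for the bit to self-clear, then start receive and transmit in
 * store-and-forward mode.  "dma_regs" is an assumed pointer to the DMA
 * register overlay declared in platform.h.
 */
static void ar2313_dma_start(volatile DMA *dma_regs)
{
	dma_regs->bus_mode = DMA_BUS_MODE_SWR;
	while (dma_regs->bus_mode & DMA_BUS_MODE_SWR)
		;	/* reset completes when the bit clears */
	dma_regs->control = DMA_CONTROL_SR | DMA_CONTROL_ST | DMA_CONTROL_SF;
}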
1899 +#endif // __ARUBA_DMA_H__
1900 +
1901 +
1902 +
1903 +
1904 +
1905 diff -urN linux.old/drivers/net/ar2313/Makefile linux.eth/drivers/net/ar2313/Makefile
1906 --- linux.old/drivers/net/ar2313/Makefile 1970-01-01 01:00:00.000000000 +0100
1907 +++ linux.eth/drivers/net/ar2313/Makefile 2006-12-16 04:30:11.000000000 +0100
1908 @@ -0,0 +1,5 @@
1909 +#
1910 +# Makefile for the AR2313 ethernet driver
1911 +#
1912 +
1913 +obj-$(CONFIG_AR2313) += ar2313.o
1914 diff -urN linux.old/drivers/net/ar2313/platform.h linux.eth/drivers/net/ar2313/platform.h
1915 --- linux.old/drivers/net/ar2313/platform.h 1970-01-01 01:00:00.000000000 +0100
1916 +++ linux.eth/drivers/net/ar2313/platform.h 2006-12-16 04:30:11.000000000 +0100
1917 @@ -0,0 +1,128 @@
1918 +/********************************************************************************
1919 + Title: $Source: platform.h,v $
1920 +
1921 + Author: Dan Steinberg
1922 + Copyright Integrated Device Technology 2001
1923 +
1924 + Purpose: AR2313 Register/Bit Definitions
1925 +
1926 + Update:
1927 + $Log: platform.h,v $
1928 +
1929 + Notes: See Merlot architecture spec for complete details. Note that all
1930 + addresses are virtual addresses in kseg1 (Uncached, Unmapped).
1931 +
1932 +********************************************************************************/
1933 +
1934 +#ifndef PLATFORM_H
1935 +#define PLATFORM_H
1936 +
1937 +#define BIT(x) (1 << (x))
1938 +
1939 +#define RESET_BASE 0xBC003020
1940 +#define RESET_VALUE 0x00000001
1941 +
1942 +/********************************************************************
1943 + * Device controller
1944 + ********************************************************************/
1945 +typedef struct {
1946 + volatile unsigned int flash0;
1947 +} DEVICE;
1948 +
1949 +#define device (*((volatile DEVICE *) DEV_CTL_BASE))
1950 +
1951 +// flash0 register bits
1952 +#define DEV_WP (1<<26)
1953 +
1954 +/********************************************************************
1955 + * DDR controller
1956 + ********************************************************************/
1957 +typedef struct {
1958 + volatile unsigned int ddrc0;
1959 + volatile unsigned int ddrc1;
1960 + volatile unsigned int ddrrefresh;
1961 +} DDR;
1962 +
1963 +#define ddr (*((volatile DDR *) DDR_BASE))
1964 +
1965 +// DDRC register
1966 +#define DDRC_CS(i) (((i) & 0x3) << 0)
1967 +#define DDRC_WE (1<<2)
1968 +
1969 +/********************************************************************
1970 + * Ethernet interfaces
1971 + ********************************************************************/
1972 +#define ETHERNET_BASE 0xB8200000
1973 +
1974 +//
1975 +// Combined register layout shared by eth0 and eth1
1976 +//
1977 +typedef struct {
1978 + volatile unsigned int mac_control; /* 0x00 */
1979 + volatile unsigned int mac_addr[2]; /* 0x04 - 0x08 */
1980 + volatile unsigned int mcast_table[2]; /* 0x0c - 0x10 */
1981 + volatile unsigned int mii_addr; /* 0x14 */
1982 + volatile unsigned int mii_data; /* 0x18 */
1983 + volatile unsigned int flow_control; /* 0x1c */
1984 + volatile unsigned int vlan_tag; /* 0x20 */
1985 + volatile unsigned int pad[7]; /* 0x24 - 0x3c */
1986 + volatile unsigned int ucast_table[8]; /* 0x40-0x5c */
1987 +
1988 +} ETHERNET_STRUCT;
1989 +
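/*
 * Sketch of programming the station address through mac_addr[].  The word
 * packing shown (low bytes in mac_addr[0]) follows the usual AR231x
 * convention but is an assumption here; "eth" and "mac" are illustrative
 * parameters.
 */
static void ar2313_set_station_addr(volatile ETHERNET_STRUCT *eth,
				    const unsigned char *mac)
{
	eth->mac_addr[0] = (mac[5] << 8) | mac[4];
	eth->mac_addr[1] = (mac[3] << 24) | (mac[2] << 16) |
			   (mac[1] << 8) | mac[0];
}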
1990 +/********************************************************************
1991 + * Interrupt controller
1992 + ********************************************************************/
1993 +
1994 +typedef struct {
1995 + volatile unsigned int wdog_control; /* 0x08 */
1996 + volatile unsigned int wdog_timer; /* 0x0c */
1997 + volatile unsigned int misc_status; /* 0x10 */
1998 + volatile unsigned int misc_mask; /* 0x14 */
1999 + volatile unsigned int global_status; /* 0x18 */
2000 + volatile unsigned int reserved; /* 0x1c */
2001 + volatile unsigned int reset_control; /* 0x20 */
2002 +} INTERRUPT;
2003 +
2004 +#define interrupt (*((volatile INTERRUPT *) INTERRUPT_BASE))
2005 +
2006 +#define INTERRUPT_MISC_TIMER BIT(0)
2007 +#define INTERRUPT_MISC_AHBPROC BIT(1)
2008 +#define INTERRUPT_MISC_AHBDMA BIT(2)
2009 +#define INTERRUPT_MISC_GPIO BIT(3)
2010 +#define INTERRUPT_MISC_UART BIT(4)
2011 +#define INTERRUPT_MISC_UARTDMA BIT(5)
2012 +#define INTERRUPT_MISC_WATCHDOG BIT(6)
2013 +#define INTERRUPT_MISC_LOCAL BIT(7)
2014 +
2015 +#define INTERRUPT_GLOBAL_ETH BIT(2)
2016 +#define INTERRUPT_GLOBAL_WLAN BIT(3)
2017 +#define INTERRUPT_GLOBAL_MISC BIT(4)
2018 +#define INTERRUPT_GLOBAL_ITIMER BIT(5)
2019 +
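/*
 * Sketch of testing the global status register for a pending ethernet
 * interrupt.  The "interrupt" overlay above relies on INTERRUPT_BASE, which
 * is provided by the ar531x platform headers; the helper name is
 * illustrative.
 */
static inline int ar531x_eth_irq_pending(void)
{
	return (interrupt.global_status & INTERRUPT_GLOBAL_ETH) != 0;
}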
2020 +/********************************************************************
2021 + * DMA controller
2022 + ********************************************************************/
2023 +#define DMA_BASE 0xB8201000
2024 +
2025 +typedef struct {
2026 + volatile unsigned int bus_mode; /* 0x00 (CSR0) */
2027 + volatile unsigned int xmt_poll; /* 0x04 (CSR1) */
2028 + volatile unsigned int rcv_poll; /* 0x08 (CSR2) */
2029 + volatile unsigned int rcv_base; /* 0x0c (CSR3) */
2030 + volatile unsigned int xmt_base; /* 0x10 (CSR4) */
2031 + volatile unsigned int status; /* 0x14 (CSR5) */
2032 + volatile unsigned int control; /* 0x18 (CSR6) */
2033 + volatile unsigned int intr_ena; /* 0x1c (CSR7) */
2034 + volatile unsigned int rcv_missed; /* 0x20 (CSR8) */
2035 + volatile unsigned int reserved[11]; /* 0x24-0x4c (CSR9-19) */
2036 + volatile unsigned int cur_tx_buf_addr; /* 0x50 (CSR20) */
2037 + volatile unsigned int cur_rx_buf_addr; /* 0x54 (CSR21) */
2038 +} DMA;
2039 +
2040 +#define dma (*((volatile DMA *) DMA_BASE))
2041 +
2042 +// macro to convert from virtual to physical address
2043 +#define phys_addr(x) ((x) & 0x1fffffff)
2044 +
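/*
 * Sketch of pointing the DMA engine at the descriptor rings.  The
 * controller expects physical addresses, hence phys_addr(); "rx_ring" and
 * "tx_ring" are assumed to be KSEG1 virtual addresses of the two rings,
 * and the helper name is illustrative.
 */
static void ar2313_dma_set_rings(unsigned long rx_ring, unsigned long tx_ring)
{
	dma.rcv_base = phys_addr(rx_ring);
	dma.xmt_base = phys_addr(tx_ring);
}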
2045 +#endif /* PLATFORM_H */
2046 diff -urN linux.old/drivers/net/Kconfig linux.eth/drivers/net/Kconfig
2047 --- linux.old/drivers/net/Kconfig 2006-12-14 23:53:29.000000000 +0100
2048 +++ linux.eth/drivers/net/Kconfig 2006-12-16 04:30:11.000000000 +0100
2049 @@ -313,6 +313,12 @@
2050
2051 source "drivers/net/arm/Kconfig"
2052
2053 +config AR2313
2054 + tristate "AR2313 Ethernet support"
2055 + depends on NET_ETHERNET && AR531X
2056 + help
2057 + Support for the Ethernet controller on Atheros AR231x/AR531x SoCs.
2058 +
2059 config MACE
2060 tristate "MACE (Power Mac ethernet) support"
2061 depends on NET_ETHERNET && PPC_PMAC && PPC32
2062 diff -urN linux.old/drivers/net/Makefile linux.eth/drivers/net/Makefile
2063 --- linux.old/drivers/net/Makefile 2006-12-14 23:53:29.000000000 +0100
2064 +++ linux.eth/drivers/net/Makefile 2006-12-16 04:30:11.000000000 +0100
2065 @@ -9,6 +9,7 @@
2066 obj-$(CONFIG_EHEA) += ehea/
2067 obj-$(CONFIG_BONDING) += bonding/
2068 obj-$(CONFIG_GIANFAR) += gianfar_driver.o
2069 +obj-$(CONFIG_AR2313) += ar2313/
2070
2071 gianfar_driver-objs := gianfar.o \
2072 gianfar_ethtool.o \