2 * ar2313.c: Linux driver for the Atheros AR231x Ethernet device.
4 * Copyright (C) 2004 by Sameer Dekate <sdekate@arubanetworks.com>
5 * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
6 * Copyright (C) 2006-2007 Felix Fietkau <nbd@openwrt.org>
8 * Thanks to Atheros for providing hardware and documentation
9 * enabling me to write this driver.
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
17 * This code is taken from John Taylor's Sibyte driver and then
18 * modified for the AR2313.
21 #include <linux/autoconf.h>
22 #include <linux/module.h>
23 #include <linux/version.h>
24 #include <linux/types.h>
25 #include <linux/errno.h>
26 #include <linux/ioport.h>
27 #include <linux/pci.h>
28 #include <linux/netdevice.h>
29 #include <linux/etherdevice.h>
30 #include <linux/skbuff.h>
31 #include <linux/init.h>
32 #include <linux/delay.h>
34 #include <linux/highmem.h>
35 #include <linux/sockios.h>
36 #include <linux/pkt_sched.h>
37 #include <linux/compile.h>
38 #include <linux/mii.h>
39 #include <linux/ethtool.h>
40 #include <linux/ctype.h>
41 #include <linux/platform_device.h>
46 #include <asm/system.h>
49 #include <asm/byteorder.h>
50 #include <asm/uaccess.h>
51 #include <asm/bootinfo.h>
53 #define AR2313_MTU 1692
54 #define AR2313_PRIOS 1
55 #define AR2313_QUEUES (2*AR2313_PRIOS)
56 #define AR2313_DESCR_ENTRIES 64
67 #define min(a,b) (((a)<(b))?(a):(b))
70 #ifndef SMP_CACHE_BYTES
71 #define SMP_CACHE_BYTES L1_CACHE_BYTES
74 #define AR2313_MBOX_SET_BIT 0x8
76 #define BOARD_IDX_STATIC 0
77 #define BOARD_IDX_OVERFLOW -1
84 * New interrupt handler strategy:
86 * An old interrupt handler worked using the traditional method of
87 * replacing an skbuff with a new one when a packet arrives. However
88 * the rx rings do not need to contain a static number of buffer
89 * descriptors, thus it makes sense to move the memory allocation out
90 * of the main interrupt handler and do it in a bottom half handler
91 * and only allocate new buffers when the number of buffers in the
92 * ring is below a certain threshold. In order to avoid starving the
93 * NIC under heavy load it is however necessary to force allocation
94 * when hitting a minimum threshold. The strategy for allocation is as
97 * RX_LOW_BUF_THRES - allocate buffers in the bottom half
98 * RX_PANIC_LOW_THRES - we are very low on buffers, allocate
99 * the buffers in the interrupt handler
100 * RX_RING_THRES - maximum number of buffers in the rx ring
102 * One advantageous side effect of this allocation approach is that the
103 * entire rx processing can be done without holding any spin lock
104 * since the rx rings and registers are totally independent of the tx
105 * ring and its registers. This of course includes the kmalloc's of
106 * new skb's. Thus start_xmit can run in parallel with rx processing
107 * and the memory allocation on SMP systems.
109 * Note that running the skb reallocation in a bottom half opens up
110 * another can of races which needs to be handled properly. In
111 * particular it can happen that the interrupt handler tries to run
112 * the reallocation while the bottom half is either running on another
113 * CPU or was interrupted on the same CPU. To get around this the
114 * driver uses bitops to prevent the reallocation routines from being
117 * TX handling can also be done without holding any spin lock, wheee
118 * this is fun! since tx_csm is only written to by the interrupt
123 * Threshold values for RX buffer allocation - the low water marks for
124 * when to start refilling the rings are set to 75% of the ring
125 * sizes. It seems to make sense to refill the rings entirely from the
126 * interrupt handler once it gets below the panic threshold, that way
127 * we don't risk that the refilling is moved to another CPU when the
128 * one running the interrupt handler just got the slab code hot in its
131 #define RX_RING_SIZE AR2313_DESCR_ENTRIES
132 #define RX_PANIC_THRES (RX_RING_SIZE/4)
133 #define RX_LOW_THRES ((3*RX_RING_SIZE)/4)
137 #define AR2313_BUFSIZE (AR2313_MTU + ETH_HLEN + CRC_LEN + RX_OFFSET)
140 MODULE_AUTHOR("Sameer Dekate <sdekate@arubanetworks.com>, Imre Kaloz <kaloz@openwrt.org>, Felix Fietkau <nbd@openwrt.org>");
141 MODULE_DESCRIPTION("AR2313 Ethernet driver");
144 #define virt_to_phys(x) ((u32)(x) & 0x1fffffff)
147 static short armiiread(struct net_device
*dev
, short phy
, short reg
);
148 static void armiiwrite(struct net_device
*dev
, short phy
, short reg
, short data
);
150 static void ar2313_tx_timeout(struct net_device
*dev
);
152 static void ar2313_halt(struct net_device
*dev
);
153 static void rx_tasklet_func(unsigned long data
);
154 static void ar2313_multicast_list(struct net_device
*dev
);
157 #define ERR(fmt, args...) printk("%s: " fmt, __func__, ##args)
161 int __init
ar2313_probe(struct platform_device
*pdev
)
163 struct net_device
*dev
;
164 struct ar2313_private
*sp
;
165 struct resource
*res
;
166 unsigned long ar_eth_base
;
169 dev
= alloc_etherdev(sizeof(struct ar2313_private
));
172 printk(KERN_ERR
"ar2313: Unable to allocate net_device structure!\n");
176 SET_MODULE_OWNER(dev
);
177 platform_set_drvdata(pdev
, dev
);
181 sp
->cfg
= pdev
->dev
.platform_data
;
183 sprintf(buf
, "eth%d_membase", pdev
->id
);
184 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, buf
);
189 ar_eth_base
= res
->start
;
190 sp
->phy
= sp
->cfg
->phy
;
192 sprintf(buf
, "eth%d_irq", pdev
->id
);
193 dev
->irq
= platform_get_irq_byname(pdev
, buf
);
195 spin_lock_init(&sp
->lock
);
197 /* initialize func pointers */
198 dev
->open
= &ar2313_open
;
199 dev
->stop
= &ar2313_close
;
200 dev
->hard_start_xmit
= &ar2313_start_xmit
;
202 dev
->get_stats
= &ar2313_get_stats
;
203 dev
->set_multicast_list
= &ar2313_multicast_list
;
205 dev
->tx_timeout
= ar2313_tx_timeout
;
206 dev
->watchdog_timeo
= AR2313_TX_TIMEOUT
;
208 dev
->do_ioctl
= &ar2313_ioctl
;
210 // SAMEER: do we need this?
211 dev
->features
|= NETIF_F_SG
| NETIF_F_HIGHDMA
;
213 tasklet_init(&sp
->rx_tasklet
, rx_tasklet_func
, (unsigned long) dev
);
214 tasklet_disable(&sp
->rx_tasklet
);
216 sp
->eth_regs
= ioremap_nocache(virt_to_phys(ar_eth_base
), sizeof(*sp
->eth_regs
));
218 printk("Can't remap eth registers\n");
223 * When there's only one MAC, PHY regs are typically on ENET0,
224 * even though the MAC might be on ENET1.
225 * Need to remap PHY regs separately in this case
227 if (virt_to_phys(ar_eth_base
) == virt_to_phys(sp
->phy_regs
))
228 sp
->phy_regs
= sp
->eth_regs
;
230 sp
->phy_regs
= ioremap_nocache(virt_to_phys(sp
->cfg
->phy_base
), sizeof(*sp
->phy_regs
));
232 printk("Can't remap phy registers\n");
237 sp
->dma_regs
= ioremap_nocache(virt_to_phys(ar_eth_base
+ 0x1000), sizeof(*sp
->dma_regs
));
238 dev
->base_addr
= (unsigned int) sp
->dma_regs
;
240 printk("Can't remap DMA registers\n");
244 sp
->int_regs
= ioremap_nocache(virt_to_phys(sp
->cfg
->reset_base
), 4);
246 printk("Can't remap INTERRUPT registers\n");
250 strncpy(sp
->name
, "Atheros AR231x", sizeof (sp
->name
) - 1);
251 sp
->name
[sizeof (sp
->name
) - 1] = '\0';
252 memcpy(dev
->dev_addr
, sp
->cfg
->macaddr
, 6);
253 sp
->board_idx
= BOARD_IDX_STATIC
;
255 if (ar2313_init(dev
)) {
257 * ar2313_init() calls ar2313_init_cleanup() on error.
263 if (register_netdev(dev
)){
264 printk("%s: register_netdev failed\n", __func__
);
268 printk("%s: %s: %02x:%02x:%02x:%02x:%02x:%02x, irq %d\n",
270 dev
->dev_addr
[0], dev
->dev_addr
[1], dev
->dev_addr
[2],
271 dev
->dev_addr
[3], dev
->dev_addr
[4], dev
->dev_addr
[5],
274 /* start link poll timer */
275 ar2313_setup_timer(dev
);
281 static void ar2313_dump_regs(struct net_device
*dev
)
283 unsigned int *ptr
, i
;
284 struct ar2313_private
*sp
= (struct ar2313_private
*)dev
->priv
;
286 ptr
= (unsigned int *)sp
->eth_regs
;
287 for(i
=0; i
< (sizeof(ETHERNET_STRUCT
)/ sizeof(unsigned int)); i
++, ptr
++) {
288 printk("ENET: %08x = %08x\n", (int)ptr
, *ptr
);
291 ptr
= (unsigned int *)sp
->dma_regs
;
292 for(i
=0; i
< (sizeof(DMA
)/ sizeof(unsigned int)); i
++, ptr
++) {
293 printk("DMA: %08x = %08x\n", (int)ptr
, *ptr
);
296 ptr
= (unsigned int *)sp
->int_regs
;
297 for(i
=0; i
< (sizeof(INTERRUPT
)/ sizeof(unsigned int)); i
++, ptr
++){
298 printk("INT: %08x = %08x\n", (int)ptr
, *ptr
);
301 for (i
= 0; i
< AR2313_DESCR_ENTRIES
; i
++) {
302 ar2313_descr_t
*td
= &sp
->tx_ring
[i
];
303 printk("Tx desc %2d: %08x %08x %08x %08x\n", i
,
304 td
->status
, td
->devcs
, td
->addr
, td
->descr
);
311 ar2313_tx_timeout(struct net_device
*dev
)
313 struct ar2313_private
*sp
= (struct ar2313_private
*)dev
->priv
;
317 printk("Tx timeout\n");
319 spin_lock_irqsave(&sp
->lock
, flags
);
321 spin_unlock_irqrestore(&sp
->lock
, flags
);
327 printMcList(struct net_device
*dev
)
329 struct dev_mc_list
*list
= dev
->mc_list
;
332 printk("%d MC ADDR ", num
);
333 for(i
=0;i
<list
->dmi_addrlen
;i
++) {
334 printk(":%02x", list
->dmi_addr
[i
]);
343 * Set or clear the multicast filter for this adaptor.
344 * THIS IS ABSOLUTE CRAP, disabled
347 ar2313_multicast_list(struct net_device
*dev
)
350 * Always listen to broadcasts and
351 * treat IFF bits independently
353 struct ar2313_private
*sp
= (struct ar2313_private
*)dev
->priv
;
354 unsigned int recognise
;
356 recognise
= sp
->eth_regs
->mac_control
;
358 if (dev
->flags
& IFF_PROMISC
) { /* set promiscuous mode */
359 recognise
|= MAC_CONTROL_PR
;
361 recognise
&= ~MAC_CONTROL_PR
;
364 if ((dev
->flags
& IFF_ALLMULTI
) || (dev
->mc_count
> 15)) {
367 printk("%s: all MULTICAST mc_count %d\n", __FUNCTION__
, dev
->mc_count
);
369 recognise
|= MAC_CONTROL_PM
;/* all multicast */
370 } else if (dev
->mc_count
> 0) {
373 printk("%s: mc_count %d\n", __FUNCTION__
, dev
->mc_count
);
375 recognise
|= MAC_CONTROL_PM
; /* for the time being */
378 printk("%s: setting %08x to %08x\n", __FUNCTION__
, (int)sp
->eth_regs
, recognise
);
381 sp
->eth_regs
->mac_control
= recognise
;
384 static void rx_tasklet_cleanup(struct net_device
*dev
)
386 struct ar2313_private
*sp
= dev
->priv
;
389 * Tasklet may be scheduled. Need to get it removed from the list
390 * since we're about to free the struct.
394 tasklet_enable(&sp
->rx_tasklet
);
395 tasklet_kill(&sp
->rx_tasklet
);
398 static int __exit
ar2313_remove(struct platform_device
*pdev
)
400 struct net_device
*dev
= platform_get_drvdata(pdev
);
401 rx_tasklet_cleanup(dev
);
402 ar2313_init_cleanup(dev
);
403 unregister_netdev(dev
);
410 * Restart the AR2313 ethernet controller.
412 static int ar2313_restart(struct net_device
*dev
)
414 /* disable interrupts */
415 disable_irq(dev
->irq
);
423 /* enable interrupts */
424 enable_irq(dev
->irq
);
429 static struct platform_driver ar2313_driver
= {
430 .driver
.name
= "ar531x-eth",
431 .probe
= ar2313_probe
,
432 .remove
= ar2313_remove
,
435 int __init
ar2313_module_init(void)
437 return platform_driver_register(&ar2313_driver
);
440 void __exit
ar2313_module_cleanup(void)
442 platform_driver_unregister(&ar2313_driver
);
445 module_init(ar2313_module_init
);
446 module_exit(ar2313_module_cleanup
);
449 static void ar2313_free_descriptors(struct net_device
*dev
)
451 struct ar2313_private
*sp
= dev
->priv
;
452 if (sp
->rx_ring
!= NULL
) {
453 kfree((void*)KSEG0ADDR(sp
->rx_ring
));
460 static int ar2313_allocate_descriptors(struct net_device
*dev
)
462 struct ar2313_private
*sp
= dev
->priv
;
465 ar2313_descr_t
*space
;
467 if(sp
->rx_ring
!= NULL
){
468 printk("%s: already done.\n", __FUNCTION__
);
472 size
= (sizeof(ar2313_descr_t
) * (AR2313_DESCR_ENTRIES
* AR2313_QUEUES
));
473 space
= kmalloc(size
, GFP_KERNEL
);
477 /* invalidate caches */
478 dma_cache_inv((unsigned int)space
, size
);
480 /* now convert pointer to KSEG1 */
481 space
= (ar2313_descr_t
*)KSEG1ADDR(space
);
483 memset((void *)space
, 0, size
);
486 space
+= AR2313_DESCR_ENTRIES
;
489 space
+= AR2313_DESCR_ENTRIES
;
491 /* Initialize the transmit Descriptors */
492 for (j
= 0; j
< AR2313_DESCR_ENTRIES
; j
++) {
493 ar2313_descr_t
*td
= &sp
->tx_ring
[j
];
495 td
->devcs
= DMA_TX1_CHAINED
;
497 td
->descr
= virt_to_phys(&sp
->tx_ring
[(j
+1) & (AR2313_DESCR_ENTRIES
-1)]);
505 * Generic cleanup handling data allocated during init. Used when the
506 * module is unloaded or if an error occurs during initialization
508 static void ar2313_init_cleanup(struct net_device
*dev
)
510 struct ar2313_private
*sp
= dev
->priv
;
514 ar2313_free_descriptors(dev
);
516 if (sp
->eth_regs
) iounmap((void*)sp
->eth_regs
);
517 if (sp
->dma_regs
) iounmap((void*)sp
->dma_regs
);
520 for (j
= 0; j
< AR2313_DESCR_ENTRIES
; j
++) {
523 sp
->rx_skb
[j
] = NULL
;
532 for (j
= 0; j
< AR2313_DESCR_ENTRIES
; j
++) {
535 sp
->tx_skb
[j
] = NULL
;
544 static int ar2313_setup_timer(struct net_device
*dev
)
546 struct ar2313_private
*sp
= dev
->priv
;
548 init_timer(&sp
->link_timer
);
550 sp
->link_timer
.function
= ar2313_link_timer_fn
;
551 sp
->link_timer
.data
= (int) dev
;
552 sp
->link_timer
.expires
= jiffies
+ HZ
;
554 add_timer(&sp
->link_timer
);
559 static void ar2313_link_timer_fn(unsigned long data
)
561 struct net_device
*dev
= (struct net_device
*) data
;
562 struct ar2313_private
*sp
= dev
->priv
;
564 // see if the link status changed
565 // This was needed to make sure we set the PHY to the
566 // autonegotiated value of half or full duplex.
567 ar2313_check_link(dev
);
569 // Loop faster when we don't have link.
570 // This was needed to speed up the AP bootstrap time.
572 mod_timer(&sp
->link_timer
, jiffies
+ HZ
/2);
574 mod_timer(&sp
->link_timer
, jiffies
+ LINK_TIMER
);
578 static void ar2313_check_link(struct net_device
*dev
)
580 struct ar2313_private
*sp
= dev
->priv
;
583 phyData
= armiiread(dev
, sp
->phy
, MII_BMSR
);
584 if (sp
->phyData
!= phyData
) {
585 if (phyData
& BMSR_LSTATUS
) {
586 /* link is present, read link partner ability to determine duplexity */
591 reg
= armiiread(dev
, sp
->phy
, MII_BMCR
);
592 if (reg
& BMCR_ANENABLE
) {
593 /* auto neg enabled */
594 reg
= armiiread(dev
, sp
->phy
, MII_LPA
);
595 duplex
= (reg
& (LPA_100FULL
|LPA_10FULL
))? 1:0;
597 /* no auto neg, just read duplex config */
598 duplex
= (reg
& BMCR_FULLDPLX
)? 1:0;
601 printk(KERN_INFO
"%s: Configuring MAC for %s duplex\n", dev
->name
,
602 (duplex
)? "full":"half");
606 sp
->eth_regs
->mac_control
= ((sp
->eth_regs
->mac_control
| MAC_CONTROL_F
) &
610 sp
->eth_regs
->mac_control
= ((sp
->eth_regs
->mac_control
| MAC_CONTROL_DRO
) &
617 sp
->phyData
= phyData
;
622 ar2313_reset_reg(struct net_device
*dev
)
624 struct ar2313_private
*sp
= (struct ar2313_private
*)dev
->priv
;
625 unsigned int ethsal
, ethsah
;
628 *sp
->int_regs
|= sp
->cfg
->reset_mac
;
630 *sp
->int_regs
&= ~sp
->cfg
->reset_mac
;
632 *sp
->int_regs
|= sp
->cfg
->reset_phy
;
634 *sp
->int_regs
&= ~sp
->cfg
->reset_phy
;
637 sp
->dma_regs
->bus_mode
= (DMA_BUS_MODE_SWR
);
639 sp
->dma_regs
->bus_mode
= ((32 << DMA_BUS_MODE_PBL_SHIFT
) | DMA_BUS_MODE_BLE
);
641 /* enable interrupts */
642 sp
->dma_regs
->intr_ena
= (DMA_STATUS_AIS
|
647 sp
->dma_regs
->xmt_base
= virt_to_phys(sp
->tx_ring
);
648 sp
->dma_regs
->rcv_base
= virt_to_phys(sp
->rx_ring
);
649 sp
->dma_regs
->control
= (DMA_CONTROL_SR
| DMA_CONTROL_ST
| DMA_CONTROL_SF
);
651 sp
->eth_regs
->flow_control
= (FLOW_CONTROL_FCE
);
652 sp
->eth_regs
->vlan_tag
= (0x8100);
654 /* Enable Ethernet Interface */
655 flags
= (MAC_CONTROL_TE
| /* transmit enable */
656 MAC_CONTROL_PM
| /* pass mcast */
657 MAC_CONTROL_F
| /* full duplex */
658 MAC_CONTROL_HBD
); /* heart beat disabled */
660 if (dev
->flags
& IFF_PROMISC
) { /* set promiscuous mode */
661 flags
|= MAC_CONTROL_PR
;
663 sp
->eth_regs
->mac_control
= flags
;
665 /* Set all Ethernet station address registers to their initial values */
666 ethsah
= ((((u_int
)(dev
->dev_addr
[5]) << 8) & (u_int
)0x0000FF00) |
667 (((u_int
)(dev
->dev_addr
[4]) << 0) & (u_int
)0x000000FF));
669 ethsal
= ((((u_int
)(dev
->dev_addr
[3]) << 24) & (u_int
)0xFF000000) |
670 (((u_int
)(dev
->dev_addr
[2]) << 16) & (u_int
)0x00FF0000) |
671 (((u_int
)(dev
->dev_addr
[1]) << 8) & (u_int
)0x0000FF00) |
672 (((u_int
)(dev
->dev_addr
[0]) << 0) & (u_int
)0x000000FF) );
674 sp
->eth_regs
->mac_addr
[0] = ethsah
;
675 sp
->eth_regs
->mac_addr
[1] = ethsal
;
683 static int ar2313_init(struct net_device
*dev
)
685 struct ar2313_private
*sp
= dev
->priv
;
689 * Allocate descriptors
691 if (ar2313_allocate_descriptors(dev
)) {
692 printk("%s: %s: ar2313_allocate_descriptors failed\n",
693 dev
->name
, __FUNCTION__
);
699 * Get the memory for the skb rings.
701 if(sp
->rx_skb
== NULL
) {
702 sp
->rx_skb
= kmalloc(sizeof(struct sk_buff
*) * AR2313_DESCR_ENTRIES
, GFP_KERNEL
);
704 printk("%s: %s: rx_skb kmalloc failed\n",
705 dev
->name
, __FUNCTION__
);
710 memset(sp
->rx_skb
, 0, sizeof(struct sk_buff
*) * AR2313_DESCR_ENTRIES
);
712 if(sp
->tx_skb
== NULL
) {
713 sp
->tx_skb
= kmalloc(sizeof(struct sk_buff
*) * AR2313_DESCR_ENTRIES
, GFP_KERNEL
);
715 printk("%s: %s: tx_skb kmalloc failed\n",
716 dev
->name
, __FUNCTION__
);
721 memset(sp
->tx_skb
, 0, sizeof(struct sk_buff
*) * AR2313_DESCR_ENTRIES
);
724 * Set tx_csm before we start receiving interrupts, otherwise
725 * the interrupt handler might think it is supposed to process
726 * tx ints before we are up and running, which may cause a null
727 * pointer access in the int handler.
735 * Zero the stats before starting the interface
737 memset(&sp
->stats
, 0, sizeof(sp
->stats
));
740 * We load the ring here as there seem to be no way to tell the
741 * firmware to wipe the ring without re-initializing it.
743 ar2313_load_rx_ring(dev
, RX_RING_SIZE
);
748 ar2313_reset_reg(dev
);
753 ecode
= request_irq(dev
->irq
, &ar2313_interrupt
, IRQF_SHARED
| IRQF_DISABLED
| IRQF_SAMPLE_RANDOM
, dev
->name
, dev
);
755 printk(KERN_WARNING
"%s: %s: Requested IRQ %d is busy\n",
756 dev
->name
, __FUNCTION__
, dev
->irq
);
761 tasklet_enable(&sp
->rx_tasklet
);
766 ar2313_init_cleanup(dev
);
773 * Loading rings is safe without holding the spin lock since this is
774 * done only before the device is enabled, thus no interrupts are
775 * generated and by the interrupt handler/tasklet handler.
777 static void ar2313_load_rx_ring(struct net_device
*dev
, int nr_bufs
)
780 struct ar2313_private
*sp
= ((struct net_device
*)dev
)->priv
;
785 for (i
= 0; i
< nr_bufs
; i
++) {
789 if (sp
->rx_skb
[idx
]) {
791 printk(KERN_INFO
"ar2313 rx refill full\n");
796 // partha: create additional room for the second GRE fragment
797 skb
= alloc_skb(AR2313_BUFSIZE
+128, GFP_ATOMIC
);
799 printk("\n\n\n\n %s: No memory in system\n\n\n\n", __FUNCTION__
);
802 // partha: create additional room in the front for tx pkt capture
803 skb_reserve(skb
, 32);
806 * Make sure IP header starts on a fresh cache line.
809 skb_reserve(skb
, RX_OFFSET
);
810 sp
->rx_skb
[idx
] = skb
;
812 rd
= (ar2313_descr_t
*) &sp
->rx_ring
[idx
];
814 /* initialize dma descriptor */
815 rd
->devcs
= ((AR2313_BUFSIZE
<< DMA_RX1_BSIZE_SHIFT
) |
817 rd
->addr
= virt_to_phys(skb
->data
);
818 rd
->descr
= virt_to_phys(&sp
->rx_ring
[(idx
+1) & (AR2313_DESCR_ENTRIES
-1)]);
819 rd
->status
= DMA_RX_OWN
;
826 printk(KERN_INFO
"Out of memory when allocating standard receive buffers\n");
835 #define AR2313_MAX_PKTS_PER_CALL 64
837 static int ar2313_rx_int(struct net_device
*dev
)
839 struct ar2313_private
*sp
= dev
->priv
;
840 struct sk_buff
*skb
, *skb_new
;
841 ar2313_descr_t
*rxdesc
;
849 /* process at most the entire ring and then wait for another interrupt */
852 rxdesc
= &sp
->rx_ring
[idx
];
853 status
= rxdesc
->status
;
854 if (status
& DMA_RX_OWN
) {
855 /* SiByte owns descriptor or descr not yet filled in */
860 if (++pkts
> AR2313_MAX_PKTS_PER_CALL
) {
866 printk("index %d\n", idx
);
867 printk("RX status %08x\n", rxdesc
->status
);
868 printk("RX devcs %08x\n", rxdesc
->devcs
);
869 printk("RX addr %08x\n", rxdesc
->addr
);
870 printk("RX descr %08x\n", rxdesc
->descr
);
873 if ((status
& (DMA_RX_ERROR
|DMA_RX_ERR_LENGTH
)) &&
874 (!(status
& DMA_RX_LONG
))){
876 printk("%s: rx ERROR %08x\n", __FUNCTION__
, status
);
878 sp
->stats
.rx_errors
++;
879 sp
->stats
.rx_dropped
++;
881 /* add statistics counters */
882 if (status
& DMA_RX_ERR_CRC
) sp
->stats
.rx_crc_errors
++;
883 if (status
& DMA_RX_ERR_COL
) sp
->stats
.rx_over_errors
++;
884 if (status
& DMA_RX_ERR_LENGTH
)
885 sp
->stats
.rx_length_errors
++;
886 if (status
& DMA_RX_ERR_RUNT
) sp
->stats
.rx_over_errors
++;
887 if (status
& DMA_RX_ERR_DESC
) sp
->stats
.rx_over_errors
++;
890 /* alloc new buffer. */
891 skb_new
= dev_alloc_skb(AR2313_BUFSIZE
+ RX_OFFSET
+ 128);
892 if (skb_new
!= NULL
) {
894 skb
= sp
->rx_skb
[idx
];
896 skb_put(skb
, ((status
>> DMA_RX_LEN_SHIFT
) & 0x3fff) - CRC_LEN
);
898 sp
->stats
.rx_bytes
+= skb
->len
;
899 skb
->protocol
= eth_type_trans(skb
, dev
);
900 /* pass the packet to upper layers */
905 skb_reserve(skb_new
, RX_OFFSET
+32);
906 /* reset descriptor's curr_addr */
907 rxdesc
->addr
= virt_to_phys(skb_new
->data
);
909 sp
->stats
.rx_packets
++;
910 sp
->rx_skb
[idx
] = skb_new
;
912 sp
->stats
.rx_dropped
++;
916 rxdesc
->devcs
= ((AR2313_BUFSIZE
<< DMA_RX1_BSIZE_SHIFT
) |
918 rxdesc
->status
= DMA_RX_OWN
;
929 static void ar2313_tx_int(struct net_device
*dev
)
931 struct ar2313_private
*sp
= dev
->priv
;
934 ar2313_descr_t
*txdesc
;
935 unsigned int status
=0;
939 while (idx
!= sp
->tx_prd
) {
941 txdesc
= &sp
->tx_ring
[idx
];
944 printk("%s: TXINT: csm=%d idx=%d prd=%d status=%x devcs=%x addr=%08x descr=%x\n",
945 dev
->name
, sp
->tx_csm
, idx
, sp
->tx_prd
,
946 txdesc
->status
, txdesc
->devcs
, txdesc
->addr
, txdesc
->descr
);
949 if ((status
= txdesc
->status
) & DMA_TX_OWN
) {
950 /* ar2313 dma still owns descr */
953 /* done with this descriptor */
954 dma_unmap_single(NULL
, txdesc
->addr
, txdesc
->devcs
& DMA_TX1_BSIZE_MASK
, DMA_TO_DEVICE
);
957 if (status
& DMA_TX_ERROR
){
958 sp
->stats
.tx_errors
++;
959 sp
->stats
.tx_dropped
++;
960 if(status
& DMA_TX_ERR_UNDER
)
961 sp
->stats
.tx_fifo_errors
++;
962 if(status
& DMA_TX_ERR_HB
)
963 sp
->stats
.tx_heartbeat_errors
++;
964 if(status
& (DMA_TX_ERR_LOSS
|
966 sp
->stats
.tx_carrier_errors
++;
967 if (status
& (DMA_TX_ERR_LATE
|
971 sp
->stats
.tx_aborted_errors
++;
974 sp
->stats
.tx_packets
++;
977 skb
= sp
->tx_skb
[idx
];
978 sp
->tx_skb
[idx
] = NULL
;
980 sp
->stats
.tx_bytes
+= skb
->len
;
981 dev_kfree_skb_irq(skb
);
991 rx_tasklet_func(unsigned long data
)
993 struct net_device
*dev
= (struct net_device
*) data
;
994 struct ar2313_private
*sp
= dev
->priv
;
1000 if (ar2313_rx_int(dev
)) {
1001 tasklet_hi_schedule(&sp
->rx_tasklet
);
1004 unsigned long flags
;
1005 spin_lock_irqsave(&sp
->lock
, flags
);
1006 sp
->dma_regs
->intr_ena
|= DMA_STATUS_RI
;
1007 spin_unlock_irqrestore(&sp
->lock
, flags
);
1012 rx_schedule(struct net_device
*dev
)
1014 struct ar2313_private
*sp
= dev
->priv
;
1016 sp
->dma_regs
->intr_ena
&= ~DMA_STATUS_RI
;
1018 tasklet_hi_schedule(&sp
->rx_tasklet
);
1021 static irqreturn_t
ar2313_interrupt(int irq
, void *dev_id
)
1023 struct net_device
*dev
= (struct net_device
*)dev_id
;
1024 struct ar2313_private
*sp
= dev
->priv
;
1025 unsigned int status
, enabled
;
1027 /* clear interrupt */
1029 * Don't clear RI bit if currently disabled.
1031 status
= sp
->dma_regs
->status
;
1032 enabled
= sp
->dma_regs
->intr_ena
;
1033 sp
->dma_regs
->status
= status
& enabled
;
1035 if (status
& DMA_STATUS_NIS
) {
1038 * Don't schedule rx processing if interrupt
1039 * is already disabled.
1041 if (status
& enabled
& DMA_STATUS_RI
) {
1042 /* receive interrupt */
1045 if (status
& DMA_STATUS_TI
) {
1046 /* transmit interrupt */
1051 if (status
& DMA_STATUS_AIS
) {
1053 printk("%s: AIS set %08x & %x\n", __FUNCTION__
,
1054 status
, (DMA_STATUS_FBE
| DMA_STATUS_TPS
));
1056 /* abnormal status */
1057 if (status
& (DMA_STATUS_FBE
| DMA_STATUS_TPS
)) {
1058 ar2313_restart(dev
);
1065 static int ar2313_open(struct net_device
*dev
)
1067 struct ar2313_private
*sp
;
1072 netif_start_queue(dev
);
1074 sp
->eth_regs
->mac_control
|= MAC_CONTROL_RE
;
1079 static void ar2313_halt(struct net_device
*dev
)
1081 struct ar2313_private
*sp
= dev
->priv
;
1084 tasklet_disable(&sp
->rx_tasklet
);
1087 sp
->eth_regs
->mac_control
&= ~(MAC_CONTROL_RE
| /* disable Receives */
1088 MAC_CONTROL_TE
); /* disable Transmits */
1090 sp
->dma_regs
->control
= 0;
1091 sp
->dma_regs
->bus_mode
= DMA_BUS_MODE_SWR
;
1093 /* place phy and MAC in reset */
1094 *sp
->int_regs
|= (sp
->cfg
->reset_mac
| sp
->cfg
->reset_phy
);
1096 /* free buffers on tx ring */
1097 for (j
= 0; j
< AR2313_DESCR_ENTRIES
; j
++) {
1098 struct sk_buff
*skb
;
1099 ar2313_descr_t
*txdesc
;
1101 txdesc
= &sp
->tx_ring
[j
];
1104 skb
= sp
->tx_skb
[j
];
1107 sp
->tx_skb
[j
] = NULL
;
1113 * close should do nothing. Here's why. It's called when
1114 * 'ifconfig bond0 down' is run. If it calls free_irq then
1115 * the irq is gone forever ! When bond0 is made 'up' again,
1116 * the ar2313_open () does not call request_irq (). Worse,
1117 * the call to ar2313_halt() generates a WDOG reset due to
1118 * the write to 'sp->int_regs' and the box reboots.
1119 * Commenting this out is good since it allows the
1120 * system to resume when bond0 is made up again.
1122 static int ar2313_close(struct net_device
*dev
)
1126 * Disable interrupts
1128 disable_irq(dev
->irq
);
1131 * Without (or before) releasing irq and stopping hardware, this
1132 * is an absolute non-sense, by the way. It will be reset instantly
1135 netif_stop_queue(dev
);
1137 /* stop the MAC and DMA engines */
1140 /* release the interrupt */
1141 free_irq(dev
->irq
, dev
);
1147 static int ar2313_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
1149 struct ar2313_private
*sp
= dev
->priv
;
1154 td
= &sp
->tx_ring
[idx
];
1156 if (td
->status
& DMA_TX_OWN
) {
1158 printk("%s: No space left to Tx\n", __FUNCTION__
);
1160 /* free skbuf and lie to the caller that we sent it out */
1161 sp
->stats
.tx_dropped
++;
1164 /* restart transmitter in case locked */
1165 sp
->dma_regs
->xmt_poll
= 0;
1169 /* Setup the transmit descriptor. */
1170 td
->devcs
= ((skb
->len
<< DMA_TX1_BSIZE_SHIFT
) |
1171 (DMA_TX1_LS
|DMA_TX1_IC
|DMA_TX1_CHAINED
));
1172 td
->addr
= dma_map_single(NULL
, skb
->data
, skb
->len
, DMA_TO_DEVICE
);
1173 td
->status
= DMA_TX_OWN
;
1175 /* kick transmitter last */
1176 sp
->dma_regs
->xmt_poll
= 0;
1179 printk("index %d\n", idx
);
1180 printk("TX status %08x\n", td
->status
);
1181 printk("TX devcs %08x\n", td
->devcs
);
1182 printk("TX addr %08x\n", td
->addr
);
1183 printk("TX descr %08x\n", td
->descr
);
1186 sp
->tx_skb
[idx
] = skb
;
1187 idx
= DSC_NEXT(idx
);
1193 static int netdev_get_ecmd(struct net_device
*dev
, struct ethtool_cmd
*ecmd
)
1195 struct ar2313_private
*np
= dev
->priv
;
1199 (SUPPORTED_10baseT_Half
| SUPPORTED_10baseT_Full
|
1200 SUPPORTED_100baseT_Half
| SUPPORTED_100baseT_Full
|
1201 SUPPORTED_Autoneg
| SUPPORTED_TP
| SUPPORTED_MII
);
1203 ecmd
->port
= PORT_TP
;
1204 /* only supports internal transceiver */
1205 ecmd
->transceiver
= XCVR_INTERNAL
;
1206 /* not sure what this is for */
1207 ecmd
->phy_address
= 1;
1209 ecmd
->advertising
= ADVERTISED_MII
;
1210 tmp
= armiiread(dev
, np
->phy
, MII_ADVERTISE
);
1211 if (tmp
& ADVERTISE_10HALF
)
1212 ecmd
->advertising
|= ADVERTISED_10baseT_Half
;
1213 if (tmp
& ADVERTISE_10FULL
)
1214 ecmd
->advertising
|= ADVERTISED_10baseT_Full
;
1215 if (tmp
& ADVERTISE_100HALF
)
1216 ecmd
->advertising
|= ADVERTISED_100baseT_Half
;
1217 if (tmp
& ADVERTISE_100FULL
)
1218 ecmd
->advertising
|= ADVERTISED_100baseT_Full
;
1220 tmp
= armiiread(dev
, np
->phy
, MII_BMCR
);
1221 if (tmp
& BMCR_ANENABLE
) {
1222 ecmd
->advertising
|= ADVERTISED_Autoneg
;
1223 ecmd
->autoneg
= AUTONEG_ENABLE
;
1225 ecmd
->autoneg
= AUTONEG_DISABLE
;
1228 if (ecmd
->autoneg
== AUTONEG_ENABLE
) {
1229 tmp
= armiiread(dev
, np
->phy
, MII_LPA
);
1230 if (tmp
& (LPA_100FULL
|LPA_10FULL
)) {
1231 ecmd
->duplex
= DUPLEX_FULL
;
1233 ecmd
->duplex
= DUPLEX_HALF
;
1235 if (tmp
& (LPA_100FULL
|LPA_100HALF
)) {
1236 ecmd
->speed
= SPEED_100
;
1238 ecmd
->speed
= SPEED_10
;
1241 if (tmp
& BMCR_FULLDPLX
) {
1242 ecmd
->duplex
= DUPLEX_FULL
;
1244 ecmd
->duplex
= DUPLEX_HALF
;
1246 if (tmp
& BMCR_SPEED100
) {
1247 ecmd
->speed
= SPEED_100
;
1249 ecmd
->speed
= SPEED_10
;
1253 /* ignore maxtxpkt, maxrxpkt for now */
1258 static int netdev_set_ecmd(struct net_device
*dev
, struct ethtool_cmd
*ecmd
)
1260 struct ar2313_private
*np
= dev
->priv
;
1263 if (ecmd
->speed
!= SPEED_10
&& ecmd
->speed
!= SPEED_100
)
1265 if (ecmd
->duplex
!= DUPLEX_HALF
&& ecmd
->duplex
!= DUPLEX_FULL
)
1267 if (ecmd
->port
!= PORT_TP
)
1269 if (ecmd
->transceiver
!= XCVR_INTERNAL
)
1271 if (ecmd
->autoneg
!= AUTONEG_DISABLE
&& ecmd
->autoneg
!= AUTONEG_ENABLE
)
1273 /* ignore phy_address, maxtxpkt, maxrxpkt for now */
1275 /* WHEW! now lets bang some bits */
1277 tmp
= armiiread(dev
, np
->phy
, MII_BMCR
);
1278 if (ecmd
->autoneg
== AUTONEG_ENABLE
) {
1279 /* turn on autonegotiation */
1280 tmp
|= BMCR_ANENABLE
;
1281 printk("%s: Enabling auto-neg\n", dev
->name
);
1283 /* turn off auto negotiation, set speed and duplexity */
1284 tmp
&= ~(BMCR_ANENABLE
| BMCR_SPEED100
| BMCR_FULLDPLX
);
1285 if (ecmd
->speed
== SPEED_100
)
1286 tmp
|= BMCR_SPEED100
;
1287 if (ecmd
->duplex
== DUPLEX_FULL
)
1288 tmp
|= BMCR_FULLDPLX
;
1289 printk("%s: Hard coding %d/%s\n", dev
->name
,
1290 (ecmd
->speed
== SPEED_100
)? 100:10,
1291 (ecmd
->duplex
== DUPLEX_FULL
)? "full":"half");
1293 armiiwrite(dev
, np
->phy
, MII_BMCR
, tmp
);
1298 static int netdev_ethtool_ioctl(struct net_device
*dev
, void *useraddr
)
1300 struct ar2313_private
*np
= dev
->priv
;
1303 if (get_user(cmd
, (u32
*)useraddr
))
1308 case ETHTOOL_GSET
: {
1309 struct ethtool_cmd ecmd
= { ETHTOOL_GSET
};
1310 spin_lock_irq(&np
->lock
);
1311 netdev_get_ecmd(dev
, &ecmd
);
1312 spin_unlock_irq(&np
->lock
);
1313 if (copy_to_user(useraddr
, &ecmd
, sizeof(ecmd
)))
1318 case ETHTOOL_SSET
: {
1319 struct ethtool_cmd ecmd
;
1321 if (copy_from_user(&ecmd
, useraddr
, sizeof(ecmd
)))
1323 spin_lock_irq(&np
->lock
);
1324 r
= netdev_set_ecmd(dev
, &ecmd
);
1325 spin_unlock_irq(&np
->lock
);
1328 /* restart autonegotiation */
1329 case ETHTOOL_NWAY_RST
: {
1332 /* if autoneg is off, it's an error */
1333 tmp
= armiiread(dev
, np
->phy
, MII_BMCR
);
1334 if (tmp
& BMCR_ANENABLE
) {
1335 tmp
|= (BMCR_ANRESTART
);
1336 armiiwrite(dev
, np
->phy
, MII_BMCR
, tmp
);
1341 /* get link status */
1342 case ETHTOOL_GLINK
: {
1343 struct ethtool_value edata
= {ETHTOOL_GLINK
};
1344 edata
.data
= (armiiread(dev
, np
->phy
, MII_BMSR
)&BMSR_LSTATUS
) ? 1:0;
1345 if (copy_to_user(useraddr
, &edata
, sizeof(edata
)))
1354 static int ar2313_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
1356 struct mii_ioctl_data
*data
= (struct mii_ioctl_data
*)&ifr
->ifr_data
;
1361 return netdev_ethtool_ioctl(dev
, (void *) ifr
->ifr_data
);
1363 case SIOCGMIIPHY
: /* Get address of MII PHY in use. */
1367 case SIOCGMIIREG
: /* Read MII PHY register. */
1368 data
->val_out
= armiiread(dev
, data
->phy_id
& 0x1f,
1369 data
->reg_num
& 0x1f);
1371 case SIOCSMIIREG
: /* Write MII PHY register. */
1372 if (!capable(CAP_NET_ADMIN
))
1374 armiiwrite(dev
, data
->phy_id
& 0x1f,
1375 data
->reg_num
& 0x1f, data
->val_in
);
1379 if (copy_from_user(dev
->dev_addr
, ifr
->ifr_data
, sizeof(dev
->dev_addr
)))
1384 if (copy_to_user(ifr
->ifr_data
, dev
->dev_addr
, sizeof(dev
->dev_addr
)))
1395 static struct net_device_stats
*ar2313_get_stats(struct net_device
*dev
)
1397 struct ar2313_private
*sp
= dev
->priv
;
1402 #define MII_ADDR(phy, reg) \
1403 ((reg << MII_ADDR_REG_SHIFT) | (phy << MII_ADDR_PHY_SHIFT))
1406 armiiread(struct net_device
*dev
, short phy
, short reg
)
1408 struct ar2313_private
*sp
= (struct ar2313_private
*)dev
->priv
;
1409 volatile ETHERNET_STRUCT
*ethernet
= sp
->phy_regs
;
1411 ethernet
->mii_addr
= MII_ADDR(phy
, reg
);
1412 while (ethernet
->mii_addr
& MII_ADDR_BUSY
);
1413 return (ethernet
->mii_data
>> MII_DATA_SHIFT
);
1417 armiiwrite(struct net_device
*dev
, short phy
, short reg
, short data
)
1419 struct ar2313_private
*sp
= (struct ar2313_private
*)dev
->priv
;
1420 volatile ETHERNET_STRUCT
*ethernet
= sp
->phy_regs
;
1422 while (ethernet
->mii_addr
& MII_ADDR_BUSY
);
1423 ethernet
->mii_data
= data
<< MII_DATA_SHIFT
;
1424 ethernet
->mii_addr
= MII_ADDR(phy
, reg
) | MII_ADDR_WRITE
;