/*
 * originally drivers/net/tulip_core.c
 * Copyright 2000,2001 The Linux Kernel Team
 * Written/copyright 1994-2001 by Donald Becker.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
11 #define DRV_NAME "tulip"
12 #define DRV_VERSION "1.1.15-NAPI" /* Keep at least for test */
13 #define DRV_RELDATE "Feb 27, 2007"
17 static char version
[] __devinitdata
=
18 "ADM8668net driver version " DRV_VERSION
" (" DRV_RELDATE
")\n";
23 Set the bus performance register.
24 Typical: Set 16 longword cache alignment, no burst limit.
25 Cache alignment bits 15:14 Burst length 13:8
26 0000 No alignment 0x00000000 unlimited 0800 8 longwords
27 4000 8 longwords 0100 1 longword 1000 16 longwords
28 8000 16 longwords 0200 2 longwords 2000 32 longwords
29 C000 32 longwords 0400 4 longwords
30 Warning: many older 486 systems are broken and require setting 0x00A04800
31 8 longword cache alignment, 8 longword burst.
32 ToDo: Non-Intel setting could be better.
35 //static int csr0 = 0x00200000 | 0x4000;
38 /* Operational parameters that usually are not changed. */
39 /* Time in jiffies before concluding the transmitter is hung. */
40 #define TX_TIMEOUT (4*HZ)
42 MODULE_AUTHOR("Scott Nicholas <neutronscott@scottn.us>");
43 MODULE_DESCRIPTION("ADM8668 new ethernet driver.");
44 MODULE_LICENSE("GPL");
45 MODULE_VERSION(DRV_VERSION
);
48 int tulip_debug
= TULIP_DEBUG
;
53 static void tulip_tx_timeout(struct net_device
*dev
);
54 static void tulip_init_ring(struct net_device
*dev
);
55 static void tulip_free_ring(struct net_device
*dev
);
56 static netdev_tx_t
tulip_start_xmit(struct sk_buff
*skb
,
57 struct net_device
*dev
);
58 static int tulip_open(struct net_device
*dev
);
59 static int tulip_close(struct net_device
*dev
);
60 static void tulip_up(struct net_device
*dev
);
61 static void tulip_down(struct net_device
*dev
);
62 static struct net_device_stats
*tulip_get_stats(struct net_device
*dev
);
63 //static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
64 static void set_rx_mode(struct net_device
*dev
);
65 #ifdef CONFIG_NET_POLL_CONTROLLER
66 static void poll_tulip(struct net_device
*dev
);
69 static void tulip_up(struct net_device
*dev
)
71 struct tulip_private
*tp
= netdev_priv(dev
);
72 void __iomem
*ioaddr
= tp
->base_addr
;
74 napi_enable(&tp
->napi
);
76 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
77 iowrite32(0x00000001, ioaddr
+ CSR0
);
80 Wait the specified 50 PCI cycles after a reset by initializing
81 Tx and Rx queues and the address filter list. */
82 iowrite32(tp
->csr0
, ioaddr
+ CSR0
);
85 printk(KERN_DEBUG
"%s: tulip_up(), irq==%d\n",
88 iowrite32(tp
->rx_ring_dma
, ioaddr
+ CSR3
);
89 iowrite32(tp
->tx_ring_dma
, ioaddr
+ CSR4
);
90 tp
->cur_rx
= tp
->cur_tx
= 0;
91 tp
->dirty_rx
= tp
->dirty_tx
= 0;
94 iowrite32(get_unaligned_le32(dev
->dev_addr
), ioaddr
+ 0xA4);
95 iowrite32(get_unaligned_le16(dev
->dev_addr
+ 4), ioaddr
+ 0xA8);
96 iowrite32(0, ioaddr
+ CSR27
);
97 iowrite32(0, ioaddr
+ CSR28
);
101 /* Enable automatic Tx underrun recovery. */
102 iowrite32(ioread32(ioaddr
+ CSR18
) | 1, ioaddr
+ CSR18
);
103 tp
->csr6
= 0x00040000;
105 /* Start the chip's Tx to process setup frame. */
109 iowrite32(tp
->csr6
| TxOn
, ioaddr
+ CSR6
);
111 /* Enable interrupts by setting the interrupt mask. */
112 iowrite32(VALID_INTR
, ioaddr
+ CSR5
);
113 iowrite32(VALID_INTR
, ioaddr
+ CSR7
);
114 tulip_start_rxtx(tp
);
115 iowrite32(0, ioaddr
+ CSR2
); /* Rx poll demand */
117 if (tulip_debug
> 2) {
118 printk(KERN_DEBUG
"%s: Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
119 dev
->name
, ioread32(ioaddr
+ CSR0
),
120 ioread32(ioaddr
+ CSR5
),
121 ioread32(ioaddr
+ CSR6
));
124 init_timer(&tp
->oom_timer
);
125 tp
->oom_timer
.data
= (unsigned long)dev
;
126 tp
->oom_timer
.function
= oom_timer
;
130 tulip_open(struct net_device
*dev
)
134 tulip_init_ring (dev
);
136 retval
= request_irq(dev
->irq
, tulip_interrupt
, 0, dev
->name
, dev
);
142 netif_start_queue (dev
);
147 tulip_free_ring (dev
);
152 static void tulip_tx_timeout(struct net_device
*dev
)
154 struct tulip_private
*tp
= netdev_priv(dev
);
155 void __iomem
*ioaddr
= tp
->base_addr
;
158 spin_lock_irqsave (&tp
->lock
, flags
);
161 "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
162 ioread32(ioaddr
+ CSR5
), ioread32(ioaddr
+ CSR12
));
164 tulip_tx_timeout_complete(tp
, ioaddr
);
166 spin_unlock_irqrestore (&tp
->lock
, flags
);
167 dev
->trans_start
= jiffies
; /* prevent tx timeout */
168 netif_wake_queue (dev
);
172 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
173 static void tulip_init_ring(struct net_device
*dev
)
175 struct tulip_private
*tp
= netdev_priv(dev
);
180 for (i
= 0; i
< RX_RING_SIZE
; i
++) {
181 tp
->rx_ring
[i
].status
= 0x00000000;
182 tp
->rx_ring
[i
].length
= cpu_to_le32(PKT_BUF_SZ
);
183 tp
->rx_ring
[i
].buffer2
= cpu_to_le32(tp
->rx_ring_dma
+ sizeof(struct tulip_rx_desc
) * (i
+ 1));
184 tp
->rx_buffers
[i
].skb
= NULL
;
185 tp
->rx_buffers
[i
].mapping
= 0;
187 /* Mark the last entry as wrapping the ring. */
188 tp
->rx_ring
[i
-1].length
= cpu_to_le32(PKT_BUF_SZ
| DESC_RING_WRAP
);
189 tp
->rx_ring
[i
-1].buffer2
= cpu_to_le32(tp
->rx_ring_dma
);
191 for (i
= 0; i
< RX_RING_SIZE
; i
++) {
193 /* Note the receive buffer must be longword aligned.
194 dev_alloc_skb() provides 16 byte alignment. But do *not*
195 use skb_reserve() to align the IP header! */
196 struct sk_buff
*skb
= dev_alloc_skb(PKT_BUF_SZ
);
197 tp
->rx_buffers
[i
].skb
= skb
;
200 mapping
= dma_map_single(&dev
->dev
, skb
->data
,
201 PKT_BUF_SZ
, DMA_FROM_DEVICE
);
202 tp
->rx_buffers
[i
].mapping
= mapping
;
203 skb
->dev
= dev
; /* Mark as being used by this device. */
204 tp
->rx_ring
[i
].status
= cpu_to_le32(DescOwned
); /* Owned by Tulip chip */
205 tp
->rx_ring
[i
].buffer1
= cpu_to_le32(mapping
);
207 tp
->dirty_rx
= (unsigned int)(i
- RX_RING_SIZE
);
209 /* The Tx buffer descriptor is filled in as needed, but we
210 do need to clear the ownership bit. */
211 for (i
= 0; i
< TX_RING_SIZE
; i
++) {
212 tp
->tx_buffers
[i
].skb
= NULL
;
213 tp
->tx_buffers
[i
].mapping
= 0;
214 tp
->tx_ring
[i
].status
= 0x00000000;
215 tp
->tx_ring
[i
].buffer2
= cpu_to_le32(tp
->tx_ring_dma
+ sizeof(struct tulip_tx_desc
) * (i
+ 1));
217 tp
->tx_ring
[i
-1].buffer2
= cpu_to_le32(tp
->tx_ring_dma
);
221 tulip_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
223 struct tulip_private
*tp
= netdev_priv(dev
);
229 spin_lock_irqsave(&tp
->lock
, flags
);
231 /* Calculate the next Tx descriptor entry. */
232 entry
= tp
->cur_tx
% TX_RING_SIZE
;
234 tp
->tx_buffers
[entry
].skb
= skb
;
235 mapping
= dma_map_single(&tp
->pdev
->dev
, skb
->data
, skb
->len
,
237 tp
->tx_buffers
[entry
].mapping
= mapping
;
238 tp
->tx_ring
[entry
].buffer1
= cpu_to_le32(mapping
);
240 if (tp
->cur_tx
- tp
->dirty_tx
< TX_RING_SIZE
/2) {/* Typical path */
241 flag
= 0x60000000; /* No interrupt */
242 } else if (tp
->cur_tx
- tp
->dirty_tx
== TX_RING_SIZE
/2) {
243 flag
= 0xe0000000; /* Tx-done intr. */
244 } else if (tp
->cur_tx
- tp
->dirty_tx
< TX_RING_SIZE
- 2) {
245 flag
= 0x60000000; /* No Tx-done intr. */
246 } else { /* Leave room for set_rx_mode() to fill entries. */
247 flag
= 0xe0000000; /* Tx-done intr. */
248 netif_stop_queue(dev
);
250 if (entry
== TX_RING_SIZE
-1)
251 flag
= 0xe0000000 | DESC_RING_WRAP
;
253 tp
->tx_ring
[entry
].length
= cpu_to_le32(skb
->len
| flag
);
254 /* if we were using Transmit Automatic Polling, we would need a
256 tp
->tx_ring
[entry
].status
= cpu_to_le32(DescOwned
);
261 /* Trigger an immediate transmit demand. */
262 iowrite32(0, tp
->base_addr
+ CSR1
);
264 spin_unlock_irqrestore(&tp
->lock
, flags
);
269 static void tulip_clean_tx_ring(struct tulip_private
*tp
)
271 unsigned int dirty_tx
;
273 for (dirty_tx
= tp
->dirty_tx
; tp
->cur_tx
- dirty_tx
> 0;
275 int entry
= dirty_tx
% TX_RING_SIZE
;
276 int status
= le32_to_cpu(tp
->tx_ring
[entry
].status
);
279 tp
->stats
.tx_errors
++; /* It wasn't Txed */
280 tp
->tx_ring
[entry
].status
= 0;
283 dma_unmap_single(&tp
->pdev
->dev
, tp
->tx_buffers
[entry
].mapping
,
284 tp
->tx_buffers
[entry
].skb
->len
,
287 /* Free the original skb. */
288 dev_kfree_skb_irq(tp
->tx_buffers
[entry
].skb
);
289 tp
->tx_buffers
[entry
].skb
= NULL
;
290 tp
->tx_buffers
[entry
].mapping
= 0;
294 static void tulip_down (struct net_device
*dev
)
296 struct tulip_private
*tp
= netdev_priv(dev
);
297 void __iomem
*ioaddr
= tp
->base_addr
;
300 napi_disable(&tp
->napi
);
301 del_timer_sync (&tp
->oom_timer
);
302 spin_lock_irqsave (&tp
->lock
, flags
);
304 /* Disable interrupts by clearing the interrupt mask. */
305 iowrite32 (0x00000000, ioaddr
+ CSR7
);
307 /* Stop the Tx and Rx processes. */
310 /* prepare receive buffers */
311 tulip_refill_rx(dev
);
313 /* release any unconsumed transmit buffers */
314 tulip_clean_tx_ring(tp
);
316 if (ioread32 (ioaddr
+ CSR6
) != 0xffffffff)
317 tp
->stats
.rx_missed_errors
+= ioread32 (ioaddr
+ CSR8
) & 0xffff;
319 spin_unlock_irqrestore (&tp
->lock
, flags
);
322 static void tulip_free_ring (struct net_device
*dev
)
324 struct tulip_private
*tp
= netdev_priv(dev
);
327 /* Free all the skbuffs in the Rx queue. */
328 for (i
= 0; i
< RX_RING_SIZE
; i
++) {
329 struct sk_buff
*skb
= tp
->rx_buffers
[i
].skb
;
330 dma_addr_t mapping
= tp
->rx_buffers
[i
].mapping
;
332 tp
->rx_buffers
[i
].skb
= NULL
;
333 tp
->rx_buffers
[i
].mapping
= 0;
335 tp
->rx_ring
[i
].status
= 0; /* Not owned by Tulip chip. */
336 tp
->rx_ring
[i
].length
= 0;
337 /* An invalid address. */
338 tp
->rx_ring
[i
].buffer1
= cpu_to_le32(0xBADF00D0);
340 dma_unmap_single(&tp
->pdev
->dev
, mapping
, PKT_BUF_SZ
,
346 for (i
= 0; i
< TX_RING_SIZE
; i
++) {
347 struct sk_buff
*skb
= tp
->tx_buffers
[i
].skb
;
350 dma_unmap_single(&tp
->pdev
->dev
,
351 tp
->tx_buffers
[i
].mapping
, skb
->len
, DMA_TO_DEVICE
);
354 tp
->tx_buffers
[i
].skb
= NULL
;
355 tp
->tx_buffers
[i
].mapping
= 0;
359 static int tulip_close (struct net_device
*dev
)
361 struct tulip_private
*tp
= netdev_priv(dev
);
362 void __iomem
*ioaddr
= tp
->base_addr
;
364 netif_stop_queue (dev
);
369 dev_printk(KERN_DEBUG
, &dev
->dev
,
370 "Shutting down ethercard, status was %02x\n",
371 ioread32 (ioaddr
+ CSR5
));
373 free_irq (dev
->irq
, dev
);
375 tulip_free_ring (dev
);
380 static struct net_device_stats
*tulip_get_stats(struct net_device
*dev
)
382 struct tulip_private
*tp
= netdev_priv(dev
);
383 void __iomem
*ioaddr
= tp
->base_addr
;
385 if (netif_running(dev
)) {
388 spin_lock_irqsave (&tp
->lock
, flags
);
390 tp
->stats
.rx_missed_errors
+= ioread32(ioaddr
+ CSR8
) & 0xffff;
392 spin_unlock_irqrestore(&tp
->lock
, flags
);
399 static void tulip_get_drvinfo(struct net_device
*dev
, struct ethtool_drvinfo
*info
)
401 strcpy(info
->driver
, DRV_NAME
);
402 strcpy(info
->version
, DRV_VERSION
);
403 strcpy(info
->bus_info
, "mmio");
406 static const struct ethtool_ops ops
= {
407 .get_drvinfo
= tulip_get_drvinfo
410 static void set_rx_mode(struct net_device
*dev
)
412 struct tulip_private
*tp
= netdev_priv(dev
);
413 void __iomem
*ioaddr
= tp
->base_addr
;
416 csr6
= ioread32(ioaddr
+ CSR6
) & ~0x00D5;
419 if (dev
->flags
& IFF_PROMISC
) { /* Set promiscuous. */
420 tp
->csr6
|= AcceptAllMulticast
| AcceptAllPhys
;
421 csr6
|= AcceptAllMulticast
| AcceptAllPhys
;
422 } else if ((netdev_mc_count(dev
) > 1000) ||
423 (dev
->flags
& IFF_ALLMULTI
)) {
424 /* Too many to filter well -- accept all multicasts. */
425 tp
->csr6
|= AcceptAllMulticast
;
426 csr6
|= AcceptAllMulticast
;
428 /* Some work-alikes have only a 64-entry hash filter table. */
429 /* Should verify correctness on big-endian/__powerpc__ */
430 struct netdev_hw_addr
*ha
;
431 if (netdev_mc_count(dev
) > 64) {
432 /* Arbitrary non-effective limit. */
433 tp
->csr6
|= AcceptAllMulticast
;
434 csr6
|= AcceptAllMulticast
;
436 u32 mc_filter
[2] = {0, 0}; /* Multicast hash filter */
438 netdev_for_each_mc_addr(ha
, dev
) {
439 filterbit
= ether_crc_le(ETH_ALEN
, ha
->addr
);
441 mc_filter
[filterbit
>> 5] |= 1 << (filterbit
& 31);
444 "Added filter for %pM %08x bit %d\n",
446 ether_crc(ETH_ALEN
, ha
->addr
),
449 if (mc_filter
[0] == tp
->mc_filter
[0] &&
450 mc_filter
[1] == tp
->mc_filter
[1])
452 iowrite32(mc_filter
[0], ioaddr
+ CSR27
);
453 iowrite32(mc_filter
[1], ioaddr
+ CSR28
);
454 tp
->mc_filter
[0] = mc_filter
[0];
455 tp
->mc_filter
[1] = mc_filter
[1];
459 if (dev
->irq
== ADM8668_LAN_IRQ
)
460 csr6
|= (1 << 9); /* force 100Mbps full duplex */
461 // csr6 |= 1; /* pad 2 bytes. vlan? */
463 iowrite32(csr6
, ioaddr
+ CSR6
);
466 static const struct net_device_ops tulip_netdev_ops
= {
467 .ndo_open
= tulip_open
,
468 .ndo_start_xmit
= tulip_start_xmit
,
469 .ndo_tx_timeout
= tulip_tx_timeout
,
470 .ndo_stop
= tulip_close
,
471 .ndo_get_stats
= tulip_get_stats
,
472 .ndo_set_rx_mode
= set_rx_mode
,
473 .ndo_change_mtu
= eth_change_mtu
,
474 .ndo_set_mac_address
= eth_mac_addr
,
475 .ndo_validate_addr
= eth_validate_addr
,
476 #ifdef CONFIG_NET_POLL_CONTROLLER
477 .ndo_poll_controller
= poll_tulip
,
481 static int __devinit
adm8668net_probe(struct platform_device
*pdev
)
483 struct tulip_private
*tp
;
484 struct net_device
*dev
;
485 struct resource
*res
;
486 void __iomem
*ioaddr
;
489 if (pdev
->id
< 0 || pdev
->id
>= MAX_UNITS
)
492 if (!(res
= platform_get_resource(pdev
, IORESOURCE_IRQ
, 0)))
495 if (!(res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0)))
497 if (!(ioaddr
= ioremap(res
->start
, res
->end
- res
->start
)))
499 if (!(dev
= alloc_etherdev(sizeof (*tp
))))
503 dev
->base_addr
= (unsigned long)res
->start
;
505 SET_NETDEV_DEV(dev
, &pdev
->dev
);
507 /* tulip private struct */
508 tp
= netdev_priv(dev
);
510 tp
->base_addr
= ioaddr
;
513 tp
->rx_ring
= dma_alloc_coherent(&pdev
->dev
,
514 sizeof(struct tulip_rx_desc
) * RX_RING_SIZE
+
515 sizeof(struct tulip_tx_desc
) * TX_RING_SIZE
,
516 &tp
->rx_ring_dma
, GFP_KERNEL
);
519 tp
->tx_ring
= (struct tulip_tx_desc
*)(tp
->rx_ring
+ RX_RING_SIZE
);
520 tp
->tx_ring_dma
= tp
->rx_ring_dma
+ sizeof(struct tulip_rx_desc
) * RX_RING_SIZE
;
522 spin_lock_init(&tp
->lock
);
524 /* Stop the chip's Tx and Rx processes. */
527 /* Clear the missed-packet counter. */
528 ioread32(ioaddr
+ CSR8
);
530 /* Addresses are stored in BSP area of NOR flash */
531 if (irq
== ADM8668_WAN_IRQ
)
532 memcpy(dev
->dev_addr
, (char *)ADM8668_WAN_MACADDR
, 6);
534 memcpy(dev
->dev_addr
, (char *)ADM8668_LAN_MACADDR
, 6);
536 /* The Tulip-specific entries in the device structure. */
537 dev
->netdev_ops
= &tulip_netdev_ops
;
538 dev
->watchdog_timeo
= TX_TIMEOUT
;
539 netif_napi_add(dev
, &tp
->napi
, tulip_poll
, 16);
540 SET_ETHTOOL_OPS(dev
, &ops
);
542 if (register_netdev(dev
))
543 goto err_out_free_ring
;
546 "ADM8668net at MMIO %#lx %pM, IRQ %d\n",
547 (unsigned long)dev
->base_addr
, dev
->dev_addr
, irq
);
549 platform_set_drvdata(pdev
, dev
);
553 dma_free_coherent(&pdev
->dev
,
554 sizeof (struct tulip_rx_desc
) * RX_RING_SIZE
+
555 sizeof (struct tulip_tx_desc
) * TX_RING_SIZE
,
556 tp
->rx_ring
, tp
->rx_ring_dma
);
560 static int __devexit
adm8668net_remove(struct platform_device
*pdev
)
562 struct net_device
*dev
= platform_get_drvdata (pdev
);
563 struct tulip_private
*tp
;
568 tp
= netdev_priv(dev
);
569 unregister_netdev(dev
);
570 dma_free_coherent(&pdev
->dev
,
571 sizeof (struct tulip_rx_desc
) * RX_RING_SIZE
+
572 sizeof (struct tulip_tx_desc
) * TX_RING_SIZE
,
573 tp
->rx_ring
, tp
->rx_ring_dma
);
574 iounmap(tp
->base_addr
);
576 platform_set_drvdata(pdev
, NULL
);
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void poll_tulip (struct net_device *dev)
{
	/* disable_irq here is not very nice, but with the lockless
	   interrupt handler we have no other choice. */
	disable_irq(dev->irq);
	tulip_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
597 static struct platform_driver adm8668net_platform_driver
= {
598 .probe
= adm8668net_probe
,
599 .remove
= __devexit_p(adm8668net_remove
),
601 .owner
= THIS_MODULE
,
602 .name
= "adm8668_eth"
606 static int __init
adm8668net_init(void)
608 pr_info("%s", version
);
609 return platform_driver_register(&adm8668net_platform_driver
);
612 static void __exit
adm8668net_exit(void)
614 platform_driver_unregister(&adm8668net_platform_driver
);
module_init(adm8668net_init);
module_exit(adm8668net_exit);