more AR7 cleanups & fixes
[openwrt/openwrt.git] / target / linux / ar7 / files / drivers / net / cpmac.c
/*
 * Copyright (C) 2006, 2007 OpenWrt.org
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/version.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <asm/ar7/ar7.h>
#include <gpio.h>

MODULE_AUTHOR("Eugene Konev");
MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
MODULE_LICENSE("GPL");

static int rx_ring_size = 64;
static int disable_napi;
module_param(rx_ring_size, int, 0444);
module_param(disable_napi, int, 0);
MODULE_PARM_DESC(rx_ring_size, "Size of rx ring (in skbs)");
MODULE_PARM_DESC(disable_napi, "Disable NAPI polling");

/* Register definitions */
struct cpmac_control_regs {
        u32 revision;
        u32 control;
        u32 teardown;
        u32 unused;
} __attribute__ ((packed));

struct cpmac_int_regs {
        u32 stat_raw;
        u32 stat_masked;
        u32 enable;
        u32 clear;
} __attribute__ ((packed));

struct cpmac_stats {
        u32 good;
        u32 bcast;
        u32 mcast;
        u32 pause;
        u32 crc_error;
        u32 align_error;
        u32 oversized;
        u32 jabber;
        u32 undersized;
        u32 fragment;
        u32 filtered;
        u32 qos_filtered;
        u32 octets;
} __attribute__ ((packed));

struct cpmac_regs {
        struct cpmac_control_regs tx_ctrl;
        struct cpmac_control_regs rx_ctrl;
        u32 unused1[56];
        u32 mbp;
/* MBP bits */
#define MBP_RXPASSCRC 0x40000000
#define MBP_RXQOS 0x20000000
#define MBP_RXNOCHAIN 0x10000000
#define MBP_RXCMF 0x01000000
#define MBP_RXSHORT 0x00800000
#define MBP_RXCEF 0x00400000
#define MBP_RXPROMISC 0x00200000
#define MBP_PROMISCCHAN(chan) (((chan) & 0x7) << 16)
#define MBP_RXBCAST 0x00002000
#define MBP_BCASTCHAN(chan) (((chan) & 0x7) << 8)
#define MBP_RXMCAST 0x00000020
#define MBP_MCASTCHAN(chan) ((chan) & 0x7)
        u32 unicast_enable;
        u32 unicast_clear;
        u32 max_len;
        u32 buffer_offset;
        u32 filter_flow_threshold;
        u32 unused2[2];
        u32 flow_thre[8];
        u32 free_buffer[8];
        u32 mac_control;
#define MAC_TXPTYPE 0x00000200
#define MAC_TXPACE 0x00000040
#define MAC_MII 0x00000020
#define MAC_TXFLOW 0x00000010
#define MAC_RXFLOW 0x00000008
#define MAC_MTEST 0x00000004
#define MAC_LOOPBACK 0x00000002
#define MAC_FDX 0x00000001
        u32 mac_status;
#define MACST_QOS 0x4
#define MACST_RXFLOW 0x2
#define MACST_TXFLOW 0x1
        u32 emc_control;
        u32 unused3;
        struct cpmac_int_regs tx_int;
        u32 mac_int_vector;
/* Int Status bits */
#define INTST_STATUS 0x80000
#define INTST_HOST 0x40000
#define INTST_RX 0x20000
#define INTST_TX 0x10000
        u32 mac_eoi_vector;
        u32 unused4[2];
        struct cpmac_int_regs rx_int;
        u32 mac_int_stat_raw;
        u32 mac_int_stat_masked;
        u32 mac_int_enable;
        u32 mac_int_clear;
        u32 mac_addr_low[8];
        u32 mac_addr_mid;
        u32 mac_addr_high;
        u32 mac_hash_low;
        u32 mac_hash_high;
        u32 boff_test;
        u32 pac_test;
        u32 rx_pause;
        u32 tx_pause;
        u32 unused5[2];
        struct cpmac_stats rx_stats;
        struct cpmac_stats tx_stats;
        u32 unused6[232];
        u32 tx_ptr[8];
        u32 rx_ptr[8];
        u32 tx_ack[8];
        u32 rx_ack[8];
} __attribute__ ((packed));

struct cpmac_mdio_regs {
        u32 version;
        volatile u32 control;
#define MDIOC_IDLE 0x80000000
#define MDIOC_ENABLE 0x40000000
#define MDIOC_PREAMBLE 0x00100000
#define MDIOC_FAULT 0x00080000
#define MDIOC_FAULTDETECT 0x00040000
#define MDIOC_INTTEST 0x00020000
#define MDIOC_CLKDIV(div) ((div) & 0xff)
        volatile u32 alive;
        u32 link;
        struct cpmac_int_regs link_int;
        struct cpmac_int_regs user_int;
        u32 unused[20];
        volatile u32 access;
#define MDIO_BUSY 0x80000000
#define MDIO_WRITE 0x40000000
#define MDIO_REG(reg) (((reg) & 0x1f) << 21)
#define MDIO_PHY(phy) (((phy) & 0x1f) << 16)
#define MDIO_DATA(data) ((data) & 0xffff)
        u32 physel;
} __attribute__ ((packed));

/* Descriptor */
struct cpmac_desc {
        u32 hw_next;
        u32 hw_data;
        u16 buflen;
        u16 bufflags;
        u16 datalen;
        u16 dataflags;
/* Flags bits */
#define CPMAC_SOP 0x8000
#define CPMAC_EOP 0x4000
#define CPMAC_OWN 0x2000
#define CPMAC_EOQ 0x1000
        struct sk_buff *skb;
        struct cpmac_desc *next;
} __attribute__ ((packed));

struct cpmac_priv {
        struct net_device_stats stats;
        spinlock_t lock; /* irq{save,restore} */
        struct sk_buff *skb_pool;
        int free_skbs;
        struct cpmac_desc *rx_head;
        int tx_head, tx_tail;
        struct cpmac_desc *desc_ring;
        struct cpmac_regs *regs;
        struct mii_bus *mii_bus;
        struct phy_device *phy;
        char phy_name[BUS_ID_SIZE];
        struct plat_cpmac_data *config;
        int oldlink, oldspeed, oldduplex;
        u32 msg_enable;
        struct net_device *dev;
        struct work_struct alloc_work;
};

static irqreturn_t cpmac_irq(int, void *);
static void cpmac_reset(struct net_device *dev);
static void cpmac_hw_init(struct net_device *dev);
static int cpmac_stop(struct net_device *dev);
static int cpmac_open(struct net_device *dev);

#undef CPMAC_DEBUG
#define CPMAC_LOW_THRESH 32
#define CPMAC_ALLOC_SIZE 64
#define CPMAC_SKB_SIZE 1518
#define CPMAC_TX_RING_SIZE 8

#ifdef CPMAC_DEBUG
static void cpmac_dump_regs(u32 *base, int count)
{
        int i;
        for (i = 0; i < (count + 3) / 4; i++) {
                if (i % 4 == 0)
                        printk(KERN_DEBUG "\nCPMAC[0x%04x]:", i * 4);
                printk(KERN_DEBUG " 0x%08x", *(base + i));
        }
        printk(KERN_DEBUG "\n");
}

static const char *cpmac_dump_buf(const uint8_t *buf, unsigned size)
{
        static char buffer[3 * 25 + 1];
        char *p = &buffer[0];
        if (size > 20)
                size = 20;
        while (size-- > 0)
                p += sprintf(p, " %02x", *buf++);
        return buffer;
}
#endif

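/*
 * MDIO bus accessors. A transfer is started by writing MDIO_BUSY together
 * with the PHY and register address (plus MDIO_WRITE and the data word for
 * writes) to the access register; the hardware signals completion by
 * clearing MDIO_BUSY.
 */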
static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
{
        struct cpmac_mdio_regs *regs = bus->priv;
        u32 val;

        while ((val = regs->access) & MDIO_BUSY)
                ;
        regs->access = MDIO_BUSY | MDIO_REG(regnum & 0x1f) |
                MDIO_PHY(phy_id & 0x1f);
        while ((val = regs->access) & MDIO_BUSY)
                ;

        return val & 0xffff;
}

static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
                            int regnum, u16 val)
{
        struct cpmac_mdio_regs *regs = bus->priv;
        u32 tmp;

        while ((tmp = regs->access) & MDIO_BUSY)
                ;
        regs->access = MDIO_BUSY | MDIO_WRITE |
                MDIO_REG(regnum & 0x1f) | MDIO_PHY(phy_id & 0x1f) |
                MDIO_DATA(val);

        return 0;
}

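/*
 * Reset the MDIO block and program its clock divider from the CPMAC bus
 * frequency. The divisor below appears to target an MDIO clock of roughly
 * 2.2 MHz, under the 2.5 MHz ceiling of the MII management interface.
 */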
static int cpmac_mdio_reset(struct mii_bus *bus)
{
        struct cpmac_mdio_regs *regs = bus->priv;

        ar7_device_reset(AR7_RESET_BIT_MDIO);
        regs->control = MDIOC_ENABLE |
                MDIOC_CLKDIV(ar7_cpmac_freq() / 2200000 - 1);

        return 0;
}

static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, };

static struct mii_bus cpmac_mii = {
        .name = "cpmac-mii",
        .read = cpmac_mdio_read,
        .write = cpmac_mdio_write,
        .reset = cpmac_mdio_reset,
        .irq = mii_irqs,
};

static int cpmac_config(struct net_device *dev, struct ifmap *map)
{
        if (dev->flags & IFF_UP)
                return -EBUSY;

        /* Don't allow changing the I/O address */
        if (map->base_addr != dev->base_addr)
                return -EOPNOTSUPP;

        /* ignore other fields */
        return 0;
}

static int cpmac_set_mac_address(struct net_device *dev, void *addr)
{
        struct sockaddr *sa = addr;

        if (dev->flags & IFF_UP)
                return -EBUSY;

        memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

        return 0;
}

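/*
 * Program the receive filter: promiscuous mode via the MBP register,
 * all-multi by saturating the hash registers, or a 64-bit multicast hash
 * computed by folding each station address down to a 6-bit bucket index.
 */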
static void cpmac_set_multicast_list(struct net_device *dev)
{
        struct dev_mc_list *iter;
        int i;
        int hash, tmp;
        int hashlo = 0, hashhi = 0;
        struct cpmac_priv *priv = netdev_priv(dev);

        if (dev->flags & IFF_PROMISC) {
                priv->regs->mbp &= ~MBP_PROMISCCHAN(0); /* promisc channel 0 */
                priv->regs->mbp |= MBP_RXPROMISC;
        } else {
                priv->regs->mbp &= ~MBP_RXPROMISC;
                if (dev->flags & IFF_ALLMULTI) {
                        /* enable all multicast mode */
                        priv->regs->mac_hash_low = 0xffffffff;
                        priv->regs->mac_hash_high = 0xffffffff;
                } else {
                        for (i = 0, iter = dev->mc_list; i < dev->mc_count;
                             i++, iter = iter->next) {
                                hash = 0;
                                tmp = iter->dmi_addr[0];
                                hash ^= (tmp >> 2) ^ (tmp << 4);
                                tmp = iter->dmi_addr[1];
                                hash ^= (tmp >> 4) ^ (tmp << 2);
                                tmp = iter->dmi_addr[2];
                                hash ^= (tmp >> 6) ^ tmp;
                                tmp = iter->dmi_addr[3];
                                hash ^= (tmp >> 2) ^ (tmp << 4);
                                tmp = iter->dmi_addr[4];
                                hash ^= (tmp >> 4) ^ (tmp << 2);
                                tmp = iter->dmi_addr[5];
                                hash ^= (tmp >> 6) ^ tmp;
                                hash &= 0x3f;
                                if (hash < 32)
                                        hashlo |= 1 << hash;
                                else
                                        hashhi |= 1 << (hash - 32);
                        }

                        priv->regs->mac_hash_low = hashlo;
                        priv->regs->mac_hash_high = hashhi;
                }
        }
}

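/*
 * Pop an skb from the pre-allocated pool, falling back to a fresh
 * allocation when the pool is empty. A refill worker is scheduled whenever
 * the pool drops below CPMAC_LOW_THRESH.
 */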
static struct sk_buff *cpmac_get_skb(struct net_device *dev)
{
        struct sk_buff *skb;
        struct cpmac_priv *priv = netdev_priv(dev);

        skb = priv->skb_pool;
        if (likely(skb)) {
                priv->skb_pool = skb->next;
        } else {
                skb = dev_alloc_skb(CPMAC_SKB_SIZE + 2);
                if (skb) {
                        skb->next = NULL;
                        skb_reserve(skb, 2);
                        skb->dev = priv->dev;
                }
        }

        if (likely(priv->free_skbs))
                priv->free_skbs--;

        if (priv->free_skbs < CPMAC_LOW_THRESH)
                schedule_work(&priv->alloc_work);

        return skb;
}

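/*
 * Handle one completed rx descriptor: ack it to the hardware, hand the
 * filled skb to the caller and re-arm the descriptor with a replacement
 * skb (or recycle the old one and count a drop if none is available).
 */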
static struct sk_buff *cpmac_rx_one(struct net_device *dev,
                                    struct cpmac_priv *priv,
                                    struct cpmac_desc *desc)
{
        unsigned long flags;
        char *data;
        struct sk_buff *skb, *result = NULL;

        priv->regs->rx_ack[0] = virt_to_phys(desc);
        if (unlikely(!desc->datalen)) {
                if (printk_ratelimit())
                        printk(KERN_WARNING "%s: rx: spurious interrupt\n",
                               dev->name);
                priv->stats.rx_errors++;
                return NULL;
        }

        spin_lock_irqsave(&priv->lock, flags);
        skb = cpmac_get_skb(dev);
        if (likely(skb)) {
                data = (char *)phys_to_virt(desc->hw_data);
                dma_cache_inv((u32)data, desc->datalen);
                skb_put(desc->skb, desc->datalen);
                desc->skb->protocol = eth_type_trans(desc->skb, dev);
                desc->skb->ip_summed = CHECKSUM_NONE;
                priv->stats.rx_packets++;
                priv->stats.rx_bytes += desc->datalen;
                result = desc->skb;
                desc->skb = skb;
        } else {
#ifdef CPMAC_DEBUG
                if (printk_ratelimit())
                        printk(KERN_NOTICE "%s: low on skbs, dropping packet\n",
                               dev->name);
#endif
                priv->stats.rx_dropped++;
        }
        spin_unlock_irqrestore(&priv->lock, flags);

        desc->hw_data = virt_to_phys(desc->skb->data);
        desc->buflen = CPMAC_SKB_SIZE;
        desc->dataflags = CPMAC_OWN;
        dma_cache_wback((u32)desc, 16);

        return result;
}

static void cpmac_rx(struct net_device *dev)
{
        struct sk_buff *skb;
        struct cpmac_desc *desc;
        struct cpmac_priv *priv = netdev_priv(dev);

        spin_lock(&priv->lock);
        if (unlikely(!priv->rx_head)) {
                spin_unlock(&priv->lock);
                return;
        }

        desc = priv->rx_head;
        dma_cache_inv((u32)desc, 16);
#ifdef CPMAC_DEBUG
        printk(KERN_DEBUG "%s: len=%d, %s\n", __func__, desc->datalen,
               cpmac_dump_buf(phys_to_virt(desc->hw_data), desc->datalen));
#endif

        while ((desc->dataflags & CPMAC_OWN) == 0) {
                skb = cpmac_rx_one(dev, priv, desc);
                if (likely(skb))
                        netif_rx(skb);
                desc = desc->next;
                dma_cache_inv((u32)desc, 16);
        }

        priv->rx_head = desc;
        priv->regs->rx_ptr[0] = virt_to_phys(desc);
        spin_unlock(&priv->lock);
}

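/*
 * NAPI poll callback (old dev->poll style: consume up to *budget packets,
 * return nonzero while work remains). Rx interrupts are re-enabled only
 * once the ring has been fully drained.
 */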
static int cpmac_poll(struct net_device *dev, int *budget)
{
        struct sk_buff *skb;
        struct cpmac_desc *desc;
        int received = 0, quota = min(dev->quota, *budget);
        struct cpmac_priv *priv = netdev_priv(dev);

        if (unlikely(!priv->rx_head)) {
                if (printk_ratelimit())
                        printk(KERN_NOTICE "%s: rx: polling, but no queue\n",
                               dev->name);
                netif_rx_complete(dev);
                return 0;
        }

        desc = priv->rx_head;
        dma_cache_inv((u32)desc, 16);

        while ((received < quota) && ((desc->dataflags & CPMAC_OWN) == 0)) {
                skb = cpmac_rx_one(dev, priv, desc);
                if (likely(skb)) {
                        netif_receive_skb(skb);
                        received++;
                }
                desc = desc->next;
                priv->rx_head = desc;
                dma_cache_inv((u32)desc, 16);
        }

        *budget -= received;
        dev->quota -= received;
#ifdef CPMAC_DEBUG
        printk(KERN_DEBUG "%s: processed %d packets\n", dev->name, received);
#endif
        if (desc->dataflags & CPMAC_OWN) {
                priv->regs->rx_ptr[0] = virt_to_phys(desc);
                netif_rx_complete(dev);
                priv->regs->rx_int.enable = 0x1;
                priv->regs->rx_int.clear = 0xfe;
                return 0;
        }

        return 1;
}

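/*
 * Workqueue handler that replenishes the skb pool: allocate a batch of up
 * to CPMAC_ALLOC_SIZE skbs outside atomic context, then splice them onto
 * the tail of the pool under the lock.
 */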
static void cpmac_alloc_skbs(struct work_struct *work)
{
        struct cpmac_priv *priv = container_of(work, struct cpmac_priv,
                                               alloc_work);
        unsigned long flags;
        int i, num_skbs = 0;
        struct sk_buff *skb, *skbs = NULL;

        for (i = 0; i < CPMAC_ALLOC_SIZE; i++) {
                skb = alloc_skb(CPMAC_SKB_SIZE + 2, GFP_KERNEL);
                if (!skb)
                        break;
                skb->next = skbs;
                skb_reserve(skb, 2);
                skb->dev = priv->dev;
                num_skbs++;
                skbs = skb;
        }

        if (skbs) {
                spin_lock_irqsave(&priv->lock, flags);
                for (skb = priv->skb_pool; skb && skb->next; skb = skb->next)
                        ;
                if (!skb)
                        priv->skb_pool = skbs;
                else
                        skb->next = skbs;
                priv->free_skbs += num_skbs;
                spin_unlock_irqrestore(&priv->lock, flags);
#ifdef CPMAC_DEBUG
                printk(KERN_DEBUG "%s: allocated %d skbs\n",
                       priv->dev->name, num_skbs);
#endif
        }
}

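/*
 * Transmit path: packets are assigned round-robin to the 8 tx channels,
 * one single-fragment descriptor per packet. The queue is stopped as soon
 * as the ring wraps around to the oldest outstanding descriptor.
 */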
static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        unsigned long flags;
        int len, chan;
        struct cpmac_desc *desc;
        struct cpmac_priv *priv = netdev_priv(dev);

        len = skb->len;
#ifdef CPMAC_DEBUG
        printk(KERN_DEBUG "%s: len=%d\n", __func__, len);
#endif
        if (unlikely(len < ETH_ZLEN)) {
                if (unlikely(skb_padto(skb, ETH_ZLEN))) {
                        if (printk_ratelimit())
                                printk(KERN_NOTICE
                                       "%s: padding failed, dropping\n",
                                       dev->name);
                        spin_lock_irqsave(&priv->lock, flags);
                        priv->stats.tx_dropped++;
                        spin_unlock_irqrestore(&priv->lock, flags);
                        /* skb_padto() has already freed the skb */
                        return 0;
                }
                len = ETH_ZLEN;
        }
        spin_lock_irqsave(&priv->lock, flags);
        chan = priv->tx_tail++;
        priv->tx_tail %= 8;
        if (priv->tx_tail == priv->tx_head)
                netif_stop_queue(dev);

        desc = &priv->desc_ring[chan];
        dma_cache_inv((u32)desc, 16);
        if (desc->dataflags & CPMAC_OWN) {
                printk(KERN_NOTICE "%s: tx dma ring full, dropping\n",
                       dev->name);
                priv->stats.tx_dropped++;
                spin_unlock_irqrestore(&priv->lock, flags);
                return -ENOMEM;
        }

        dev->trans_start = jiffies;
        desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
        desc->skb = skb;
        desc->hw_data = virt_to_phys(skb->data);
        dma_cache_wback((u32)skb->data, len);
        desc->buflen = len;
        desc->datalen = len;
        desc->hw_next = 0;
        dma_cache_wback((u32)desc, 16);
        priv->regs->tx_ptr[chan] = virt_to_phys(desc);
        spin_unlock_irqrestore(&priv->lock, flags);

        return 0;
}

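/*
 * Tx completion: ack the descriptor, free the transmitted skb and wake
 * the queue. Runs from the interrupt handler.
 */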
static void cpmac_end_xmit(struct net_device *dev, int channel)
{
        struct cpmac_desc *desc;
        struct cpmac_priv *priv = netdev_priv(dev);

        spin_lock(&priv->lock);
        desc = &priv->desc_ring[channel];
        priv->regs->tx_ack[channel] = virt_to_phys(desc);
        if (likely(desc->skb)) {
                priv->stats.tx_packets++;
                priv->stats.tx_bytes += desc->skb->len;
                dev_kfree_skb_irq(desc->skb);
                desc->skb = NULL; /* don't free it again from timeout/stop */
                if (netif_queue_stopped(dev))
                        netif_wake_queue(dev);
        } else {
                if (printk_ratelimit())
                        printk(KERN_NOTICE "%s: end_xmit: spurious interrupt\n",
                               dev->name);
        }
        spin_unlock(&priv->lock);
}

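/*
 * Put the MAC back into a quiescent state: pulse the AR7 reset line, stop
 * both DMA engines, clear the channel pointers and clear the MII enable
 * bit until cpmac_hw_init() reprograms everything.
 */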
static void cpmac_reset(struct net_device *dev)
{
        int i;
        struct cpmac_priv *priv = netdev_priv(dev);

        ar7_device_reset(priv->config->reset_bit);
        priv->regs->rx_ctrl.control &= ~1;
        priv->regs->tx_ctrl.control &= ~1;
        for (i = 0; i < 8; i++) {
                priv->regs->tx_ptr[i] = 0;
                priv->regs->rx_ptr[i] = 0;
        }
        priv->regs->mac_control &= ~MAC_MII; /* disable mii */
}

static inline void cpmac_free_rx_ring(struct net_device *dev)
{
        struct cpmac_desc *desc;
        int i;
        struct cpmac_priv *priv = netdev_priv(dev);

        if (unlikely(!priv->rx_head))
                return;

        desc = priv->rx_head;
        dma_cache_inv((u32)desc, 16);

        for (i = 0; i < rx_ring_size; i++) {
                desc->buflen = CPMAC_SKB_SIZE;
                if ((desc->dataflags & CPMAC_OWN) == 0) {
                        desc->dataflags = CPMAC_OWN;
                        priv->stats.rx_dropped++;
                }
                dma_cache_wback((u32)desc, 16);
                desc = desc->next;
                dma_cache_inv((u32)desc, 16);
        }
}

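/*
 * Interrupt handler: decode mac_int_vector, complete tx channels, kick rx
 * (directly or via NAPI scheduling), and fall back to a full reset on
 * host or status errors.
 */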
static irqreturn_t cpmac_irq(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct cpmac_priv *priv;
        u32 status;

        if (!dev)
                return IRQ_NONE;

        priv = netdev_priv(dev);
        status = priv->regs->mac_int_vector;

        if (status & INTST_TX)
                cpmac_end_xmit(dev, (status & 7));

        if (status & INTST_RX) {
                if (disable_napi) {
                        cpmac_rx(dev);
                } else {
                        priv->regs->rx_int.enable = 0;
                        priv->regs->rx_int.clear = 0xff;
                        netif_rx_schedule(dev);
                }
        }

        priv->regs->mac_eoi_vector = 0;

        if (unlikely(status & (INTST_HOST | INTST_STATUS))) {
                if (printk_ratelimit())
                        printk(KERN_ERR "%s: hw error, resetting...\n",
                               dev->name);
                spin_lock(&priv->lock);
                phy_stop(priv->phy);
                cpmac_reset(dev);
                cpmac_free_rx_ring(dev);
                cpmac_hw_init(dev);
                spin_unlock(&priv->lock);
        }

        return IRQ_HANDLED;
}

static void cpmac_tx_timeout(struct net_device *dev)
{
        struct cpmac_priv *priv = netdev_priv(dev);
        struct cpmac_desc *desc;

        priv->stats.tx_errors++;
        desc = &priv->desc_ring[priv->tx_head++];
        priv->tx_head %= 8;
        printk(KERN_NOTICE "%s: transmit timeout\n", dev->name);
        if (desc->skb) {
                /* the watchdog runs in softirq context, so no plain kfree */
                dev_kfree_skb_any(desc->skb);
                desc->skb = NULL;
        }
        netif_wake_queue(dev);
}

static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct cpmac_priv *priv = netdev_priv(dev);

        if (!(netif_running(dev)))
                return -EINVAL;
        if (!priv->phy)
                return -EINVAL;
        if ((cmd == SIOCGMIIPHY) || (cmd == SIOCGMIIREG) ||
            (cmd == SIOCSMIIREG))
                return phy_mii_ioctl(priv->phy, if_mii(ifr), cmd);

        return -EINVAL;
}

static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct cpmac_priv *priv = netdev_priv(dev);

        if (priv->phy)
                return phy_ethtool_gset(priv->phy, cmd);

        return -EINVAL;
}

static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct cpmac_priv *priv = netdev_priv(dev);

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (priv->phy)
                return phy_ethtool_sset(priv->phy, cmd);

        return -EINVAL;
}

static void cpmac_get_drvinfo(struct net_device *dev,
                              struct ethtool_drvinfo *info)
{
        strcpy(info->driver, "cpmac");
        strcpy(info->version, "0.0.3");
        info->fw_version[0] = '\0';
        sprintf(info->bus_info, "%s", "cpmac");
        info->regdump_len = 0;
}

static const struct ethtool_ops cpmac_ethtool_ops = {
        .get_settings = cpmac_get_settings,
        .set_settings = cpmac_set_settings,
        .get_drvinfo = cpmac_get_drvinfo,
        .get_link = ethtool_op_get_link,
};

static struct net_device_stats *cpmac_stats(struct net_device *dev)
{
        struct cpmac_priv *priv = netdev_priv(dev);

        if (netif_device_present(dev))
                return &priv->stats;

        return NULL;
}

static int cpmac_change_mtu(struct net_device *dev, int mtu)
{
        unsigned long flags;
        struct cpmac_priv *priv = netdev_priv(dev);
        spinlock_t *lock = &priv->lock;

        if ((mtu < 68) || (mtu > 1500))
                return -EINVAL;

        spin_lock_irqsave(lock, flags);
        dev->mtu = mtu;
        spin_unlock_irqrestore(lock, flags);

        return 0;
}

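/*
 * phylib link-change callback: track speed/duplex/link transitions and
 * log them via phy_print_status().
 */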
static void cpmac_adjust_link(struct net_device *dev)
{
        struct cpmac_priv *priv = netdev_priv(dev);
        unsigned long flags;
        int new_state = 0;

        spin_lock_irqsave(&priv->lock, flags);
        if (priv->phy->link) {
                if (priv->phy->duplex != priv->oldduplex) {
                        new_state = 1;
                        priv->oldduplex = priv->phy->duplex;
                }

                if (priv->phy->speed != priv->oldspeed) {
                        new_state = 1;
                        priv->oldspeed = priv->phy->speed;
                }

                if (!priv->oldlink) {
                        new_state = 1;
                        priv->oldlink = 1;
                        netif_schedule(dev);
                }
        } else if (priv->oldlink) {
                new_state = 1;
                priv->oldlink = 0;
                priv->oldspeed = 0;
                priv->oldduplex = -1;
        }

        if (new_state)
                phy_print_status(priv->phy);

        spin_unlock_irqrestore(&priv->lock, flags);
}

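/*
 * Program the freshly reset MAC: hand the rx ring to channel 0, set the
 * frame filter and station address, unmask rx channel 0 and all tx channel
 * interrupts, then enable both DMA engines and the MII state machine.
 */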
static void cpmac_hw_init(struct net_device *dev)
{
        int i;
        struct cpmac_priv *priv = netdev_priv(dev);

        for (i = 0; i < 8; i++)
                priv->regs->tx_ptr[i] = 0;
        priv->regs->rx_ptr[0] = virt_to_phys(priv->rx_head);

        priv->regs->mbp = MBP_RXSHORT | MBP_RXBCAST | MBP_RXMCAST;
        priv->regs->unicast_enable = 0x1;
        priv->regs->unicast_clear = 0xfe;
        priv->regs->buffer_offset = 0;
        for (i = 0; i < 8; i++)
                priv->regs->mac_addr_low[i] = dev->dev_addr[5];
        priv->regs->mac_addr_mid = dev->dev_addr[4];
        priv->regs->mac_addr_high = dev->dev_addr[0] | (dev->dev_addr[1] << 8)
                | (dev->dev_addr[2] << 16) | (dev->dev_addr[3] << 24);
        priv->regs->max_len = CPMAC_SKB_SIZE;
        priv->regs->rx_int.enable = 0x1;
        priv->regs->rx_int.clear = 0xfe;
        priv->regs->tx_int.enable = 0xff;
        priv->regs->tx_int.clear = 0;
        priv->regs->mac_int_enable = 3;
        priv->regs->mac_int_clear = 0xfc;

        priv->regs->rx_ctrl.control |= 1;
        priv->regs->tx_ctrl.control |= 1;
        priv->regs->mac_control |= MAC_MII | MAC_FDX;

        priv->phy->state = PHY_CHANGELINK;
        phy_start(priv->phy);
}

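/*
 * Bring the interface up: attach the PHY, map the register window,
 * allocate the descriptor ring and rx skbs, request the irq and start the
 * hardware. Unwinds in reverse order on failure.
 */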
static int cpmac_open(struct net_device *dev)
{
        int i, size, res;
        struct cpmac_priv *priv = netdev_priv(dev);
        struct cpmac_desc *desc;
        struct sk_buff *skb;

        priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link,
                                0, PHY_INTERFACE_MODE_MII);
        if (IS_ERR(priv->phy)) {
                printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
                return PTR_ERR(priv->phy);
        }

        if (!request_mem_region(dev->mem_start, dev->mem_end -
                                dev->mem_start, dev->name)) {
                printk(KERN_ERR "%s: failed to request registers\n",
                       dev->name);
                res = -ENXIO;
                goto fail_reserve;
        }

        priv->regs = ioremap_nocache(dev->mem_start, dev->mem_end -
                                     dev->mem_start);
        if (!priv->regs) {
                printk(KERN_ERR "%s: failed to remap registers\n", dev->name);
                res = -ENXIO;
                goto fail_remap;
        }

        priv->rx_head = NULL;
        size = sizeof(struct cpmac_desc) * (rx_ring_size +
                                            CPMAC_TX_RING_SIZE);
        priv->desc_ring = kzalloc(size, GFP_KERNEL);
        if (!priv->desc_ring) {
                res = -ENOMEM;
                goto fail_alloc;
        }

        priv->skb_pool = NULL;
        priv->free_skbs = 0;
        priv->rx_head = &priv->desc_ring[CPMAC_TX_RING_SIZE];

        INIT_WORK(&priv->alloc_work, cpmac_alloc_skbs);
        schedule_work(&priv->alloc_work);
        flush_scheduled_work();

        for (i = 0; i < rx_ring_size; i++) {
                desc = &priv->rx_head[i];
                skb = cpmac_get_skb(dev);
                if (!skb) {
                        res = -ENOMEM;
                        goto fail_desc;
                }
                desc->skb = skb;
                desc->hw_data = virt_to_phys(skb->data);
                desc->buflen = CPMAC_SKB_SIZE;
                desc->dataflags = CPMAC_OWN;
                desc->next = &priv->rx_head[(i + 1) % rx_ring_size];
                desc->hw_next = virt_to_phys(desc->next);
                dma_cache_wback((u32)desc, 16);
        }

        res = request_irq(dev->irq, cpmac_irq, SA_INTERRUPT, dev->name, dev);
        if (res) {
                printk(KERN_ERR "%s: failed to obtain irq\n", dev->name);
                goto fail_irq;
        }

        cpmac_reset(dev);
        cpmac_hw_init(dev);

        netif_start_queue(dev);
        return 0;

fail_irq:
fail_desc:
        for (i = 0; i < rx_ring_size; i++)
                if (priv->rx_head[i].skb)
                        kfree_skb(priv->rx_head[i].skb);
fail_alloc:
        kfree(priv->desc_ring);

        for (skb = priv->skb_pool; skb; skb = priv->skb_pool) {
                priv->skb_pool = skb->next;
                kfree_skb(skb);
        }

        iounmap(priv->regs);

fail_remap:
        release_mem_region(dev->mem_start, dev->mem_end -
                           dev->mem_start);

fail_reserve:
        phy_disconnect(priv->phy);

        return res;
}

static int cpmac_stop(struct net_device *dev)
{
        int i;
        struct sk_buff *skb;
        struct cpmac_priv *priv = netdev_priv(dev);

        netif_stop_queue(dev);

        phy_stop(priv->phy);
        phy_disconnect(priv->phy);
        priv->phy = NULL;

        cpmac_reset(dev);

        for (i = 0; i < 8; i++) {
                priv->regs->rx_ptr[i] = 0;
                priv->regs->tx_ptr[i] = 0;
        }
        priv->regs->mbp = 0;

        free_irq(dev->irq, dev);
        iounmap(priv->regs);
        release_mem_region(dev->mem_start, dev->mem_end -
                           dev->mem_start);

        /* alloc_work is plain (not delayed) work; flushing is sufficient */
        flush_scheduled_work();

        priv->rx_head = &priv->desc_ring[CPMAC_TX_RING_SIZE];
        for (i = 0; i < rx_ring_size; i++)
                if (priv->rx_head[i].skb)
                        kfree_skb(priv->rx_head[i].skb);

        kfree(priv->desc_ring);

        for (skb = priv->skb_pool; skb; skb = priv->skb_pool) {
                priv->skb_pool = skb->next;
                kfree_skb(skb);
        }

        return 0;
}

static int external_switch;

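/*
 * Probe: pick the first PHY allowed by the platform mask (or fall back to
 * address 0 when an external switch was detected), then fill in the
 * net_device callbacks and register the interface.
 */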
static int __devinit cpmac_probe(struct platform_device *pdev)
{
        int i, rc, phy_id;
        struct resource *res;
        struct cpmac_priv *priv;
        struct net_device *dev;
        struct plat_cpmac_data *pdata;

        pdata = pdev->dev.platform_data;

        for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
                if (!(pdata->phy_mask & (1 << phy_id)))
                        continue;
                if (!cpmac_mii.phy_map[phy_id])
                        continue;
                break;
        }

        if (phy_id == PHY_MAX_ADDR) {
                if (external_switch) {
                        phy_id = 0;
                } else {
                        printk(KERN_ERR "cpmac: no PHY present\n");
                        return -ENODEV;
                }
        }

        dev = alloc_etherdev(sizeof(struct cpmac_priv));
        if (!dev) {
                printk(KERN_ERR
                       "cpmac: Unable to allocate net_device structure!\n");
                return -ENOMEM;
        }

        SET_MODULE_OWNER(dev);
        platform_set_drvdata(pdev, dev);
        priv = netdev_priv(dev);

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
        if (!res) {
                rc = -ENODEV;
                goto fail;
        }

        dev->mem_start = res->start;
        dev->mem_end = res->end;
        dev->irq = platform_get_irq_byname(pdev, "irq");

        dev->mtu = 1500;
        dev->open = cpmac_open;
        dev->stop = cpmac_stop;
        dev->set_config = cpmac_config;
        dev->hard_start_xmit = cpmac_start_xmit;
        dev->do_ioctl = cpmac_ioctl;
        dev->get_stats = cpmac_stats;
        dev->change_mtu = cpmac_change_mtu;
        dev->set_mac_address = cpmac_set_mac_address;
        dev->set_multicast_list = cpmac_set_multicast_list;
        dev->tx_timeout = cpmac_tx_timeout;
        dev->ethtool_ops = &cpmac_ethtool_ops;
        if (!disable_napi) {
                dev->poll = cpmac_poll;
                dev->weight = min(rx_ring_size, 64);
        }

        spin_lock_init(&priv->lock);
        priv->msg_enable = netif_msg_init(NETIF_MSG_WOL, 0x3fff);
        priv->config = pdata;
        priv->dev = dev;
        /* copy only addr_len (6) bytes; dev->dev_addr is MAX_ADDR_LEN wide */
        memcpy(dev->dev_addr, priv->config->dev_addr, dev->addr_len);
        if (phy_id == 31)
                snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT,
                         cpmac_mii.id, phy_id);
        else
                snprintf(priv->phy_name, BUS_ID_SIZE, "fixed@%d:%d", 100, 1);

        rc = register_netdev(dev);
        if (rc) {
                printk(KERN_ERR "cpmac: error %i registering device %s\n",
                       rc, dev->name);
                goto fail;
        }

        printk(KERN_INFO "cpmac: device %s (regs: %p, irq: %d, phy: %s, mac: ",
               dev->name, (u32 *)dev->mem_start, dev->irq,
               priv->phy_name);
        for (i = 0; i < 6; i++)
                printk("%02x%s", dev->dev_addr[i], i < 5 ? ":" : ")\n");

        return 0;

fail:
        free_netdev(dev);
        return rc;
}

static int __devexit cpmac_remove(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);

        unregister_netdev(dev);
        free_netdev(dev);

        return 0;
}

static struct platform_driver cpmac_driver = {
        .driver.name = "cpmac",
        .probe = cpmac_probe,
        .remove = __devexit_p(cpmac_remove),
};

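/*
 * Module init: map the MDIO registers, release the CPMACs and EPHY from
 * reset, probe the bus for live PHYs (more than one bit set in the alive
 * mask is taken to mean an external switch), then register the MII bus
 * and the platform driver.
 */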
int __devinit cpmac_init(void)
{
        u32 mask;
        int i, res;

        cpmac_mii.priv = ioremap_nocache(AR7_REGS_MDIO,
                                         sizeof(struct cpmac_mdio_regs));
        if (!cpmac_mii.priv) {
                printk(KERN_ERR "Can't ioremap mdio registers\n");
                return -ENXIO;
        }

#warning FIXME: unhardcode gpio&reset bits
        ar7_gpio_disable(26);
        ar7_gpio_disable(27);
        ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
        ar7_device_reset(AR7_RESET_BIT_CPMAC_HI);
        ar7_device_reset(AR7_RESET_BIT_EPHY);

        cpmac_mii.reset(&cpmac_mii);

        for (i = 0; i < 300000; i++) {
                mask = ((struct cpmac_mdio_regs *)cpmac_mii.priv)->alive;
                if (mask)
                        break;
        }

        mask &= 0x7fffffff;
        if (mask & (mask - 1)) {
                external_switch = 1;
                mask = 0;
        }

        cpmac_mii.phy_mask = ~(mask | 0x80000000);

        res = mdiobus_register(&cpmac_mii);
        if (res)
                goto fail_mii;

        res = platform_driver_register(&cpmac_driver);
        if (res)
                goto fail_cpmac;

        return 0;

fail_cpmac:
        mdiobus_unregister(&cpmac_mii);

fail_mii:
        iounmap(cpmac_mii.priv);

        return res;
}

void __devexit cpmac_exit(void)
{
        platform_driver_unregister(&cpmac_driver);
        mdiobus_unregister(&cpmac_mii);
}

module_init(cpmac_init);
module_exit(cpmac_exit);