parse mac address on RouterBOARDs
[openwrt/openwrt.git] / target / linux / ar71xx / files / drivers / net / ag71xx / ag71xx_main.c
1 /*
2 * Atheros AR71xx built-in ethernet mac driver
3 *
4 * Copyright (C) 2008 Gabor Juhos <juhosg@openwrt.org>
5 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
6 *
7 * Based on Atheros' AG7100 driver
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License version 2 as published
11 * by the Free Software Foundation.
12 */
13
14 #include "ag71xx.h"
15
16 static void ag71xx_dump_regs(struct ag71xx *ag)
17 {
18 DBG("%s: mac_cfg1=%08x, mac_cfg2=%08x, ipg=%08x, hdx=%08x, mfl=%08x\n",
19 ag->dev->name,
20 ag71xx_rr(ag, AG71XX_REG_MAC_CFG1),
21 ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
22 ag71xx_rr(ag, AG71XX_REG_MAC_IPG),
23 ag71xx_rr(ag, AG71XX_REG_MAC_HDX),
24 ag71xx_rr(ag, AG71XX_REG_MAC_MFL));
25 DBG("%s: mii_cfg=%08x, mii_cmd=%08x, mii_addr=%08x\n",
26 ag->dev->name,
27 ag71xx_rr(ag, AG71XX_REG_MII_CFG),
28 ag71xx_rr(ag, AG71XX_REG_MII_CMD),
29 ag71xx_rr(ag, AG71XX_REG_MII_ADDR));
30 DBG("%s: mii_ctrl=%08x, mii_status=%08x, mii_ind=%08x\n",
31 ag->dev->name,
32 ag71xx_rr(ag, AG71XX_REG_MII_CTRL),
33 ag71xx_rr(ag, AG71XX_REG_MII_STATUS),
34 ag71xx_rr(ag, AG71XX_REG_MII_IND));
35 DBG("%s: mac_ifctl=%08x, mac_addr1=%08x, mac_addr2=%08x\n",
36 ag->dev->name,
37 ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL),
38 ag71xx_rr(ag, AG71XX_REG_MAC_ADDR1),
39 ag71xx_rr(ag, AG71XX_REG_MAC_ADDR2));
40 DBG("%s: fifo_cfg0=%08x, fifo_cfg1=%08x, fifo_cfg2=%08x\n",
41 ag->dev->name,
42 ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0),
43 ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
44 ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2));
45 DBG("%s: fifo_cfg3=%08x, fifo_cfg3=%08x, fifo_cfg5=%08x\n",
46 ag->dev->name,
47 ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
48 ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
49 ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));
50 }
51
52 static void ag71xx_ring_free(struct ag71xx_ring *ring)
53 {
54 kfree(ring->buf);
55
56 if (ring->descs)
57 dma_free_coherent(NULL, ring->size * sizeof(*ring->descs),
58 ring->descs, ring->descs_dma);
59 }
60
61 static int ag71xx_ring_alloc(struct ag71xx_ring *ring, unsigned int size)
62 {
63 int err;
64
65 ring->descs = dma_alloc_coherent(NULL, size * sizeof(*ring->descs),
66 &ring->descs_dma,
67 GFP_ATOMIC);
68 if (!ring->descs) {
69 err = -ENOMEM;
70 goto err;
71 }
72
73 ring->size = size;
74
75 ring->buf = kzalloc(size * sizeof(*ring->buf), GFP_KERNEL);
76 if (!ring->buf) {
77 err = -ENOMEM;
78 goto err;
79 }
80
81 return 0;
82
83 err:
84 return err;
85 }
86
/*
 * Drop every pending TX buffer between the dirty and current positions.
 * Descriptors the hardware never completed are reset and counted as TX
 * errors; queued skbs are freed unconditionally.
 */
static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct net_device *dev = ag->dev;

	while (ring->curr != ring->dirty) {
		u32 i = ring->dirty % AG71XX_TX_RING_SIZE;

		/* non-empty descriptor: the MAC still owned this packet */
		if (!ag71xx_desc_empty(&ring->descs[i])) {
			ring->descs[i].ctrl = 0;
			dev->stats.tx_errors++;
		}

		if (ring->buf[i].skb)
			dev_kfree_skb_any(ring->buf[i].skb);

		ring->buf[i].skb = NULL;

		ring->dirty++;
	}

	/* flush descriptors */
	wmb();

}
112
113 static void ag71xx_ring_tx_init(struct ag71xx *ag)
114 {
115 struct ag71xx_ring *ring = &ag->tx_ring;
116 int i;
117
118 for (i = 0; i < AG71XX_TX_RING_SIZE; i++) {
119 ring->descs[i].next = (u32) (ring->descs_dma +
120 sizeof(*ring->descs) * ((i + 1) % AG71XX_TX_RING_SIZE));
121
122 ring->descs[i].ctrl = DESC_EMPTY;
123 ring->buf[i].skb = NULL;
124 }
125
126 /* flush descriptors */
127 wmb();
128
129 ring->curr = 0;
130 ring->dirty = 0;
131 }
132
133 static void ag71xx_ring_rx_clean(struct ag71xx *ag)
134 {
135 struct ag71xx_ring *ring = &ag->rx_ring;
136 int i;
137
138 if (!ring->buf)
139 return;
140
141 for (i = 0; i < AG71XX_RX_RING_SIZE; i++)
142 if (ring->buf[i].skb)
143 kfree_skb(ring->buf[i].skb);
144
145 }
146
/*
 * Chain the RX descriptors into a ring and attach a freshly allocated
 * receive skb to each slot.
 *
 * Returns 0 on success or -ENOMEM when an skb allocation fails; in that
 * case the slots filled so far are left in place (cleanup is expected
 * via ag71xx_ring_rx_clean()).
 */
static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	unsigned int i;
	int ret;

	ret = 0;
	/* link each descriptor to the physical address of its successor */
	for (i = 0; i < AG71XX_RX_RING_SIZE; i++)
		ring->descs[i].next = (u32) (ring->descs_dma +
			sizeof(*ring->descs) * ((i + 1) % AG71XX_RX_RING_SIZE));

	for (i = 0; i < AG71XX_RX_RING_SIZE; i++) {
		struct sk_buff *skb;

		skb = dev_alloc_skb(AG71XX_RX_PKT_SIZE);
		if (!skb) {
			ret = -ENOMEM;
			break;
		}

		skb->dev = ag->dev;
		/* keep AG71XX_RX_PKT_RESERVE bytes of headroom ahead of the
		 * DMA target area */
		skb_reserve(skb, AG71XX_RX_PKT_RESERVE);

		ring->buf[i].skb = skb;
		ring->descs[i].data = virt_to_phys(skb->data);
		ring->descs[i].ctrl = DESC_EMPTY;
	}

	/* flush descriptors */
	wmb();

	ring->curr = 0;
	ring->dirty = 0;

	return ret;
}
183
/*
 * Re-arm consumed RX descriptors between dirty and curr: allocate a new
 * skb for any slot whose buffer was handed to the stack, then mark the
 * descriptor empty so the MAC can fill it again.
 *
 * Returns the number of descriptors handed back to the hardware.
 */
static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
	struct ag71xx_ring *ring = &ag->rx_ring;
	unsigned int count;

	count = 0;
	for (; ring->curr - ring->dirty > 0; ring->dirty++) {
		unsigned int i;

		i = ring->dirty % AG71XX_RX_RING_SIZE;

		if (ring->buf[i].skb == NULL) {
			struct sk_buff *skb;

			skb = dev_alloc_skb(AG71XX_RX_PKT_SIZE);
			if (skb == NULL) {
				printk(KERN_ERR "%s: no memory for skb\n",
					ag->dev->name);
				/* slot stays unarmed; retried next refill */
				break;
			}

			skb_reserve(skb, AG71XX_RX_PKT_RESERVE);
			skb->dev = ag->dev;
			ring->buf[i].skb = skb;
			ring->descs[i].data = virt_to_phys(skb->data);
		}

		ring->descs[i].ctrl = DESC_EMPTY;
		count++;
	}

	/* flush descriptors */
	wmb();

	DBG("%s: %u rx descriptors refilled\n", ag->dev->name, count);

	return count;
}
222
223 static int ag71xx_rings_init(struct ag71xx *ag)
224 {
225 int ret;
226
227 ret = ag71xx_ring_alloc(&ag->tx_ring, AG71XX_TX_RING_SIZE);
228 if (ret)
229 return ret;
230
231 ag71xx_ring_tx_init(ag);
232
233 ret = ag71xx_ring_alloc(&ag->rx_ring, AG71XX_RX_RING_SIZE);
234 if (ret)
235 return ret;
236
237 ret = ag71xx_ring_rx_init(ag);
238 return ret;
239 }
240
/*
 * Tear both rings down.  Each clean step runs before the matching free
 * so the attached skbs are released while ring->buf is still valid.
 */
static void ag71xx_rings_cleanup(struct ag71xx *ag)
{
	ag71xx_ring_rx_clean(ag);
	ag71xx_ring_free(&ag->rx_ring);

	ag71xx_ring_tx_clean(ag);
	ag71xx_ring_free(&ag->tx_ring);
}
249
250 static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
251 {
252 u32 t;
253
254 t = (((u32) mac[0]) << 24) | (((u32) mac[1]) << 16)
255 | (((u32) mac[2]) << 8) | ((u32) mac[2]);
256
257 ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);
258
259 t = (((u32) mac[4]) << 24) | (((u32) mac[5]) << 16);
260 ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
261 }
262
/* operating MAC_CFG1 value: RX/TX enabled, both store paths started */
#define MAC_CFG1_INIT (MAC_CFG1_RXE | MAC_CFG1_TXE | MAC_CFG1_SRX \
		       | MAC_CFG1_STX)

/*
 * Full hardware (re)initialisation: soft-reset the MAC core, pulse the
 * external reset line for this unit, then program the MAC, MII and FIFO
 * registers with their working defaults.  The delays bracket the reset
 * steps; do not reorder.
 */
static void ag71xx_hw_init(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);

	/* soft reset the MAC core */
	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
	udelay(20);

	/* pulse the device reset bit for this MAC */
	ar71xx_device_stop(pdata->reset_bit);
	mdelay(100);
	ar71xx_device_start(pdata->reset_bit);
	mdelay(100);

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_INIT);

	/* TODO: set max packet size */

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);

	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, 0x00001f00);

	/* select the MII interface mode requested by the board */
	ag71xx_mii_ctrl_set_if(ag, pdata->mii_if);

	ag71xx_wr(ag, AG71XX_REG_MII_CFG, MII_CFG_CLK_DIV_28);

	/* FIFO threshold values; magic constants — TODO document against
	 * the AR71xx datasheet */
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, 0x0fff0000);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, 0x00001fff);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, 0x0000ffff);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, 0x0007ffef);
}
296
/* Start reception and unmask the driver's interrupt sources. */
static void ag71xx_hw_start(struct ag71xx *ag)
{
	/* start RX engine */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);

	/* enable interrupts */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);
}
305
/* Quiesce the MAC: halt both DMA engines and mask every interrupt. */
static void ag71xx_hw_stop(struct ag71xx *ag)
{
	/* stop RX and TX */
	ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);

	/* disable all interrupts */
	ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
}
315
/*
 * net_device open(): set up the DMA rings, enable NAPI, bring up the
 * PHY (or assume a fixed 100/full link when none is attached), point
 * the MAC at the descriptor chains and start the hardware.
 *
 * Returns 0 on success or the ring-setup errno (rings are cleaned up
 * on that path).
 */
static int ag71xx_open(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	int ret;

	ret = ag71xx_rings_init(ag);
	if (ret)
		goto err;

	napi_enable(&ag->napi);

	netif_carrier_off(dev);
	if (ag->phy_dev) {
		phy_start(ag->phy_dev);
	} else {
		/* no PHY bound: report a forced 100 Mbit full-duplex link */
		ag->duplex = DUPLEX_FULL;
		ag->speed = SPEED_100;
		ag->link = 1;
		ag71xx_link_update(ag);
	}

	/* hand the descriptor ring base addresses to the MAC */
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);

	ag71xx_hw_set_macaddr(ag, dev->dev_addr);

	ag71xx_hw_start(ag);

	netif_start_queue(dev);

	return 0;

err:
	ag71xx_rings_cleanup(ag);
	return ret;
}
352
/*
 * net_device stop(): halt the queue and the hardware, stop the PHY (or
 * clear the forced link state), disable NAPI, then release the rings.
 * Ring cleanup runs outside the lock, after the MAC is quiesced.
 */
static int ag71xx_stop(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&ag->lock, flags);

	netif_stop_queue(dev);

	ag71xx_hw_stop(ag);

	netif_carrier_off(dev);
	if (ag->phy_dev) {
		phy_stop(ag->phy_dev);
	} else {
		/* mirror the forced-link defaults set in ag71xx_open() */
		ag->duplex = -1;
		ag->link = 0;
		ag->speed = 0;
		ag71xx_link_update(ag);
	}

	napi_disable(&ag->napi);

	spin_unlock_irqrestore(&ag->lock, flags);

	ag71xx_rings_cleanup(ag);

	return 0;
}
382
/*
 * Queue one skb for transmission.
 *
 * The packet is dropped (accounted in tx_dropped) when the next
 * descriptor is still busy or the length is invalid; otherwise the
 * descriptor is published and the TX engine kicked.  Always returns 0.
 */
static int ag71xx_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct ag71xx_desc *desc;
	unsigned long flags;
	int i;

	i = ring->curr % AG71XX_TX_RING_SIZE;
	desc = &ring->descs[i];

	/* lock only guards the DDR flush register access */
	spin_lock_irqsave(&ag->lock, flags);
	ar71xx_ddr_flush(pdata->flush_reg);
	spin_unlock_irqrestore(&ag->lock, flags);

	/* next descriptor still owned by the MAC: no room */
	if (!ag71xx_desc_empty(desc))
		goto err_drop;

	if (skb->len <= 0) {
		DBG("%s: packet len is too small\n", ag->dev->name);
		goto err_drop;
	}

	/* write the packet data back to memory for the DMA engine */
	dma_cache_wback_inv((unsigned long)skb->data, skb->len);

	ring->buf[i].skb = skb;

	/* setup descriptor fields */
	desc->data = virt_to_phys(skb->data);
	desc->ctrl = (skb->len & DESC_PKTLEN_M);

	/* flush descriptor */
	wmb();

	ring->curr++;
	if (ring->curr == (ring->dirty + AG71XX_TX_THRES_STOP)) {
		DBG("%s: tx queue full\n", ag->dev->name);
		netif_stop_queue(dev);
	}

	DBG("%s: packet injected into TX queue\n", ag->dev->name);

	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

	dev->trans_start = jiffies;

	return 0;

err_drop:
	dev->stats.tx_dropped++;

	dev_kfree_skb(skb);
	return 0;
}
439
/*
 * Private ioctl handler.  Ethtool and MII requests are forwarded to the
 * attached PHY (falling through to -EOPNOTSUPP when none is bound);
 * SIOC[SG]IFHWADDR copy the MAC address between user space and
 * dev->dev_addr.
 */
static int ag71xx_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	/* if_mii()-style view: ifr_data's storage reinterpreted as MII data */
	struct mii_ioctl_data *data = (struct mii_ioctl_data *) &ifr->ifr_data;
	struct ag71xx *ag = netdev_priv(dev);
	int ret;

	switch (cmd) {
	case SIOCETHTOOL:
		if (ag->phy_dev == NULL)
			break;

		spin_lock_irq(&ag->lock);
		ret = phy_ethtool_ioctl(ag->phy_dev, (void *) ifr->ifr_data);
		spin_unlock_irq(&ag->lock);
		return ret;

	case SIOCSIFHWADDR:
		/* NOTE(review): ifr_data is treated as a user pointer to the
		 * raw address bytes — confirm callers pass it this way */
		if (copy_from_user
			(dev->dev_addr, ifr->ifr_data, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGIFHWADDR:
		if (copy_to_user
			(ifr->ifr_data, dev->dev_addr, sizeof(dev->dev_addr)))
			return -EFAULT;
		return 0;

	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (ag->phy_dev == NULL)
			break;

		return phy_mii_ioctl(ag->phy_dev, data, cmd);

	default:
		break;
	}

	return -EOPNOTSUPP;
}
482
/*
 * Reclaim TX descriptors the MAC has completed: free the skbs, update
 * byte/packet counters and advance the dirty index.  Wakes the transmit
 * queue once enough ring space is available again.
 */
static void ag71xx_tx_packets(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct ag71xx_ring *ring = &ag->tx_ring;
	unsigned int sent;

	DBG("%s: processing TX ring\n", ag->dev->name);

#ifdef AG71XX_NAPI_TX
	ar71xx_ddr_flush(pdata->flush_reg);
#endif

	sent = 0;
	while (ring->dirty != ring->curr) {
		unsigned int i = ring->dirty % AG71XX_TX_RING_SIZE;
		struct ag71xx_desc *desc = &ring->descs[i];
		struct sk_buff *skb = ring->buf[i].skb;

		/* stop at the first descriptor still owned by the MAC */
		if (!ag71xx_desc_empty(desc))
			break;

		/* ack one "packet sent" status event */
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);

		ag->dev->stats.tx_bytes += skb->len;
		ag->dev->stats.tx_packets++;

		dev_kfree_skb_any(skb);
		ring->buf[i].skb = NULL;

		ring->dirty++;
		sent++;
	}

	DBG("%s: %d packets sent out\n", ag->dev->name, sent);

	if ((ring->curr - ring->dirty) < AG71XX_TX_THRES_WAKEUP)
		netif_wake_queue(ag->dev);

}
522
/*
 * NAPI receive path: pull up to @limit completed packets from the RX
 * ring, hand them to the network stack and refill the ring as it
 * drains.  Returns the number of packets processed.
 */
static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *ring = &ag->rx_ring;
#ifndef AG71XX_NAPI_TX
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	unsigned long flags;
#endif
	int done = 0;

#ifndef AG71XX_NAPI_TX
	spin_lock_irqsave(&ag->lock, flags);
	ar71xx_ddr_flush(pdata->flush_reg);
	spin_unlock_irqrestore(&ag->lock, flags);
#endif

	DBG("%s: rx packets, limit=%d, curr=%u, dirty=%u\n",
		dev->name, limit, ring->curr, ring->dirty);

	while (done < limit) {
		unsigned int i = ring->curr % AG71XX_RX_RING_SIZE;
		struct ag71xx_desc *desc = &ring->descs[i];
		struct sk_buff *skb;
		int pktlen;

		if (ag71xx_desc_empty(desc))
			break;

		/* entire ring pending means refill fell behind — bail out */
		if ((ring->dirty + AG71XX_RX_RING_SIZE) == ring->curr) {
			ag71xx_assert(0);
			break;
		}

		skb = ring->buf[i].skb;
		pktlen = ag71xx_desc_pktlen(desc);
		/* strip the trailing frame checksum */
		pktlen -= ETH_FCS_LEN;

		/* TODO: move it into the refill function */
		dma_cache_wback_inv((unsigned long)skb->data, pktlen);
		skb_put(skb, pktlen);

		skb->dev = dev;
		skb->protocol = eth_type_trans(skb, dev);
		/* NOTE(review): checksum claimed good unconditionally —
		 * confirm the MAC actually verifies it */
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		netif_receive_skb(skb);

		dev->last_rx = jiffies;
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pktlen;

		ring->buf[i].skb = NULL;
		done++;

		/* ack one "packet received" status event */
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		ring->curr++;
		/* refill early once a quarter of the ring has drained */
		if ((ring->curr - ring->dirty) > (AG71XX_RX_RING_SIZE / 4))
			ag71xx_ring_rx_refill(ag);
	}

	ag71xx_ring_rx_refill(ag);

	DBG("%s: rx finish, curr=%u, dirty=%u, done=%d\n",
		dev->name, ring->curr, ring->dirty, done);

	return done;
}
591
/*
 * NAPI poll: optionally reclaim TX work, process up to @limit RX
 * packets, then leave polling mode (re-enabling interrupts) when the
 * budget was not exhausted and no poll events are pending.  Restarts
 * the RX DMA engine on overflow.  Returns 0 when polling stops, 1 to
 * stay in polling mode.
 */
static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
#ifdef AG71XX_NAPI_TX
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
#endif
	struct net_device *dev = ag->dev;
	unsigned long flags;
	u32 status;
	int done;

#ifdef AG71XX_NAPI_TX
	ar71xx_ddr_flush(pdata->flush_reg);
	ag71xx_tx_packets(ag);
#endif

	DBG("%s: processing RX ring\n", dev->name);
	done = ag71xx_rx_packets(ag, limit);

	/* TODO: add OOM handler */

	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
	status &= AG71XX_INT_POLL;

	if ((done < limit) && (!status)) {
		DBG("%s: disable polling mode, done=%d, status=%x\n",
			dev->name, done, status);

		netif_rx_complete(dev, napi);

		/* enable interrupts */
		spin_lock_irqsave(&ag->lock, flags);
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		spin_unlock_irqrestore(&ag->lock, flags);
		return 0;
	}

	if (status & AG71XX_INT_RX_OF) {
		printk(KERN_ALERT "%s: rx owerflow, restarting dma\n",
			dev->name);

		/* ack interrupt */
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	DBG("%s: stay in polling mode, done=%d, status=%x\n",
		dev->name, done, status);
	return 1;
}
643
/*
 * Interrupt handler: acknowledge bus-error conditions immediately and,
 * for RX/poll work, mask the poll interrupts and hand processing off to
 * NAPI.  Returns IRQ_NONE when no enabled source is pending (shared
 * line), IRQ_HANDLED otherwise.
 */
static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct ag71xx *ag = netdev_priv(dev);
	u32 status;

	/* consider only sources that are actually enabled */
	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
	status &= ag71xx_rr(ag, AG71XX_REG_INT_ENABLE);

	if (unlikely(!status))
		return IRQ_NONE;

	if (unlikely(status & AG71XX_INT_ERR)) {
		if (status & AG71XX_INT_TX_BE) {
			ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
			dev_err(&dev->dev, "TX BUS error\n");
		}
		if (status & AG71XX_INT_RX_BE) {
			ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
			dev_err(&dev->dev, "RX BUS error\n");
		}
	}

#if 0
	if (unlikely(status & AG71XX_INT_TX_UR)) {
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_UR);
		DBG("%s: TX underrun\n", dev->name);
	}
#endif

#ifndef AG71XX_NAPI_TX
	if (likely(status & AG71XX_INT_TX_PS))
		ag71xx_tx_packets(ag);
#endif

	if (likely(status & AG71XX_INT_POLL)) {
		/* mask poll sources until NAPI completes */
		ag71xx_int_disable(ag, AG71XX_INT_POLL);
		DBG("%s: enable polling mode\n", dev->name);
		netif_rx_schedule(dev, &ag->napi);
	}

	return IRQ_HANDLED;
}
687
/* Multicast filtering is not implemented yet; the hardware is left
 * untouched. */
static void ag71xx_set_multicast_list(struct net_device *dev)
{
	/* TODO */
}
692
693 static int __init ag71xx_probe(struct platform_device *pdev)
694 {
695 struct net_device *dev;
696 struct resource *res;
697 struct ag71xx *ag;
698 struct ag71xx_platform_data *pdata;
699 int err;
700
701 pdata = pdev->dev.platform_data;
702 if (!pdata) {
703 dev_err(&pdev->dev, "no platform data specified\n");
704 err = -ENXIO;
705 goto err_out;
706 }
707
708 dev = alloc_etherdev(sizeof(*ag));
709 if (!dev) {
710 dev_err(&pdev->dev, "alloc_etherdev failed\n");
711 err = -ENOMEM;
712 goto err_out;
713 }
714
715 SET_NETDEV_DEV(dev, &pdev->dev);
716
717 ag = netdev_priv(dev);
718 ag->pdev = pdev;
719 ag->dev = dev;
720 spin_lock_init(&ag->lock);
721
722 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac_base");
723 if (!res) {
724 dev_err(&pdev->dev, "no mac_base resource found\n");
725 err = -ENXIO;
726 goto err_out;
727 }
728
729 ag->mac_base = ioremap_nocache(res->start, res->end - res->start + 1);
730 if (!ag->mac_base) {
731 dev_err(&pdev->dev, "unable to ioremap mac_base\n");
732 err = -ENOMEM;
733 goto err_free_dev;
734 }
735
736 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mii_ctrl");
737 if (!res) {
738 dev_err(&pdev->dev, "no mii_ctrl resource found\n");
739 err = -ENXIO;
740 goto err_unmap_base;
741 }
742
743 ag->mii_ctrl = ioremap_nocache(res->start, res->end - res->start + 1);
744 if (!ag->mii_ctrl) {
745 dev_err(&pdev->dev, "unable to ioremap mii_ctrl\n");
746 err = -ENOMEM;
747 goto err_unmap_base;
748 }
749
750 dev->irq = platform_get_irq(pdev, 0);
751 err = request_irq(dev->irq, ag71xx_interrupt,
752 IRQF_DISABLED | IRQF_SAMPLE_RANDOM,
753 dev->name, dev);
754 if (err) {
755 dev_err(&pdev->dev, "unable to request IRQ %d\n", dev->irq);
756 goto err_unmap_mii_ctrl;
757 }
758
759 dev->base_addr = (unsigned long)ag->mac_base;
760 dev->open = ag71xx_open;
761 dev->stop = ag71xx_stop;
762 dev->hard_start_xmit = ag71xx_hard_start_xmit;
763 dev->set_multicast_list = ag71xx_set_multicast_list;
764 dev->do_ioctl = ag71xx_do_ioctl;
765 dev->ethtool_ops = &ag71xx_ethtool_ops;
766
767 netif_napi_add(dev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);
768
769 if (is_valid_ether_addr(pdata->mac_addr))
770 memcpy(dev->dev_addr, pdata->mac_addr, ETH_ALEN);
771 else {
772 dev->dev_addr[0] = 0xde;
773 dev->dev_addr[1] = 0xad;
774 get_random_bytes(&dev->dev_addr[2], 3);
775 dev->dev_addr[5] = pdev->id & 0xff;
776 }
777
778 err = register_netdev(dev);
779 if (err) {
780 dev_err(&pdev->dev, "unable to register net device\n");
781 goto err_free_irq;
782 }
783
784 printk(KERN_INFO "%s: Atheros AG71xx at 0x%08lx, irq %d\n",
785 dev->name, dev->base_addr, dev->irq);
786
787 ag71xx_dump_regs(ag);
788
789 ag71xx_hw_init(ag);
790
791 ag71xx_dump_regs(ag);
792
793 err = ag71xx_mdio_init(ag, pdev->id);
794 if (err)
795 goto err_unregister_netdev;
796
797 platform_set_drvdata(pdev, dev);
798
799 return 0;
800
801 err_unregister_netdev:
802 unregister_netdev(dev);
803 err_free_irq:
804 free_irq(dev->irq, dev);
805 err_unmap_mii_ctrl:
806 iounmap(ag->mii_ctrl);
807 err_unmap_base:
808 iounmap(ag->mac_base);
809 err_free_dev:
810 kfree(dev);
811 err_out:
812 platform_set_drvdata(pdev, NULL);
813 return err;
814 }
815
816 static int __exit ag71xx_remove(struct platform_device *pdev)
817 {
818 struct net_device *dev = platform_get_drvdata(pdev);
819
820 if (dev) {
821 struct ag71xx *ag = netdev_priv(dev);
822
823 if (ag->phy_dev)
824 phy_disconnect(ag->phy_dev);
825 ag71xx_mdio_cleanup(ag);
826 unregister_netdev(dev);
827 free_irq(dev->irq, dev);
828 iounmap(ag->mii_ctrl);
829 iounmap(ag->mac_base);
830 kfree(dev);
831 platform_set_drvdata(pdev, NULL);
832 }
833
834 return 0;
835 }
836
/* Platform driver glue.
 * NOTE(review): probe is marked __init — confirm all matching devices
 * are registered before init memory is discarded. */
static struct platform_driver ag71xx_driver = {
	.probe		= ag71xx_probe,
	.remove		= __exit_p(ag71xx_remove),
	.driver = {
		.name	= AG71XX_DRV_NAME,
	}
};
844
/* Register the platform driver on module load. */
static int __init ag71xx_module_init(void)
{
	return platform_driver_register(&ag71xx_driver);
}
849
/* Unregister the platform driver on module unload. */
static void __exit ag71xx_module_exit(void)
{
	platform_driver_unregister(&ag71xx_driver);
}
854
module_init(ag71xx_module_init);
module_exit(ag71xx_module_exit);

/* Module metadata */
MODULE_VERSION(AG71XX_DRV_VERSION);
MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
MODULE_AUTHOR("Imre Kaloz <kaloz@openwrt.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" AG71XX_DRV_NAME);