ramips: Add byte queue limits support to net/ethernet/ramips_main.c
openwrt/svn-archive/archive.git: target/linux/ramips/files/drivers/net/ethernet/ramips/ramips_main.c
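
Byte queue limits (BQL) support in this driver consists of three calls into the networking core: ramips_eth_hard_start_xmit() accounts every frame handed to the DMA engine with netdev_sent_queue(), ramips_eth_tx_housekeeping() reports reclaimed work with netdev_completed_queue(), and ramips_ring_cleanup() clears the accounting with netdev_reset_queue(). A condensed sketch of the pattern used in the functions below (not a drop-in excerpt):

/* transmit path: account bytes queued to the hardware TX ring */
netdev_sent_queue(dev, skb->len);

/* TX completion (housekeeping tasklet): report finished packets/bytes */
netdev_completed_queue(dev, pkts_compl, bytes_compl);

/* TX ring teardown: drop any outstanding BQL state */
netdev_reset_queue(dev);
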
1 /*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; version 2 of the License
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
14 *
15 * Copyright (C) 2009 John Crispin <blogic@openwrt.org>
16 */
17
18 #include <linux/module.h>
19 #include <linux/kernel.h>
20 #include <linux/types.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/init.h>
23 #include <linux/skbuff.h>
24 #include <linux/etherdevice.h>
25 #include <linux/ethtool.h>
26 #include <linux/platform_device.h>
27 #include <linux/phy.h>
28
29 #include <ramips_eth_platform.h>
30 #include "ramips_eth.h"
31
32 #define TX_TIMEOUT (20 * HZ / 100)
33 #define MAX_RX_LENGTH 1600
34
35 #ifdef CONFIG_RALINK_RT305X
36 #include "ramips_esw.c"
37 #else
38 static inline int rt305x_esw_init(void) { return 0; }
39 static inline void rt305x_esw_exit(void) { }
40 #endif
41
42 #define phys_to_bus(a) (a & 0x1FFFFFFF)
43
44 #ifdef CONFIG_RAMIPS_ETH_DEBUG
45 #define RADEBUG(fmt, args...) printk(KERN_DEBUG fmt, ## args)
46 #else
47 #define RADEBUG(fmt, args...) do {} while (0)
48 #endif
49
50 static struct net_device *ramips_dev;
51 static void __iomem *ramips_fe_base = NULL;
52
53 static inline void
54 ramips_fe_wr(u32 val, unsigned reg)
55 {
56 __raw_writel(val, ramips_fe_base + reg);
57 }
58
59 static inline u32
60 ramips_fe_rr(unsigned reg)
61 {
62 return __raw_readl(ramips_fe_base + reg);
63 }
64
65 static inline void
66 ramips_fe_int_disable(u32 mask)
67 {
68 ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) & ~mask,
69 RAMIPS_FE_INT_ENABLE);
70 /* flush write */
71 ramips_fe_rr(RAMIPS_FE_INT_ENABLE);
72 }
73
74 static inline void
75 ramips_fe_int_enable(u32 mask)
76 {
77 ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) | mask,
78 RAMIPS_FE_INT_ENABLE);
79 /* flush write */
80 ramips_fe_rr(RAMIPS_FE_INT_ENABLE);
81 }
82
83 static inline void
84 ramips_hw_set_macaddr(unsigned char *mac)
85 {
86 ramips_fe_wr((mac[0] << 8) | mac[1], RAMIPS_GDMA1_MAC_ADRH);
87 ramips_fe_wr((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5],
88 RAMIPS_GDMA1_MAC_ADRL);
89 }
90
91 static struct sk_buff *
92 ramips_alloc_skb(struct raeth_priv *re)
93 {
94 struct sk_buff *skb;
95
96 skb = netdev_alloc_skb(re->netdev, MAX_RX_LENGTH + NET_IP_ALIGN);
97 if (!skb)
98 return NULL;
99
100 skb_reserve(skb, NET_IP_ALIGN);
101
102 return skb;
103 }
104
105 static void
106 ramips_ring_setup(struct raeth_priv *re)
107 {
108 int len;
109 int i;
110
111 memset(re->tx_info, 0, NUM_TX_DESC * sizeof(struct raeth_tx_info));
112
113 len = NUM_TX_DESC * sizeof(struct ramips_tx_dma);
114 memset(re->tx, 0, len);
115
116 for (i = 0; i < NUM_TX_DESC; i++) {
117 struct raeth_tx_info *txi;
118 struct ramips_tx_dma *txd;
119
120 txd = &re->tx[i];
121 txd->txd4 = TX_DMA_QN(3) | TX_DMA_PN(1);
122 txd->txd2 = TX_DMA_LSO | TX_DMA_DONE;
123
124 txi = &re->tx_info[i];
125 txi->tx_desc = txd;
126 if (txi->tx_skb != NULL) {
127 netdev_warn(re->netdev,
128 "dirty skb for TX desc %d\n", i);
129 txi->tx_skb = NULL;
130 }
131 }
132
133 len = NUM_RX_DESC * sizeof(struct ramips_rx_dma);
134 memset(re->rx, 0, len);
135
136 for (i = 0; i < NUM_RX_DESC; i++) {
137 struct raeth_rx_info *rxi;
138 struct ramips_rx_dma *rxd;
139 dma_addr_t dma_addr;
140
141 rxd = &re->rx[i];
142 rxi = &re->rx_info[i];
143 BUG_ON(rxi->rx_skb == NULL);
144 dma_addr = dma_map_single(&re->netdev->dev, rxi->rx_skb->data,
145 MAX_RX_LENGTH, DMA_FROM_DEVICE);
146 rxi->rx_dma = dma_addr;
147 rxi->rx_desc = rxd;
148
149 rxd->rxd1 = (unsigned int) dma_addr;
150 rxd->rxd2 = RX_DMA_LSO;
151 }
152
153 /* flush descriptors */
154 wmb();
155 }
156
157 static void
158 ramips_ring_cleanup(struct raeth_priv *re)
159 {
160 int i;
161
162 for (i = 0; i < NUM_RX_DESC; i++) {
163 struct raeth_rx_info *rxi;
164
165 rxi = &re->rx_info[i];
166 if (rxi->rx_skb)
167 dma_unmap_single(&re->netdev->dev, rxi->rx_dma,
168 MAX_RX_LENGTH, DMA_FROM_DEVICE);
169 }
170
171 for (i = 0; i < NUM_TX_DESC; i++) {
172 struct raeth_tx_info *txi;
173
174 txi = &re->tx_info[i];
175 if (txi->tx_skb) {
176 dev_kfree_skb_any(txi->tx_skb);
177 txi->tx_skb = NULL;
178 }
179 }
180
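/* clear the byte queue limits (BQL) accounting for the torn-down TX ring */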
181 netdev_reset_queue(re->netdev);
182 }
183
184 #if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT3883)
185
186 #define RAMIPS_MDIO_RETRY 1000
187
188 static const char *ramips_speed_str(struct raeth_priv *re)
189 {
190 switch (re->speed) {
191 case SPEED_1000:
192 return "1000";
193 case SPEED_100:
194 return "100";
195 case SPEED_10:
196 return "10";
197 }
198
199 return "?";
200 }
201
202 static void ramips_link_adjust(struct raeth_priv *re)
203 {
204 struct ramips_eth_platform_data *pdata;
205 u32 mdio_cfg;
206
207 pdata = re->parent->platform_data;
208 if (!re->link) {
209 netif_carrier_off(re->netdev);
210 netdev_info(re->netdev, "link down\n");
211 return;
212 }
213
214 mdio_cfg = RAMIPS_MDIO_CFG_TX_CLK_SKEW_200 |
215 RAMIPS_MDIO_CFG_TX_CLK_SKEW_200 |
216 RAMIPS_MDIO_CFG_GP1_FRC_EN;
217
218 if (re->duplex == DUPLEX_FULL)
219 mdio_cfg |= RAMIPS_MDIO_CFG_GP1_DUPLEX;
220
221 if (re->tx_fc)
222 mdio_cfg |= RAMIPS_MDIO_CFG_GP1_FC_TX;
223
224 if (re->rx_fc)
225 mdio_cfg |= RAMIPS_MDIO_CFG_GP1_FC_RX;
226
227 switch (re->speed) {
228 case SPEED_10:
229 mdio_cfg |= RAMIPS_MDIO_CFG_GP1_SPEED_10;
230 break;
231 case SPEED_100:
232 mdio_cfg |= RAMIPS_MDIO_CFG_GP1_SPEED_100;
233 break;
234 case SPEED_1000:
235 mdio_cfg |= RAMIPS_MDIO_CFG_GP1_SPEED_1000;
236 break;
237 default:
238 BUG();
239 }
240
241 ramips_fe_wr(mdio_cfg, RAMIPS_MDIO_CFG);
242
243 netif_carrier_on(re->netdev);
244 netdev_info(re->netdev, "link up (%sMbps/%s duplex)\n",
245 ramips_speed_str(re),
246 (DUPLEX_FULL == re->duplex) ? "Full" : "Half");
247 }
248
249 static int
250 ramips_mdio_wait_ready(struct raeth_priv *re)
251 {
252 int retries;
253
254 retries = RAMIPS_MDIO_RETRY;
255 while (1) {
256 u32 t;
257
258 t = ramips_fe_rr(RAMIPS_MDIO_ACCESS);
259 if ((t & (0x1 << 31)) == 0)
260 return 0;
261
262 if (retries-- == 0)
263 break;
264
265 udelay(1);
266 }
267
268 dev_err(re->parent, "MDIO operation timed out\n");
269 return -ETIMEDOUT;
270 }
271
272 static int
273 ramips_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
274 {
275 struct raeth_priv *re = bus->priv;
276 int err;
277 u32 t;
278
279 err = ramips_mdio_wait_ready(re);
280 if (err)
281 return 0xffff;
282
283 t = (phy_addr << 24) | (phy_reg << 16);
284 ramips_fe_wr(t, RAMIPS_MDIO_ACCESS);
285 t |= (1 << 31);
286 ramips_fe_wr(t, RAMIPS_MDIO_ACCESS);
287
288 err = ramips_mdio_wait_ready(re);
289 if (err)
290 return 0xffff;
291
292 RADEBUG("%s: addr=%04x, reg=%04x, value=%04x\n", __func__,
293 phy_addr, phy_reg, ramips_fe_rr(RAMIPS_MDIO_ACCESS) & 0xffff);
294
295 return ramips_fe_rr(RAMIPS_MDIO_ACCESS) & 0xffff;
296 }
297
298 static int
299 ramips_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val)
300 {
301 struct raeth_priv *re = bus->priv;
302 int err;
303 u32 t;
304
305 RADEBUG("%s: addr=%04x, reg=%04x, value=%04x\n", __func__,
306 phy_addr, phy_reg, ramips_fe_rr(RAMIPS_MDIO_ACCESS) & 0xffff);
307
308 err = ramips_mdio_wait_ready(re);
309 if (err)
310 return err;
311
312 t = (1 << 30) | (phy_addr << 24) | (phy_reg << 16) | val;
313 ramips_fe_wr(t, RAMIPS_MDIO_ACCESS);
314 t |= (1 << 31);
315 ramips_fe_wr(t, RAMIPS_MDIO_ACCESS);
316
317 return ramips_mdio_wait_ready(re);
318 }
319
320 static int
321 ramips_mdio_reset(struct mii_bus *bus)
322 {
323 /* TODO */
324 return 0;
325 }
326
327 static int
328 ramips_mdio_init(struct raeth_priv *re)
329 {
330 int err;
331 int i;
332
333 re->mii_bus = mdiobus_alloc();
334 if (re->mii_bus == NULL)
335 return -ENOMEM;
336
337 re->mii_bus->name = "ramips_mdio";
338 re->mii_bus->read = ramips_mdio_read;
339 re->mii_bus->write = ramips_mdio_write;
340 re->mii_bus->reset = ramips_mdio_reset;
341 re->mii_bus->irq = re->mii_irq;
342 re->mii_bus->priv = re;
343 re->mii_bus->parent = re->parent;
344
345 snprintf(re->mii_bus->id, MII_BUS_ID_SIZE, "%s", "ramips_mdio");
346 re->mii_bus->phy_mask = 0;
347
348 for (i = 0; i < PHY_MAX_ADDR; i++)
349 re->mii_irq[i] = PHY_POLL;
350
351 err = mdiobus_register(re->mii_bus);
352 if (err)
353 goto err_free_bus;
354
355 return 0;
356
357 err_free_bus:
358 mdiobus_free(re->mii_bus);
359 return err;
360 }
361
362 static void
363 ramips_mdio_cleanup(struct raeth_priv *re)
364 {
365 mdiobus_unregister(re->mii_bus);
366 mdiobus_free(re->mii_bus);
367 }
368
369 static void
370 ramips_phy_link_adjust(struct net_device *dev)
371 {
372 struct raeth_priv *re = netdev_priv(dev);
373 struct phy_device *phydev = re->phy_dev;
374 unsigned long flags;
375 int status_change = 0;
376
377 spin_lock_irqsave(&re->phy_lock, flags);
378
379 if (phydev->link)
380 if (re->duplex != phydev->duplex ||
381 re->speed != phydev->speed)
382 status_change = 1;
383
384 if (phydev->link != re->link)
385 status_change = 1;
386
387 re->link = phydev->link;
388 re->duplex = phydev->duplex;
389 re->speed = phydev->speed;
390
391 if (status_change)
392 ramips_link_adjust(re);
393
394 spin_unlock_irqrestore(&re->phy_lock, flags);
395 }
396
397 static int
398 ramips_phy_connect_multi(struct raeth_priv *re)
399 {
400 struct net_device *netdev = re->netdev;
401 struct ramips_eth_platform_data *pdata;
402 struct phy_device *phydev = NULL;
403 int phy_addr;
404 int ret = 0;
405
406 pdata = re->parent->platform_data;
407 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
408 if (!(pdata->phy_mask & (1 << phy_addr)))
409 continue;
410
411 if (re->mii_bus->phy_map[phy_addr] == NULL)
412 continue;
413
414 RADEBUG("%s: PHY found at %s, uid=%08x\n",
415 netdev->name,
416 dev_name(&re->mii_bus->phy_map[phy_addr]->dev),
417 re->mii_bus->phy_map[phy_addr]->phy_id);
418
419 if (phydev == NULL)
420 phydev = re->mii_bus->phy_map[phy_addr];
421 }
422
423 if (!phydev) {
424 netdev_err(netdev, "no PHY found with phy_mask=%08x\n",
425 pdata->phy_mask);
426 return -ENODEV;
427 }
428
429 re->phy_dev = phy_connect(netdev, dev_name(&phydev->dev),
430 ramips_phy_link_adjust, 0,
431 pdata->phy_if_mode);
432
433 if (IS_ERR(re->phy_dev)) {
434 netdev_err(netdev, "could not connect to PHY at %s\n",
435 dev_name(&phydev->dev));
436 return PTR_ERR(re->phy_dev);
437 }
438
439 phydev->supported &= PHY_GBIT_FEATURES;
440 phydev->advertising = phydev->supported;
441
442 RADEBUG("%s: connected to PHY at %s [uid=%08x, driver=%s]\n",
443 netdev->name, dev_name(&phydev->dev),
444 phydev->phy_id, phydev->drv->name);
445
446 re->link = 0;
447 re->speed = 0;
448 re->duplex = -1;
449 re->rx_fc = 0;
450 re->tx_fc = 0;
451
452 return ret;
453 }
454
455 static int
456 ramips_phy_connect_fixed(struct raeth_priv *re)
457 {
458 struct ramips_eth_platform_data *pdata;
459
460 pdata = re->parent->platform_data;
461 switch (pdata->speed) {
462 case SPEED_10:
463 case SPEED_100:
464 case SPEED_1000:
465 break;
466 default:
467 netdev_err(re->netdev, "invalid speed specified\n");
468 return -EINVAL;
469 }
470
471 RADEBUG("%s: using fixed link parameters\n", re->netdev->name);
472
473 re->speed = pdata->speed;
474 re->duplex = pdata->duplex;
475 re->tx_fc = pdata->tx_fc;
476 re->rx_fc = pdata->rx_fc;
477
478 return 0;
479 }
480
481 static int
482 ramips_phy_connect(struct raeth_priv *re)
483 {
484 struct ramips_eth_platform_data *pdata;
485
486 pdata = re->parent->platform_data;
487 if (pdata->phy_mask)
488 return ramips_phy_connect_multi(re);
489
490 return ramips_phy_connect_fixed(re);
491 }
492
493 static void
494 ramips_phy_disconnect(struct raeth_priv *re)
495 {
496 if (re->phy_dev)
497 phy_disconnect(re->phy_dev);
498 }
499
500 static void
501 ramips_phy_start(struct raeth_priv *re)
502 {
503 unsigned long flags;
504
505 if (re->phy_dev) {
506 phy_start(re->phy_dev);
507 } else {
508 spin_lock_irqsave(&re->phy_lock, flags);
509 re->link = 1;
510 ramips_link_adjust(re);
511 spin_unlock_irqrestore(&re->phy_lock, flags);
512 }
513 }
514
515 static void
516 ramips_phy_stop(struct raeth_priv *re)
517 {
518 unsigned long flags;
519
520 if (re->phy_dev)
521 phy_stop(re->phy_dev);
522
523 spin_lock_irqsave(&re->phy_lock, flags);
524 re->link = 0;
525 ramips_link_adjust(re);
526 spin_unlock_irqrestore(&re->phy_lock, flags);
527 }
528 #else
529 static inline int
530 ramips_mdio_init(struct raeth_priv *re)
531 {
532 return 0;
533 }
534
535 static inline void
536 ramips_mdio_cleanup(struct raeth_priv *re)
537 {
538 }
539
540 static inline int
541 ramips_phy_connect(struct raeth_priv *re)
542 {
543 return 0;
544 }
545
546 static inline void
547 ramips_phy_disconnect(struct raeth_priv *re)
548 {
549 }
550
551 static inline void
552 ramips_phy_start(struct raeth_priv *re)
553 {
554 }
555
556 static inline void
557 ramips_phy_stop(struct raeth_priv *re)
558 {
559 }
560 #endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT3883 */
561
562 static void
563 ramips_ring_free(struct raeth_priv *re)
564 {
565 int len;
566 int i;
567
568 if (re->rx_info) {
569 for (i = 0; i < NUM_RX_DESC; i++) {
570 struct raeth_rx_info *rxi;
571
572 rxi = &re->rx_info[i];
573 if (rxi->rx_skb)
574 dev_kfree_skb_any(rxi->rx_skb);
575 }
576 kfree(re->rx_info);
577 }
578
579 if (re->rx) {
580 len = NUM_RX_DESC * sizeof(struct ramips_rx_dma);
581 dma_free_coherent(&re->netdev->dev, len, re->rx,
582 re->rx_desc_dma);
583 }
584
585 if (re->tx) {
586 len = NUM_TX_DESC * sizeof(struct ramips_tx_dma);
587 dma_free_coherent(&re->netdev->dev, len, re->tx,
588 re->tx_desc_dma);
589 }
590
591 kfree(re->tx_info);
592 }
593
594 static int
595 ramips_ring_alloc(struct raeth_priv *re)
596 {
597 int len;
598 int err = -ENOMEM;
599 int i;
600
601 re->tx_info = kzalloc(NUM_TX_DESC * sizeof(struct raeth_tx_info),
602 GFP_ATOMIC);
603 if (!re->tx_info)
604 goto err_cleanup;
605
606 re->rx_info = kzalloc(NUM_RX_DESC * sizeof(struct raeth_rx_info),
607 GFP_ATOMIC);
608 if (!re->rx_info)
609 goto err_cleanup;
610
611 /* allocate tx ring */
612 len = NUM_TX_DESC * sizeof(struct ramips_tx_dma);
613 re->tx = dma_alloc_coherent(&re->netdev->dev, len,
614 &re->tx_desc_dma, GFP_ATOMIC);
615 if (!re->tx)
616 goto err_cleanup;
617
618 /* allocate rx ring */
619 len = NUM_RX_DESC * sizeof(struct ramips_rx_dma);
620 re->rx = dma_alloc_coherent(&re->netdev->dev, len,
621 &re->rx_desc_dma, GFP_ATOMIC);
622 if (!re->rx)
623 goto err_cleanup;
624
625 for (i = 0; i < NUM_RX_DESC; i++) {
626 struct sk_buff *skb;
627
628 skb = ramips_alloc_skb(re);
629 if (!skb)
630 goto err_cleanup;
631
632 re->rx_info[i].rx_skb = skb;
633 }
634
635 return 0;
636
637 err_cleanup:
638 ramips_ring_free(re);
639 return err;
640 }
641
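/*
 * Program the frame engine with the physical base addresses and sizes of the
 * TX/RX descriptor rings and reset the hardware ring indices.
 */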
642 static void
643 ramips_setup_dma(struct raeth_priv *re)
644 {
645 ramips_fe_wr(re->tx_desc_dma, RAMIPS_TX_BASE_PTR0);
646 ramips_fe_wr(NUM_TX_DESC, RAMIPS_TX_MAX_CNT0);
647 ramips_fe_wr(0, RAMIPS_TX_CTX_IDX0);
648 ramips_fe_wr(RAMIPS_PST_DTX_IDX0, RAMIPS_PDMA_RST_CFG);
649
650 ramips_fe_wr(re->rx_desc_dma, RAMIPS_RX_BASE_PTR0);
651 ramips_fe_wr(NUM_RX_DESC, RAMIPS_RX_MAX_CNT0);
652 ramips_fe_wr((NUM_RX_DESC - 1), RAMIPS_RX_CALC_IDX0);
653 ramips_fe_wr(RAMIPS_PST_DRX_IDX0, RAMIPS_PDMA_RST_CFG);
654 }
655
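/*
 * TX path: the next free descriptor is taken from the hardware TX index.
 * Every frame queued to the DMA ring is reported to byte queue limits via
 * netdev_sent_queue() so the stack can bound the data in flight.
 */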
656 static int
657 ramips_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
658 {
659 struct raeth_priv *re = netdev_priv(dev);
660 struct raeth_tx_info *txi, *txi_next;
661 struct ramips_tx_dma *txd, *txd_next;
662 unsigned long tx;
663 unsigned int tx_next;
664 dma_addr_t mapped_addr;
665
666 if (re->plat->min_pkt_len) {
667 if (skb->len < re->plat->min_pkt_len) {
668 if (skb_padto(skb, re->plat->min_pkt_len)) {
669 printk(KERN_ERR
670 "ramips_eth: skb_padto failed\n");
671 /* skb_padto() already freed the skb on failure */
672 return NETDEV_TX_OK;
673 }
674 skb_put(skb, re->plat->min_pkt_len - skb->len);
675 }
676 }
677
678 dev->trans_start = jiffies;
679 mapped_addr = dma_map_single(&re->netdev->dev, skb->data, skb->len,
680 DMA_TO_DEVICE);
681
682 spin_lock(&re->page_lock);
683 tx = ramips_fe_rr(RAMIPS_TX_CTX_IDX0);
684 tx_next = (tx + 1) % NUM_TX_DESC;
685
686 txi = &re->tx_info[tx];
687 txd = txi->tx_desc;
688 txi_next = &re->tx_info[tx_next];
689 txd_next = txi_next->tx_desc;
690
691 if ((txi->tx_skb) || (txi_next->tx_skb) ||
692 !(txd->txd2 & TX_DMA_DONE) ||
693 !(txd_next->txd2 & TX_DMA_DONE))
694 goto out;
695
696 txi->tx_skb = skb;
697
698 txd->txd1 = (unsigned int) mapped_addr;
699 wmb();
700 txd->txd2 = TX_DMA_LSO | TX_DMA_PLEN0(skb->len);
701 dev->stats.tx_packets++;
702 dev->stats.tx_bytes += skb->len;
703 ramips_fe_wr(tx_next, RAMIPS_TX_CTX_IDX0);
704 netdev_sent_queue(dev, skb->len);
705 spin_unlock(&re->page_lock);
706 return NETDEV_TX_OK;
707
708 out:
709 spin_unlock(&re->page_lock);
710 dev->stats.tx_dropped++;
711 kfree_skb(skb);
712 return NETDEV_TX_OK;
713 }
714
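/*
 * RX tasklet: handle up to 16 descriptors per run, pass received frames to
 * the stack and refill the ring with fresh skbs; on allocation failure the
 * old buffer is reused and the frame is dropped. When the budget is used up
 * the tasklet reschedules itself, otherwise the delayed RX interrupt is
 * re-enabled.
 */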
715 static void
716 ramips_eth_rx_hw(unsigned long ptr)
717 {
718 struct net_device *dev = (struct net_device *) ptr;
719 struct raeth_priv *re = netdev_priv(dev);
720 int rx;
721 int max_rx = 16;
722
723 rx = ramips_fe_rr(RAMIPS_RX_CALC_IDX0);
724
725 while (max_rx) {
726 struct raeth_rx_info *rxi;
727 struct ramips_rx_dma *rxd;
728 struct sk_buff *rx_skb, *new_skb;
729 int pktlen;
730
731 rx = (rx + 1) % NUM_RX_DESC;
732
733 rxi = &re->rx_info[rx];
734 rxd = rxi->rx_desc;
735 if (!(rxd->rxd2 & RX_DMA_DONE))
736 break;
737
738 rx_skb = rxi->rx_skb;
739 pktlen = RX_DMA_PLEN0(rxd->rxd2);
740
741 new_skb = ramips_alloc_skb(re);
742 /* Reuse the buffer on allocation failures */
743 if (new_skb) {
744 dma_addr_t dma_addr;
745
746 dma_unmap_single(&re->netdev->dev, rxi->rx_dma,
747 MAX_RX_LENGTH, DMA_FROM_DEVICE);
748
749 skb_put(rx_skb, pktlen);
750 rx_skb->dev = dev;
751 rx_skb->protocol = eth_type_trans(rx_skb, dev);
752 rx_skb->ip_summed = CHECKSUM_NONE;
753 dev->stats.rx_packets++;
754 dev->stats.rx_bytes += pktlen;
755 netif_rx(rx_skb);
756
757 rxi->rx_skb = new_skb;
758
759 dma_addr = dma_map_single(&re->netdev->dev,
760 new_skb->data,
761 MAX_RX_LENGTH,
762 DMA_FROM_DEVICE);
763 rxi->rx_dma = dma_addr;
764 rxd->rxd1 = (unsigned int) dma_addr;
765 wmb();
766 } else {
767 dev->stats.rx_dropped++;
768 }
769
770 rxd->rxd2 = RX_DMA_LSO;
771 ramips_fe_wr(rx, RAMIPS_RX_CALC_IDX0);
772 max_rx--;
773 }
774
775 if (max_rx == 0)
776 tasklet_schedule(&re->rx_tasklet);
777 else
778 ramips_fe_int_enable(RAMIPS_RX_DLY_INT);
779 }
780
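/*
 * TX completion: walk the ring from skb_free_idx, free skbs whose DMA_DONE
 * bit is set and report the reclaimed packet/byte counts to byte queue
 * limits via netdev_completed_queue().
 */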
781 static void
782 ramips_eth_tx_housekeeping(unsigned long ptr)
783 {
784 struct net_device *dev = (struct net_device*)ptr;
785 struct raeth_priv *re = netdev_priv(dev);
786 unsigned int bytes_compl = 0, pkts_compl = 0;
787
788 spin_lock(&re->page_lock);
789 while (1) {
790 struct raeth_tx_info *txi;
791 struct ramips_tx_dma *txd;
792
793 txi = &re->tx_info[re->skb_free_idx];
794 txd = txi->tx_desc;
795
796 if (!(txd->txd2 & TX_DMA_DONE) || !(txi->tx_skb))
797 break;
798
799 pkts_compl++;
800 bytes_compl += txi->tx_skb->len;
801
802 dev_kfree_skb_irq(txi->tx_skb);
803 txi->tx_skb = NULL;
804 re->skb_free_idx++;
805 if (re->skb_free_idx >= NUM_TX_DESC)
806 re->skb_free_idx = 0;
807 }
808 netdev_completed_queue(dev, pkts_compl, bytes_compl);
809 spin_unlock(&re->page_lock);
810
811 ramips_fe_int_enable(RAMIPS_TX_DLY_INT);
812 }
813
814 static void
815 ramips_eth_timeout(struct net_device *dev)
816 {
817 struct raeth_priv *re = netdev_priv(dev);
818
819 tasklet_schedule(&re->tx_housekeeping_tasklet);
820 }
821
822 static irqreturn_t
823 ramips_eth_irq(int irq, void *dev)
824 {
825 struct raeth_priv *re = netdev_priv(dev);
826 unsigned int status;
827
828 status = ramips_fe_rr(RAMIPS_FE_INT_STATUS);
829 status &= ramips_fe_rr(RAMIPS_FE_INT_ENABLE);
830
831 if (!status)
832 return IRQ_NONE;
833
834 ramips_fe_wr(status, RAMIPS_FE_INT_STATUS);
835
836 if (status & RAMIPS_RX_DLY_INT) {
837 ramips_fe_int_disable(RAMIPS_RX_DLY_INT);
838 tasklet_schedule(&re->rx_tasklet);
839 }
840
841 if (status & RAMIPS_TX_DLY_INT) {
842 ramips_fe_int_disable(RAMIPS_TX_DLY_INT);
843 tasklet_schedule(&re->tx_housekeeping_tasklet);
844 }
845
846 raeth_debugfs_update_int_stats(re, status);
847
848 return IRQ_HANDLED;
849 }
850
851 static int
852 ramips_eth_open(struct net_device *dev)
853 {
854 struct raeth_priv *re = netdev_priv(dev);
855 int err;
856
857 err = request_irq(dev->irq, ramips_eth_irq, IRQF_DISABLED,
858 dev->name, dev);
859 if (err)
860 return err;
861
862 err = ramips_ring_alloc(re);
863 if (err)
864 goto err_free_irq;
865
866 ramips_ring_setup(re);
867 ramips_hw_set_macaddr(dev->dev_addr);
868
869 ramips_setup_dma(re);
870 ramips_fe_wr((ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) & 0xff) |
871 (RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN |
872 RAMIPS_TX_DMA_EN | RAMIPS_PDMA_SIZE_4DWORDS),
873 RAMIPS_PDMA_GLO_CFG);
874 ramips_fe_wr((ramips_fe_rr(RAMIPS_FE_GLO_CFG) &
875 ~(RAMIPS_US_CYC_CNT_MASK << RAMIPS_US_CYC_CNT_SHIFT)) |
876 ((re->plat->sys_freq / RAMIPS_US_CYC_CNT_DIVISOR) << RAMIPS_US_CYC_CNT_SHIFT),
877 RAMIPS_FE_GLO_CFG);
878
879 tasklet_init(&re->tx_housekeeping_tasklet, ramips_eth_tx_housekeeping,
880 (unsigned long)dev);
881 tasklet_init(&re->rx_tasklet, ramips_eth_rx_hw, (unsigned long)dev);
882
883 ramips_phy_start(re);
884
885 ramips_fe_wr(RAMIPS_DELAY_INIT, RAMIPS_DLY_INT_CFG);
886 ramips_fe_wr(RAMIPS_TX_DLY_INT | RAMIPS_RX_DLY_INT, RAMIPS_FE_INT_ENABLE);
887 ramips_fe_wr(ramips_fe_rr(RAMIPS_GDMA1_FWD_CFG) &
888 ~(RAMIPS_GDM1_ICS_EN | RAMIPS_GDM1_TCS_EN | RAMIPS_GDM1_UCS_EN | 0xffff),
889 RAMIPS_GDMA1_FWD_CFG);
890 ramips_fe_wr(ramips_fe_rr(RAMIPS_CDMA_CSG_CFG) &
891 ~(RAMIPS_ICS_GEN_EN | RAMIPS_TCS_GEN_EN | RAMIPS_UCS_GEN_EN),
892 RAMIPS_CDMA_CSG_CFG);
893 ramips_fe_wr(RAMIPS_PSE_FQFC_CFG_INIT, RAMIPS_PSE_FQ_CFG);
894 ramips_fe_wr(1, RAMIPS_FE_RST_GL);
895 ramips_fe_wr(0, RAMIPS_FE_RST_GL);
896
897 netif_start_queue(dev);
898 return 0;
899
900 err_free_irq:
901 free_irq(dev->irq, dev);
902 return err;
903 }
904
905 static int
906 ramips_eth_stop(struct net_device *dev)
907 {
908 struct raeth_priv *re = netdev_priv(dev);
909
910 ramips_fe_wr(ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) &
911 ~(RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN | RAMIPS_TX_DMA_EN),
912 RAMIPS_PDMA_GLO_CFG);
913
914 /* disable all interrupts in the hw */
915 ramips_fe_wr(0, RAMIPS_FE_INT_ENABLE);
916
917 ramips_phy_stop(re);
918 free_irq(dev->irq, dev);
919 netif_stop_queue(dev);
920 tasklet_kill(&re->tx_housekeeping_tasklet);
921 tasklet_kill(&re->rx_tasklet);
922 ramips_ring_cleanup(re);
923 ramips_ring_free(re);
924 RADEBUG("ramips_eth: stopped\n");
925 return 0;
926 }
927
928 static int __init
929 ramips_eth_probe(struct net_device *dev)
930 {
931 struct raeth_priv *re = netdev_priv(dev);
932 int err;
933
934 BUG_ON(!re->plat->reset_fe);
935 re->plat->reset_fe();
936 net_srandom(jiffies);
937 memcpy(dev->dev_addr, re->plat->mac, ETH_ALEN);
938
939 ether_setup(dev);
940 dev->mtu = 1500;
941 dev->watchdog_timeo = TX_TIMEOUT;
942 spin_lock_init(&re->page_lock);
943 spin_lock_init(&re->phy_lock);
944
945 err = ramips_mdio_init(re);
946 if (err)
947 return err;
948
949 err = ramips_phy_connect(re);
950 if (err)
951 goto err_mdio_cleanup;
952
953 err = raeth_debugfs_init(re);
954 if (err)
955 goto err_phy_disconnect;
956
957 return 0;
958
959 err_phy_disconnect:
960 ramips_phy_disconnect(re);
961 err_mdio_cleanup:
962 ramips_mdio_cleanup(re);
963 return err;
964 }
965
966 static void
967 ramips_eth_uninit(struct net_device *dev)
968 {
969 struct raeth_priv *re = netdev_priv(dev);
970
971 raeth_debugfs_exit(re);
972 ramips_phy_disconnect(re);
973 ramips_mdio_cleanup(re);
974 }
975
976 static const struct net_device_ops ramips_eth_netdev_ops = {
977 .ndo_init = ramips_eth_probe,
978 .ndo_uninit = ramips_eth_uninit,
979 .ndo_open = ramips_eth_open,
980 .ndo_stop = ramips_eth_stop,
981 .ndo_start_xmit = ramips_eth_hard_start_xmit,
982 .ndo_tx_timeout = ramips_eth_timeout,
983 .ndo_change_mtu = eth_change_mtu,
984 .ndo_set_mac_address = eth_mac_addr,
985 .ndo_validate_addr = eth_validate_addr,
986 };
987
988 static int
989 ramips_eth_plat_probe(struct platform_device *plat)
990 {
991 struct raeth_priv *re;
992 struct ramips_eth_platform_data *data = plat->dev.platform_data;
993 struct resource *res;
994 int err;
995
996 if (!data) {
997 dev_err(&plat->dev, "no platform data specified\n");
998 return -EINVAL;
999 }
1000
1001 res = platform_get_resource(plat, IORESOURCE_MEM, 0);
1002 if (!res) {
1003 dev_err(&plat->dev, "no memory resource found\n");
1004 return -ENXIO;
1005 }
1006
1007 ramips_fe_base = ioremap_nocache(res->start, res->end - res->start + 1);
1008 if (!ramips_fe_base)
1009 return -ENOMEM;
1010
1011 ramips_dev = alloc_etherdev(sizeof(struct raeth_priv));
1012 if (!ramips_dev) {
1013 dev_err(&plat->dev, "alloc_etherdev failed\n");
1014 err = -ENOMEM;
1015 goto err_unmap;
1016 }
1017
1018 strcpy(ramips_dev->name, "eth%d");
1019 ramips_dev->irq = platform_get_irq(plat, 0);
1020 if (ramips_dev->irq < 0) {
1021 dev_err(&plat->dev, "no IRQ resource found\n");
1022 err = -ENXIO;
1023 goto err_free_dev;
1024 }
1025 ramips_dev->addr_len = ETH_ALEN;
1026 ramips_dev->base_addr = (unsigned long)ramips_fe_base;
1027 ramips_dev->netdev_ops = &ramips_eth_netdev_ops;
1028
1029 re = netdev_priv(ramips_dev);
1030
1031 re->netdev = ramips_dev;
1032 re->parent = &plat->dev;
1033 re->speed = data->speed;
1034 re->duplex = data->duplex;
1035 re->rx_fc = data->rx_fc;
1036 re->tx_fc = data->tx_fc;
1037 re->plat = data;
1038
1039 err = register_netdev(ramips_dev);
1040 if (err) {
1041 dev_err(&plat->dev, "error bringing up device\n");
1042 goto err_free_dev;
1043 }
1044
1045 RADEBUG("ramips_eth: loaded\n");
1046 return 0;
1047
1048 err_free_dev:
1049 free_netdev(ramips_dev);
1050 err_unmap:
1051 iounmap(ramips_fe_base);
1052 return err;
1053 }
1054
1055 static int
1056 ramips_eth_plat_remove(struct platform_device *plat)
1057 {
1058 unregister_netdev(ramips_dev);
1059 free_netdev(ramips_dev);
1060 RADEBUG("ramips_eth: unloaded\n");
1061 return 0;
1062 }
1063
1064 static struct platform_driver ramips_eth_driver = {
1065 .probe = ramips_eth_plat_probe,
1066 .remove = ramips_eth_plat_remove,
1067 .driver = {
1068 .name = "ramips_eth",
1069 .owner = THIS_MODULE,
1070 },
1071 };
1072
1073 static int __init
1074 ramips_eth_init(void)
1075 {
1076 int ret;
1077
1078 ret = raeth_debugfs_root_init();
1079 if (ret)
1080 goto err_out;
1081
1082 ret = rt305x_esw_init();
1083 if (ret)
1084 goto err_debugfs_exit;
1085
1086 ret = platform_driver_register(&ramips_eth_driver);
1087 if (ret) {
1088 printk(KERN_ERR
1089 "ramips_eth: Error registering platfom driver!\n");
1090 goto esw_cleanup;
1091 }
1092
1093 return 0;
1094
1095 esw_cleanup:
1096 rt305x_esw_exit();
1097 err_debugfs_exit:
1098 raeth_debugfs_root_exit();
1099 err_out:
1100 return ret;
1101 }
1102
1103 static void __exit
1104 ramips_eth_cleanup(void)
1105 {
1106 platform_driver_unregister(&ramips_eth_driver);
1107 rt305x_esw_exit();
1108 raeth_debugfs_root_exit();
1109 }
1110
1111 module_init(ramips_eth_init);
1112 module_exit(ramips_eth_cleanup);
1113
1114 MODULE_LICENSE("GPL");
1115 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
1116 MODULE_DESCRIPTION("ethernet driver for ramips boards");