/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2009 John Crispin <blogic@openwrt.org>
 */
18 #include <linux/module.h>
19 #include <linux/kernel.h>
20 #include <linux/types.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/init.h>
23 #include <linux/skbuff.h>
24 #include <linux/etherdevice.h>
25 #include <linux/ethtool.h>
26 #include <linux/platform_device.h>
27 #include <linux/phy.h>
29 #include <ramips_eth_platform.h>
30 #include "ramips_eth.h"
32 #define TX_TIMEOUT (20 * HZ / 100)
33 #define MAX_RX_LENGTH 1600
35 #ifdef CONFIG_RALINK_RT305X
37 #include "ramips_esw.c"
39 static inline int rt305x_esw_init(void) { return 0; }
40 static inline void rt305x_esw_exit(void) { }
41 static inline int soc_is_rt5350(void) { return 0; }
44 #define phys_to_bus(a) (a & 0x1FFFFFFF)
46 #ifdef CONFIG_RAMIPS_ETH_DEBUG
47 #define RADEBUG(fmt, args...) printk(KERN_DEBUG fmt, ## args)
49 #define RADEBUG(fmt, args...) do {} while (0)
52 #define RX_DLY_INT ((soc_is_rt5350())?(RT5350_RX_DLY_INT):(RAMIPS_RX_DLY_INT))
53 #define TX_DLY_INT ((soc_is_rt5350())?(RT5350_TX_DLY_INT):(RAMIPS_TX_DLY_INT))
56 RAETH_REG_PDMA_GLO_CFG
= 0,
57 RAETH_REG_PDMA_RST_CFG
,
58 RAETH_REG_DLY_INT_CFG
,
59 RAETH_REG_TX_BASE_PTR0
,
60 RAETH_REG_TX_MAX_CNT0
,
61 RAETH_REG_TX_CTX_IDX0
,
62 RAETH_REG_RX_BASE_PTR0
,
63 RAETH_REG_RX_MAX_CNT0
,
64 RAETH_REG_RX_CALC_IDX0
,
65 RAETH_REG_FE_INT_ENABLE
,
66 RAETH_REG_FE_INT_STATUS
,
70 static const u32 ramips_reg_table
[RAETH_REG_COUNT
] = {
71 [RAETH_REG_PDMA_GLO_CFG
] = RAMIPS_PDMA_GLO_CFG
,
72 [RAETH_REG_PDMA_RST_CFG
] = RAMIPS_PDMA_RST_CFG
,
73 [RAETH_REG_DLY_INT_CFG
] = RAMIPS_DLY_INT_CFG
,
74 [RAETH_REG_TX_BASE_PTR0
] = RAMIPS_TX_BASE_PTR0
,
75 [RAETH_REG_TX_MAX_CNT0
] = RAMIPS_TX_MAX_CNT0
,
76 [RAETH_REG_TX_CTX_IDX0
] = RAMIPS_TX_CTX_IDX0
,
77 [RAETH_REG_RX_BASE_PTR0
] = RAMIPS_RX_BASE_PTR0
,
78 [RAETH_REG_RX_MAX_CNT0
] = RAMIPS_RX_MAX_CNT0
,
79 [RAETH_REG_RX_CALC_IDX0
] = RAMIPS_RX_CALC_IDX0
,
80 [RAETH_REG_FE_INT_ENABLE
] = RAMIPS_FE_INT_ENABLE
,
81 [RAETH_REG_FE_INT_STATUS
] = RAMIPS_FE_INT_STATUS
,
84 static const u32 rt5350_reg_table
[RAETH_REG_COUNT
] = {
85 [RAETH_REG_PDMA_GLO_CFG
] = RT5350_PDMA_GLO_CFG
,
86 [RAETH_REG_PDMA_RST_CFG
] = RT5350_PDMA_RST_CFG
,
87 [RAETH_REG_DLY_INT_CFG
] = RT5350_DLY_INT_CFG
,
88 [RAETH_REG_TX_BASE_PTR0
] = RT5350_TX_BASE_PTR0
,
89 [RAETH_REG_TX_MAX_CNT0
] = RT5350_TX_MAX_CNT0
,
90 [RAETH_REG_TX_CTX_IDX0
] = RT5350_TX_CTX_IDX0
,
91 [RAETH_REG_RX_BASE_PTR0
] = RT5350_RX_BASE_PTR0
,
92 [RAETH_REG_RX_MAX_CNT0
] = RT5350_RX_MAX_CNT0
,
93 [RAETH_REG_RX_CALC_IDX0
] = RT5350_RX_CALC_IDX0
,
94 [RAETH_REG_FE_INT_ENABLE
] = RT5350_FE_INT_ENABLE
,
95 [RAETH_REG_FE_INT_STATUS
] = RT5350_FE_INT_STATUS
,
98 static struct net_device
* ramips_dev
;
99 static void __iomem
*ramips_fe_base
= 0;
101 static inline u32
get_reg_offset(enum raeth_reg reg
)
106 table
= rt5350_reg_table
;
108 table
= ramips_reg_table
;
114 ramips_fe_wr(u32 val
, unsigned reg
)
116 __raw_writel(val
, ramips_fe_base
+ reg
);
120 ramips_fe_rr(unsigned reg
)
122 return __raw_readl(ramips_fe_base
+ reg
);
126 ramips_fe_twr(u32 val
, enum raeth_reg reg
)
128 ramips_fe_wr(val
, get_reg_offset(reg
));
132 ramips_fe_trr(enum raeth_reg reg
)
134 return ramips_fe_rr(get_reg_offset(reg
));
138 ramips_fe_int_disable(u32 mask
)
140 ramips_fe_twr(ramips_fe_trr(RAETH_REG_FE_INT_ENABLE
) & ~mask
,
141 RAETH_REG_FE_INT_ENABLE
);
143 ramips_fe_trr(RAETH_REG_FE_INT_ENABLE
);
147 ramips_fe_int_enable(u32 mask
)
149 ramips_fe_twr(ramips_fe_trr(RAETH_REG_FE_INT_ENABLE
) | mask
,
150 RAETH_REG_FE_INT_ENABLE
);
152 ramips_fe_trr(RAETH_REG_FE_INT_ENABLE
);
156 ramips_hw_set_macaddr(unsigned char *mac
)
158 if (soc_is_rt5350()) {
159 ramips_fe_wr((mac
[0] << 8) | mac
[1], RT5350_SDM_MAC_ADRH
);
160 ramips_fe_wr((mac
[2] << 24) | (mac
[3] << 16) | (mac
[4] << 8) | mac
[5],
161 RT5350_SDM_MAC_ADRL
);
163 ramips_fe_wr((mac
[0] << 8) | mac
[1], RAMIPS_GDMA1_MAC_ADRH
);
164 ramips_fe_wr((mac
[2] << 24) | (mac
[3] << 16) | (mac
[4] << 8) | mac
[5],
165 RAMIPS_GDMA1_MAC_ADRL
);
169 static struct sk_buff
*
170 ramips_alloc_skb(struct raeth_priv
*re
)
174 skb
= netdev_alloc_skb(re
->netdev
, MAX_RX_LENGTH
+ NET_IP_ALIGN
);
178 skb_reserve(skb
, NET_IP_ALIGN
);
184 ramips_ring_setup(struct raeth_priv
*re
)
189 memset(re
->tx_info
, 0, NUM_TX_DESC
* sizeof(struct raeth_tx_info
));
191 len
= NUM_TX_DESC
* sizeof(struct ramips_tx_dma
);
192 memset(re
->tx
, 0, len
);
194 for (i
= 0; i
< NUM_TX_DESC
; i
++) {
195 struct raeth_tx_info
*txi
;
196 struct ramips_tx_dma
*txd
;
199 txd
->txd4
= TX_DMA_QN(3) | TX_DMA_PN(1);
200 txd
->txd2
= TX_DMA_LSO
| TX_DMA_DONE
;
202 txi
= &re
->tx_info
[i
];
204 if (txi
->tx_skb
!= NULL
) {
205 netdev_warn(re
->netdev
,
206 "dirty skb for TX desc %d\n", i
);
211 len
= NUM_RX_DESC
* sizeof(struct ramips_rx_dma
);
212 memset(re
->rx
, 0, len
);
214 for (i
= 0; i
< NUM_RX_DESC
; i
++) {
215 struct raeth_rx_info
*rxi
;
216 struct ramips_rx_dma
*rxd
;
220 rxi
= &re
->rx_info
[i
];
221 BUG_ON(rxi
->rx_skb
== NULL
);
222 dma_addr
= dma_map_single(&re
->netdev
->dev
, rxi
->rx_skb
->data
,
223 MAX_RX_LENGTH
, DMA_FROM_DEVICE
);
224 rxi
->rx_dma
= dma_addr
;
227 rxd
->rxd1
= (unsigned int) dma_addr
;
228 rxd
->rxd2
= RX_DMA_LSO
;
231 /* flush descriptors */
236 ramips_ring_cleanup(struct raeth_priv
*re
)
240 for (i
= 0; i
< NUM_RX_DESC
; i
++) {
241 struct raeth_rx_info
*rxi
;
243 rxi
= &re
->rx_info
[i
];
245 dma_unmap_single(&re
->netdev
->dev
, rxi
->rx_dma
,
246 MAX_RX_LENGTH
, DMA_FROM_DEVICE
);
249 for (i
= 0; i
< NUM_TX_DESC
; i
++) {
250 struct raeth_tx_info
*txi
;
252 txi
= &re
->tx_info
[i
];
254 dev_kfree_skb_any(txi
->tx_skb
);
259 netdev_reset_queue(re
->netdev
);
262 #if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT3883)
264 #define RAMIPS_MDIO_RETRY 1000
266 static unsigned char *ramips_speed_str(struct raeth_priv
*re
)
280 static void ramips_link_adjust(struct raeth_priv
*re
)
282 struct ramips_eth_platform_data
*pdata
;
285 pdata
= re
->parent
->platform_data
;
287 netif_carrier_off(re
->netdev
);
288 netdev_info(re
->netdev
, "link down\n");
292 mdio_cfg
= RAMIPS_MDIO_CFG_TX_CLK_SKEW_200
|
293 RAMIPS_MDIO_CFG_TX_CLK_SKEW_200
|
294 RAMIPS_MDIO_CFG_GP1_FRC_EN
;
296 if (re
->duplex
== DUPLEX_FULL
)
297 mdio_cfg
|= RAMIPS_MDIO_CFG_GP1_DUPLEX
;
300 mdio_cfg
|= RAMIPS_MDIO_CFG_GP1_FC_TX
;
303 mdio_cfg
|= RAMIPS_MDIO_CFG_GP1_FC_RX
;
307 mdio_cfg
|= RAMIPS_MDIO_CFG_GP1_SPEED_10
;
310 mdio_cfg
|= RAMIPS_MDIO_CFG_GP1_SPEED_100
;
313 mdio_cfg
|= RAMIPS_MDIO_CFG_GP1_SPEED_1000
;
319 ramips_fe_wr(mdio_cfg
, RAMIPS_MDIO_CFG
);
321 netif_carrier_on(re
->netdev
);
322 netdev_info(re
->netdev
, "link up (%sMbps/%s duplex)\n",
323 ramips_speed_str(re
),
324 (DUPLEX_FULL
== re
->duplex
) ? "Full" : "Half");
328 ramips_mdio_wait_ready(struct raeth_priv
*re
)
332 retries
= RAMIPS_MDIO_RETRY
;
336 t
= ramips_fe_rr(RAMIPS_MDIO_ACCESS
);
337 if ((t
& (0x1 << 31)) == 0)
346 dev_err(re
->parent
, "MDIO operation timed out\n");
351 ramips_mdio_read(struct mii_bus
*bus
, int phy_addr
, int phy_reg
)
353 struct raeth_priv
*re
= bus
->priv
;
357 err
= ramips_mdio_wait_ready(re
);
361 t
= (phy_addr
<< 24) | (phy_reg
<< 16);
362 ramips_fe_wr(t
, RAMIPS_MDIO_ACCESS
);
364 ramips_fe_wr(t
, RAMIPS_MDIO_ACCESS
);
366 err
= ramips_mdio_wait_ready(re
);
370 RADEBUG("%s: addr=%04x, reg=%04x, value=%04x\n", __func__
,
371 phy_addr
, phy_reg
, ramips_fe_rr(RAMIPS_MDIO_ACCESS
) & 0xffff);
373 return ramips_fe_rr(RAMIPS_MDIO_ACCESS
) & 0xffff;
377 ramips_mdio_write(struct mii_bus
*bus
, int phy_addr
, int phy_reg
, u16 val
)
379 struct raeth_priv
*re
= bus
->priv
;
383 RADEBUG("%s: addr=%04x, reg=%04x, value=%04x\n", __func__
,
384 phy_addr
, phy_reg
, ramips_fe_rr(RAMIPS_MDIO_ACCESS
) & 0xffff);
386 err
= ramips_mdio_wait_ready(re
);
390 t
= (1 << 30) | (phy_addr
<< 24) | (phy_reg
<< 16) | val
;
391 ramips_fe_wr(t
, RAMIPS_MDIO_ACCESS
);
393 ramips_fe_wr(t
, RAMIPS_MDIO_ACCESS
);
395 return ramips_mdio_wait_ready(re
);
399 ramips_mdio_reset(struct mii_bus
*bus
)
406 ramips_mdio_init(struct raeth_priv
*re
)
411 re
->mii_bus
= mdiobus_alloc();
412 if (re
->mii_bus
== NULL
)
415 re
->mii_bus
->name
= "ramips_mdio";
416 re
->mii_bus
->read
= ramips_mdio_read
;
417 re
->mii_bus
->write
= ramips_mdio_write
;
418 re
->mii_bus
->reset
= ramips_mdio_reset
;
419 re
->mii_bus
->irq
= re
->mii_irq
;
420 re
->mii_bus
->priv
= re
;
421 re
->mii_bus
->parent
= re
->parent
;
423 snprintf(re
->mii_bus
->id
, MII_BUS_ID_SIZE
, "%s", "ramips_mdio");
424 re
->mii_bus
->phy_mask
= 0;
426 for (i
= 0; i
< PHY_MAX_ADDR
; i
++)
427 re
->mii_irq
[i
] = PHY_POLL
;
429 err
= mdiobus_register(re
->mii_bus
);
441 ramips_mdio_cleanup(struct raeth_priv
*re
)
443 mdiobus_unregister(re
->mii_bus
);
448 ramips_phy_link_adjust(struct net_device
*dev
)
450 struct raeth_priv
*re
= netdev_priv(dev
);
451 struct phy_device
*phydev
= re
->phy_dev
;
453 int status_change
= 0;
455 spin_lock_irqsave(&re
->phy_lock
, flags
);
458 if (re
->duplex
!= phydev
->duplex
||
459 re
->speed
!= phydev
->speed
)
462 if (phydev
->link
!= re
->link
)
465 re
->link
= phydev
->link
;
466 re
->duplex
= phydev
->duplex
;
467 re
->speed
= phydev
->speed
;
470 ramips_link_adjust(re
);
472 spin_unlock_irqrestore(&re
->phy_lock
, flags
);
476 ramips_phy_connect_multi(struct raeth_priv
*re
)
478 struct net_device
*netdev
= re
->netdev
;
479 struct ramips_eth_platform_data
*pdata
;
480 struct phy_device
*phydev
= NULL
;
484 pdata
= re
->parent
->platform_data
;
485 for (phy_addr
= 0; phy_addr
< PHY_MAX_ADDR
; phy_addr
++) {
486 if (!(pdata
->phy_mask
& (1 << phy_addr
)))
489 if (re
->mii_bus
->phy_map
[phy_addr
] == NULL
)
492 RADEBUG("%s: PHY found at %s, uid=%08x\n",
494 dev_name(&re
->mii_bus
->phy_map
[phy_addr
]->dev
),
495 re
->mii_bus
->phy_map
[phy_addr
]->phy_id
);
498 phydev
= re
->mii_bus
->phy_map
[phy_addr
];
502 netdev_err(netdev
, "no PHY found with phy_mask=%08x\n",
507 re
->phy_dev
= phy_connect(netdev
, dev_name(&phydev
->dev
),
508 ramips_phy_link_adjust
, 0,
511 if (IS_ERR(re
->phy_dev
)) {
512 netdev_err(netdev
, "could not connect to PHY at %s\n",
513 dev_name(&phydev
->dev
));
514 return PTR_ERR(re
->phy_dev
);
517 phydev
->supported
&= PHY_GBIT_FEATURES
;
518 phydev
->advertising
= phydev
->supported
;
520 RADEBUG("%s: connected to PHY at %s [uid=%08x, driver=%s]\n",
521 netdev
->name
, dev_name(&phydev
->dev
),
522 phydev
->phy_id
, phydev
->drv
->name
);
534 ramips_phy_connect_fixed(struct raeth_priv
*re
)
536 struct ramips_eth_platform_data
*pdata
;
538 pdata
= re
->parent
->platform_data
;
539 switch (pdata
->speed
) {
545 netdev_err(re
->netdev
, "invalid speed specified\n");
549 RADEBUG("%s: using fixed link parameters\n", re
->netdev
->name
);
551 re
->speed
= pdata
->speed
;
552 re
->duplex
= pdata
->duplex
;
553 re
->tx_fc
= pdata
->tx_fc
;
554 re
->rx_fc
= pdata
->tx_fc
;
560 ramips_phy_connect(struct raeth_priv
*re
)
562 struct ramips_eth_platform_data
*pdata
;
564 pdata
= re
->parent
->platform_data
;
566 return ramips_phy_connect_multi(re
);
568 return ramips_phy_connect_fixed(re
);
572 ramips_phy_disconnect(struct raeth_priv
*re
)
575 phy_disconnect(re
->phy_dev
);
579 ramips_phy_start(struct raeth_priv
*re
)
584 phy_start(re
->phy_dev
);
586 spin_lock_irqsave(&re
->phy_lock
, flags
);
588 ramips_link_adjust(re
);
589 spin_unlock_irqrestore(&re
->phy_lock
, flags
);
594 ramips_phy_stop(struct raeth_priv
*re
)
599 phy_stop(re
->phy_dev
);
601 spin_lock_irqsave(&re
->phy_lock
, flags
);
603 ramips_link_adjust(re
);
604 spin_unlock_irqrestore(&re
->phy_lock
, flags
);
608 ramips_mdio_init(struct raeth_priv
*re
)
614 ramips_mdio_cleanup(struct raeth_priv
*re
)
619 ramips_phy_connect(struct raeth_priv
*re
)
625 ramips_phy_disconnect(struct raeth_priv
*re
)
630 ramips_phy_start(struct raeth_priv
*re
)
635 ramips_phy_stop(struct raeth_priv
*re
)
638 #endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT3883 */
641 ramips_ring_free(struct raeth_priv
*re
)
647 for (i
= 0; i
< NUM_RX_DESC
; i
++) {
648 struct raeth_rx_info
*rxi
;
650 rxi
= &re
->rx_info
[i
];
652 dev_kfree_skb_any(rxi
->rx_skb
);
658 len
= NUM_RX_DESC
* sizeof(struct ramips_rx_dma
);
659 dma_free_coherent(&re
->netdev
->dev
, len
, re
->rx
,
664 len
= NUM_TX_DESC
* sizeof(struct ramips_tx_dma
);
665 dma_free_coherent(&re
->netdev
->dev
, len
, re
->tx
,
673 ramips_ring_alloc(struct raeth_priv
*re
)
679 re
->tx_info
= kzalloc(NUM_TX_DESC
* sizeof(struct raeth_tx_info
),
684 re
->rx_info
= kzalloc(NUM_RX_DESC
* sizeof(struct raeth_rx_info
),
689 /* allocate tx ring */
690 len
= NUM_TX_DESC
* sizeof(struct ramips_tx_dma
);
691 re
->tx
= dma_alloc_coherent(&re
->netdev
->dev
, len
,
692 &re
->tx_desc_dma
, GFP_ATOMIC
);
696 /* allocate rx ring */
697 len
= NUM_RX_DESC
* sizeof(struct ramips_rx_dma
);
698 re
->rx
= dma_alloc_coherent(&re
->netdev
->dev
, len
,
699 &re
->rx_desc_dma
, GFP_ATOMIC
);
703 for (i
= 0; i
< NUM_RX_DESC
; i
++) {
706 skb
= ramips_alloc_skb(re
);
710 re
->rx_info
[i
].rx_skb
= skb
;
716 ramips_ring_free(re
);
721 ramips_setup_dma(struct raeth_priv
*re
)
723 ramips_fe_twr(re
->tx_desc_dma
, RAETH_REG_TX_BASE_PTR0
);
724 ramips_fe_twr(NUM_TX_DESC
, RAETH_REG_TX_MAX_CNT0
);
725 ramips_fe_twr(0, RAETH_REG_TX_CTX_IDX0
);
726 ramips_fe_twr(RAMIPS_PST_DTX_IDX0
, RAETH_REG_PDMA_RST_CFG
);
728 ramips_fe_twr(re
->rx_desc_dma
, RAETH_REG_RX_BASE_PTR0
);
729 ramips_fe_twr(NUM_RX_DESC
, RAETH_REG_RX_MAX_CNT0
);
730 ramips_fe_twr((NUM_RX_DESC
- 1), RAETH_REG_RX_CALC_IDX0
);
731 ramips_fe_twr(RAMIPS_PST_DRX_IDX0
, RAETH_REG_PDMA_RST_CFG
);
735 ramips_eth_hard_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
737 struct raeth_priv
*re
= netdev_priv(dev
);
738 struct raeth_tx_info
*txi
, *txi_next
;
739 struct ramips_tx_dma
*txd
, *txd_next
;
741 unsigned int tx_next
;
742 dma_addr_t mapped_addr
;
744 if (re
->plat
->min_pkt_len
) {
745 if (skb
->len
< re
->plat
->min_pkt_len
) {
746 if (skb_padto(skb
, re
->plat
->min_pkt_len
)) {
748 "ramips_eth: skb_padto failed\n");
752 skb_put(skb
, re
->plat
->min_pkt_len
- skb
->len
);
756 dev
->trans_start
= jiffies
;
757 mapped_addr
= dma_map_single(&re
->netdev
->dev
, skb
->data
, skb
->len
,
760 spin_lock(&re
->page_lock
);
761 tx
= ramips_fe_trr(RAETH_REG_TX_CTX_IDX0
);
762 tx_next
= (tx
+ 1) % NUM_TX_DESC
;
764 txi
= &re
->tx_info
[tx
];
766 txi_next
= &re
->tx_info
[tx_next
];
767 txd_next
= txi_next
->tx_desc
;
769 if ((txi
->tx_skb
) || (txi_next
->tx_skb
) ||
770 !(txd
->txd2
& TX_DMA_DONE
) ||
771 !(txd_next
->txd2
& TX_DMA_DONE
))
776 txd
->txd1
= (unsigned int) mapped_addr
;
778 txd
->txd2
= TX_DMA_LSO
| TX_DMA_PLEN0(skb
->len
);
779 dev
->stats
.tx_packets
++;
780 dev
->stats
.tx_bytes
+= skb
->len
;
781 ramips_fe_twr(tx_next
, RAETH_REG_TX_CTX_IDX0
);
782 netdev_sent_queue(dev
, skb
->len
);
783 spin_unlock(&re
->page_lock
);
787 spin_unlock(&re
->page_lock
);
788 dev
->stats
.tx_dropped
++;
794 ramips_eth_rx_hw(unsigned long ptr
)
796 struct net_device
*dev
= (struct net_device
*) ptr
;
797 struct raeth_priv
*re
= netdev_priv(dev
);
801 rx
= ramips_fe_trr(RAETH_REG_RX_CALC_IDX0
);
804 struct raeth_rx_info
*rxi
;
805 struct ramips_rx_dma
*rxd
;
806 struct sk_buff
*rx_skb
, *new_skb
;
809 rx
= (rx
+ 1) % NUM_RX_DESC
;
811 rxi
= &re
->rx_info
[rx
];
813 if (!(rxd
->rxd2
& RX_DMA_DONE
))
816 rx_skb
= rxi
->rx_skb
;
817 pktlen
= RX_DMA_PLEN0(rxd
->rxd2
);
819 new_skb
= ramips_alloc_skb(re
);
820 /* Reuse the buffer on allocation failures */
824 dma_unmap_single(&re
->netdev
->dev
, rxi
->rx_dma
,
825 MAX_RX_LENGTH
, DMA_FROM_DEVICE
);
827 skb_put(rx_skb
, pktlen
);
829 rx_skb
->protocol
= eth_type_trans(rx_skb
, dev
);
830 rx_skb
->ip_summed
= CHECKSUM_NONE
;
831 dev
->stats
.rx_packets
++;
832 dev
->stats
.rx_bytes
+= pktlen
;
835 rxi
->rx_skb
= new_skb
;
837 dma_addr
= dma_map_single(&re
->netdev
->dev
,
841 rxi
->rx_dma
= dma_addr
;
842 rxd
->rxd1
= (unsigned int) dma_addr
;
845 dev
->stats
.rx_dropped
++;
848 rxd
->rxd2
= RX_DMA_LSO
;
849 ramips_fe_twr(rx
, RAETH_REG_RX_CALC_IDX0
);
854 tasklet_schedule(&re
->rx_tasklet
);
856 ramips_fe_int_enable(RX_DLY_INT
);
860 ramips_eth_tx_housekeeping(unsigned long ptr
)
862 struct net_device
*dev
= (struct net_device
*)ptr
;
863 struct raeth_priv
*re
= netdev_priv(dev
);
864 unsigned int bytes_compl
= 0, pkts_compl
= 0;
866 spin_lock(&re
->page_lock
);
868 struct raeth_tx_info
*txi
;
869 struct ramips_tx_dma
*txd
;
871 txi
= &re
->tx_info
[re
->skb_free_idx
];
874 if (!(txd
->txd2
& TX_DMA_DONE
) || !(txi
->tx_skb
))
878 bytes_compl
+= txi
->tx_skb
->len
;
880 dev_kfree_skb_irq(txi
->tx_skb
);
883 if (re
->skb_free_idx
>= NUM_TX_DESC
)
884 re
->skb_free_idx
= 0;
886 netdev_completed_queue(dev
, pkts_compl
, bytes_compl
);
887 spin_unlock(&re
->page_lock
);
889 ramips_fe_int_enable(TX_DLY_INT
);
893 ramips_eth_timeout(struct net_device
*dev
)
895 struct raeth_priv
*re
= netdev_priv(dev
);
897 tasklet_schedule(&re
->tx_housekeeping_tasklet
);
901 ramips_eth_irq(int irq
, void *dev
)
903 struct raeth_priv
*re
= netdev_priv(dev
);
906 status
= ramips_fe_trr(RAETH_REG_FE_INT_STATUS
);
907 status
&= ramips_fe_trr(RAETH_REG_FE_INT_ENABLE
);
912 ramips_fe_twr(status
, RAETH_REG_FE_INT_STATUS
);
914 if (status
& RX_DLY_INT
) {
915 ramips_fe_int_disable(RX_DLY_INT
);
916 tasklet_schedule(&re
->rx_tasklet
);
919 if (status
& TX_DLY_INT
) {
920 ramips_fe_int_disable(TX_DLY_INT
);
921 tasklet_schedule(&re
->tx_housekeeping_tasklet
);
924 raeth_debugfs_update_int_stats(re
, status
);
930 ramips_eth_open(struct net_device
*dev
)
932 struct raeth_priv
*re
= netdev_priv(dev
);
935 err
= request_irq(dev
->irq
, ramips_eth_irq
, IRQF_DISABLED
,
940 err
= ramips_ring_alloc(re
);
944 ramips_ring_setup(re
);
945 ramips_hw_set_macaddr(dev
->dev_addr
);
947 ramips_setup_dma(re
);
948 ramips_fe_twr((ramips_fe_trr(RAETH_REG_PDMA_GLO_CFG
) & 0xff) |
949 (RAMIPS_TX_WB_DDONE
| RAMIPS_RX_DMA_EN
|
950 RAMIPS_TX_DMA_EN
| RAMIPS_PDMA_SIZE_4DWORDS
),
951 RAETH_REG_PDMA_GLO_CFG
);
952 ramips_fe_wr((ramips_fe_rr(RAMIPS_FE_GLO_CFG
) &
953 ~(RAMIPS_US_CYC_CNT_MASK
<< RAMIPS_US_CYC_CNT_SHIFT
)) |
954 ((re
->plat
->sys_freq
/ RAMIPS_US_CYC_CNT_DIVISOR
) << RAMIPS_US_CYC_CNT_SHIFT
),
957 tasklet_init(&re
->tx_housekeeping_tasklet
, ramips_eth_tx_housekeeping
,
959 tasklet_init(&re
->rx_tasklet
, ramips_eth_rx_hw
, (unsigned long)dev
);
961 ramips_phy_start(re
);
963 ramips_fe_twr(RAMIPS_DELAY_INIT
, RAETH_REG_DLY_INT_CFG
);
964 ramips_fe_twr(TX_DLY_INT
| RX_DLY_INT
, RAETH_REG_FE_INT_ENABLE
);
965 if (soc_is_rt5350()) {
966 ramips_fe_wr(ramips_fe_rr(RT5350_SDM_CFG
) &
967 ~(RT5350_SDM_ICS_EN
| RT5350_SDM_TCS_EN
| RT5350_SDM_UCS_EN
| 0xffff),
970 ramips_fe_wr(ramips_fe_rr(RAMIPS_GDMA1_FWD_CFG
) &
971 ~(RAMIPS_GDM1_ICS_EN
| RAMIPS_GDM1_TCS_EN
| RAMIPS_GDM1_UCS_EN
| 0xffff),
972 RAMIPS_GDMA1_FWD_CFG
);
973 ramips_fe_wr(ramips_fe_rr(RAMIPS_CDMA_CSG_CFG
) &
974 ~(RAMIPS_ICS_GEN_EN
| RAMIPS_TCS_GEN_EN
| RAMIPS_UCS_GEN_EN
),
975 RAMIPS_CDMA_CSG_CFG
);
976 ramips_fe_wr(RAMIPS_PSE_FQFC_CFG_INIT
, RAMIPS_PSE_FQ_CFG
);
978 ramips_fe_wr(1, RAMIPS_FE_RST_GL
);
979 ramips_fe_wr(0, RAMIPS_FE_RST_GL
);
981 netif_start_queue(dev
);
985 free_irq(dev
->irq
, dev
);
990 ramips_eth_stop(struct net_device
*dev
)
992 struct raeth_priv
*re
= netdev_priv(dev
);
994 ramips_fe_twr(ramips_fe_trr(RAETH_REG_PDMA_GLO_CFG
) &
995 ~(RAMIPS_TX_WB_DDONE
| RAMIPS_RX_DMA_EN
| RAMIPS_TX_DMA_EN
),
996 RAETH_REG_PDMA_GLO_CFG
);
998 /* disable all interrupts in the hw */
999 ramips_fe_twr(0, RAETH_REG_FE_INT_ENABLE
);
1001 ramips_phy_stop(re
);
1002 free_irq(dev
->irq
, dev
);
1003 netif_stop_queue(dev
);
1004 tasklet_kill(&re
->tx_housekeeping_tasklet
);
1005 tasklet_kill(&re
->rx_tasklet
);
1006 ramips_ring_cleanup(re
);
1007 ramips_ring_free(re
);
1008 RADEBUG("ramips_eth: stopped\n");
1013 ramips_eth_probe(struct net_device
*dev
)
1015 struct raeth_priv
*re
= netdev_priv(dev
);
1018 BUG_ON(!re
->plat
->reset_fe
);
1019 re
->plat
->reset_fe();
1020 net_srandom(jiffies
);
1021 memcpy(dev
->dev_addr
, re
->plat
->mac
, ETH_ALEN
);
1025 dev
->watchdog_timeo
= TX_TIMEOUT
;
1026 spin_lock_init(&re
->page_lock
);
1027 spin_lock_init(&re
->phy_lock
);
1029 err
= ramips_mdio_init(re
);
1033 err
= ramips_phy_connect(re
);
1035 goto err_mdio_cleanup
;
1037 err
= raeth_debugfs_init(re
);
1039 goto err_phy_disconnect
;
1044 ramips_phy_disconnect(re
);
1046 ramips_mdio_cleanup(re
);
1051 ramips_eth_uninit(struct net_device
*dev
)
1053 struct raeth_priv
*re
= netdev_priv(dev
);
1055 raeth_debugfs_exit(re
);
1056 ramips_phy_disconnect(re
);
1057 ramips_mdio_cleanup(re
);
1060 static const struct net_device_ops ramips_eth_netdev_ops
= {
1061 .ndo_init
= ramips_eth_probe
,
1062 .ndo_uninit
= ramips_eth_uninit
,
1063 .ndo_open
= ramips_eth_open
,
1064 .ndo_stop
= ramips_eth_stop
,
1065 .ndo_start_xmit
= ramips_eth_hard_start_xmit
,
1066 .ndo_tx_timeout
= ramips_eth_timeout
,
1067 .ndo_change_mtu
= eth_change_mtu
,
1068 .ndo_set_mac_address
= eth_mac_addr
,
1069 .ndo_validate_addr
= eth_validate_addr
,
1073 ramips_eth_plat_probe(struct platform_device
*plat
)
1075 struct raeth_priv
*re
;
1076 struct ramips_eth_platform_data
*data
= plat
->dev
.platform_data
;
1077 struct resource
*res
;
1081 dev_err(&plat
->dev
, "no platform data specified\n");
1085 res
= platform_get_resource(plat
, IORESOURCE_MEM
, 0);
1087 dev_err(&plat
->dev
, "no memory resource found\n");
1091 ramips_fe_base
= ioremap_nocache(res
->start
, res
->end
- res
->start
+ 1);
1092 if (!ramips_fe_base
)
1095 ramips_dev
= alloc_etherdev(sizeof(struct raeth_priv
));
1097 dev_err(&plat
->dev
, "alloc_etherdev failed\n");
1102 strcpy(ramips_dev
->name
, "eth%d");
1103 ramips_dev
->irq
= platform_get_irq(plat
, 0);
1104 if (ramips_dev
->irq
< 0) {
1105 dev_err(&plat
->dev
, "no IRQ resource found\n");
1109 ramips_dev
->addr_len
= ETH_ALEN
;
1110 ramips_dev
->base_addr
= (unsigned long)ramips_fe_base
;
1111 ramips_dev
->netdev_ops
= &ramips_eth_netdev_ops
;
1113 re
= netdev_priv(ramips_dev
);
1115 re
->netdev
= ramips_dev
;
1116 re
->parent
= &plat
->dev
;
1117 re
->speed
= data
->speed
;
1118 re
->duplex
= data
->duplex
;
1119 re
->rx_fc
= data
->rx_fc
;
1120 re
->tx_fc
= data
->tx_fc
;
1123 err
= register_netdev(ramips_dev
);
1125 dev_err(&plat
->dev
, "error bringing up device\n");
1129 RADEBUG("ramips_eth: loaded\n");
1135 iounmap(ramips_fe_base
);
1140 ramips_eth_plat_remove(struct platform_device
*plat
)
1142 unregister_netdev(ramips_dev
);
1143 free_netdev(ramips_dev
);
1144 RADEBUG("ramips_eth: unloaded\n");
1148 static struct platform_driver ramips_eth_driver
= {
1149 .probe
= ramips_eth_plat_probe
,
1150 .remove
= ramips_eth_plat_remove
,
1152 .name
= "ramips_eth",
1153 .owner
= THIS_MODULE
,
1158 ramips_eth_init(void)
1162 ret
= raeth_debugfs_root_init();
1166 ret
= rt305x_esw_init();
1168 goto err_debugfs_exit
;
1170 ret
= platform_driver_register(&ramips_eth_driver
);
1173 "ramips_eth: Error registering platfom driver!\n");
1182 raeth_debugfs_root_exit();
1188 ramips_eth_cleanup(void)
1190 platform_driver_unregister(&ramips_eth_driver
);
1192 raeth_debugfs_root_exit();
1195 module_init(ramips_eth_init
);
1196 module_exit(ramips_eth_cleanup
);
1198 MODULE_LICENSE("GPL");
1199 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
1200 MODULE_DESCRIPTION("ethernet driver for ramips boards");