Index: linux-3.14.18/arch/mips/include/asm/mach-ralink/rt305x_esw_platform.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-3.14.18/arch/mips/include/asm/mach-ralink/rt305x_esw_platform.h 2014-10-28 10:43:11.882131200 +0100
++++ linux-3.14.18/arch/mips/include/asm/mach-ralink/rt305x_esw_platform.h 2014-10-29 20:25:55.429357372 +0100
@@ -0,0 +1,27 @@
+/*
+ * Ralink RT305x SoC platform device registration
+#endif /* _RT305X_ESW_PLATFORM_H */
Index: linux-3.14.18/arch/mips/ralink/rt305x.c
===================================================================
---- linux-3.14.18.orig/arch/mips/ralink/rt305x.c 2014-10-28 10:43:11.766130990 +0100
-+++ linux-3.14.18/arch/mips/ralink/rt305x.c 2014-10-28 10:43:11.882131200 +0100
+--- linux-3.14.18.orig/arch/mips/ralink/rt305x.c 2014-10-29 20:25:55.377355456 +0100
++++ linux-3.14.18/arch/mips/ralink/rt305x.c 2014-10-29 20:25:55.433357519 +0100
@@ -199,6 +199,7 @@
}
Index: linux-3.14.18/drivers/net/ethernet/Kconfig
===================================================================
--- linux-3.14.18.orig/drivers/net/ethernet/Kconfig 2014-09-06 01:34:59.000000000 +0200
-+++ linux-3.14.18/drivers/net/ethernet/Kconfig 2014-10-29 14:59:06.927617708 +0100
++++ linux-3.14.18/drivers/net/ethernet/Kconfig 2014-10-29 20:39:55.156284038 +0100
@@ -134,6 +134,7 @@
source "drivers/net/ethernet/packetengines/Kconfig"
source "drivers/net/ethernet/pasemi/Kconfig"
Index: linux-3.14.18/drivers/net/ethernet/Makefile
===================================================================
--- linux-3.14.18.orig/drivers/net/ethernet/Makefile 2014-09-06 01:34:59.000000000 +0200
-+++ linux-3.14.18/drivers/net/ethernet/Makefile 2014-10-29 14:59:06.927617708 +0100
++++ linux-3.14.18/drivers/net/ethernet/Makefile 2014-10-29 20:39:55.156284038 +0100
@@ -56,6 +56,7 @@
obj-$(CONFIG_NET_PACKET_ENGINE) += packetengines/
obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/
Index: linux-3.14.18/drivers/net/ethernet/ralink/Kconfig
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-3.14.18/drivers/net/ethernet/ralink/Kconfig 2014-10-28 10:43:11.886131208 +0100
-@@ -0,0 +1,32 @@
++++ linux-3.14.18/drivers/net/ethernet/ralink/Kconfig 2014-10-29 20:25:55.433357519 +0100
+@@ -0,0 +1,31 @@
+config NET_RALINK
+ tristate "Ralink RT288X/RT3X5X/RT3662/RT3883/MT7620 ethernet driver"
+ depends on RALINK
+config NET_RALINK_GSW_MT7620
+ def_bool NET_RALINK
+ depends on SOC_MT7620
-+ select INET_LRO
+ select NET_RALINK_MDIO
+ select PHYLIB
+ select SWCONFIG
Index: linux-3.14.18/drivers/net/ethernet/ralink/Makefile
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-3.14.18/drivers/net/ethernet/ralink/Makefile 2014-10-28 10:43:11.886131208 +0100
++++ linux-3.14.18/drivers/net/ethernet/ralink/Makefile 2014-10-29 20:25:55.433357519 +0100
@@ -0,0 +1,18 @@
+#
+# Makefile for the Ralink SoCs built-in ethernet macs
+#
+
-+ralink-eth-y += ralink_soc_eth.o
++ralink-eth-y += ralink_soc_eth.o ralink_ethtool.o
+
+ralink-eth-$(CONFIG_NET_RALINK_MDIO) += mdio.o
+ralink-eth-$(CONFIG_NET_RALINK_MDIO_RT2880) += mdio_rt2880.o
Index: linux-3.14.18/drivers/net/ethernet/ralink/esw_rt3052.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-3.14.18/drivers/net/ethernet/ralink/esw_rt3052.c 2014-10-28 10:43:11.886131208 +0100
++++ linux-3.14.18/drivers/net/ethernet/ralink/esw_rt3052.c 2014-10-29 20:25:55.433357519 +0100
@@ -0,0 +1,1463 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
Index: linux-3.14.18/drivers/net/ethernet/ralink/esw_rt3052.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-3.14.18/drivers/net/ethernet/ralink/esw_rt3052.h 2014-10-28 10:43:11.886131208 +0100
++++ linux-3.14.18/drivers/net/ethernet/ralink/esw_rt3052.h 2014-10-29 20:25:55.433357519 +0100
@@ -0,0 +1,32 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
Index: linux-3.14.18/drivers/net/ethernet/ralink/gsw_mt7620a.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-3.14.18/drivers/net/ethernet/ralink/gsw_mt7620a.c 2014-10-29 14:58:32.303564882 +0100
-@@ -0,0 +1,568 @@
++++ linux-3.14.18/drivers/net/ethernet/ralink/gsw_mt7620a.c 2014-10-29 20:33:38.670422146 +0100
+@@ -0,0 +1,569 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ _mt7620_mii_write(gsw, 1, 4, 0x05e1);
+ _mt7620_mii_write(gsw, 2, 4, 0x05e1);
+ _mt7620_mii_write(gsw, 3, 4, 0x05e1);
++
+ _mt7620_mii_write(gsw, 1, 31, 0xa000); //local, page 2
+ _mt7620_mii_write(gsw, 0, 16, 0x1111);
+ _mt7620_mii_write(gsw, 1, 16, 0x1010);
Index: linux-3.14.18/drivers/net/ethernet/ralink/gsw_mt7620a.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-3.14.18/drivers/net/ethernet/ralink/gsw_mt7620a.h 2014-10-28 10:43:11.890131215 +0100
++++ linux-3.14.18/drivers/net/ethernet/ralink/gsw_mt7620a.h 2014-10-29 20:25:55.433357519 +0100
@@ -0,0 +1,30 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
Index: linux-3.14.18/drivers/net/ethernet/ralink/mdio.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-3.14.18/drivers/net/ethernet/ralink/mdio.c 2014-10-28 10:43:11.890131215 +0100
-@@ -0,0 +1,244 @@
++++ linux-3.14.18/drivers/net/ethernet/ralink/mdio.c 2014-10-29 20:25:55.433357519 +0100
+@@ -0,0 +1,275 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ return 0;
+}
+
++static void phy_init(struct fe_priv *priv, struct phy_device *phy)
++{
++ phy_attach(priv->netdev, dev_name(&phy->dev), PHY_INTERFACE_MODE_MII);
++
++ phy->autoneg = AUTONEG_ENABLE;
++ phy->speed = 0;
++ phy->duplex = 0;
++ phy->supported &= PHY_BASIC_FEATURES;
++ phy->advertising = phy->supported | ADVERTISED_Autoneg;
++
++ phy_start_aneg(phy);
++}
++
+static int fe_phy_connect(struct fe_priv *priv)
+{
++ int i;
++
++ for (i = 0; i < 8; i++) {
++ if (priv->phy->phy_node[i]) {
++ if (!priv->phy_dev) {
++ priv->phy_dev = priv->phy->phy[i];
++ priv->phy_flags = FE_PHY_FLAG_PORT;
++ }
++ } else if (priv->mii_bus && priv->mii_bus->phy_map[i]) {
++ phy_init(priv, priv->mii_bus->phy_map[i]);
++ if (!priv->phy_dev) {
++ priv->phy_dev = priv->mii_bus->phy_map[i];
++ priv->phy_flags = FE_PHY_FLAG_ATTACH;
++ }
++ }
++ }
++
+ return 0;
+}
+
+ spin_unlock_irqrestore(&priv->phy->lock, flags);
+ } else if (priv->phy->phy[i]) {
+ phy_disconnect(priv->phy->phy[i]);
++ } else if (priv->mii_bus && priv->mii_bus->phy_map[i]) {
++ phy_detach(priv->mii_bus->phy_map[i]);
+ }
+}
+
+ priv->mii_bus->read = priv->soc->mdio_read;
+ priv->mii_bus->write = priv->soc->mdio_write;
+ priv->mii_bus->reset = fe_mdio_reset;
-+ priv->mii_bus->irq = priv->mii_irq;
+ priv->mii_bus->priv = priv;
+ priv->mii_bus->parent = priv->device;
+
Index: linux-3.14.18/drivers/net/ethernet/ralink/mdio.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-3.14.18/drivers/net/ethernet/ralink/mdio.h 2014-10-28 10:43:11.890131215 +0100
++++ linux-3.14.18/drivers/net/ethernet/ralink/mdio.h 2014-10-29 20:25:55.433357519 +0100
@@ -0,0 +1,29 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
Index: linux-3.14.18/drivers/net/ethernet/ralink/mdio_rt2880.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-3.14.18/drivers/net/ethernet/ralink/mdio_rt2880.c 2014-10-28 10:43:11.890131215 +0100
++++ linux-3.14.18/drivers/net/ethernet/ralink/mdio_rt2880.c 2014-10-29 20:25:55.433357519 +0100
@@ -0,0 +1,232 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
Index: linux-3.14.18/drivers/net/ethernet/ralink/mdio_rt2880.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-3.14.18/drivers/net/ethernet/ralink/mdio_rt2880.h 2014-10-28 10:43:11.890131215 +0100
++++ linux-3.14.18/drivers/net/ethernet/ralink/mdio_rt2880.h 2014-10-29 20:25:55.433357519 +0100
@@ -0,0 +1,26 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
Index: linux-3.14.18/drivers/net/ethernet/ralink/mt7530.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-3.14.18/drivers/net/ethernet/ralink/mt7530.c 2014-10-29 14:58:52.303587070 +0100
++++ linux-3.14.18/drivers/net/ethernet/ralink/mt7530.c 2014-10-29 20:32:49.124597652 +0100
@@ -0,0 +1,582 @@
+/*
+ * This program is free software; you can redistribute it and/or
Index: linux-3.14.18/drivers/net/ethernet/ralink/mt7530.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-3.14.18/drivers/net/ethernet/ralink/mt7530.h 2014-10-28 12:34:25.187728544 +0100
++++ linux-3.14.18/drivers/net/ethernet/ralink/mt7530.h 2014-10-29 20:30:49.296185172 +0100
@@ -0,0 +1,20 @@
+/*
+ * This program is free software; you can redistribute it and/or
Index: linux-3.14.18/drivers/net/ethernet/ralink/ralink_soc_eth.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-3.14.18/drivers/net/ethernet/ralink/ralink_soc_eth.c 2014-10-28 10:43:11.890131215 +0100
-@@ -0,0 +1,844 @@
++++ linux-3.14.18/drivers/net/ethernet/ralink/ralink_soc_eth.c 2014-10-29 20:25:55.433357519 +0100
+@@ -0,0 +1,1331 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+#include <linux/of_mdio.h>
+#include <linux/if_vlan.h>
+#include <linux/reset.h>
++#include <linux/tcp.h>
++#include <linux/io.h>
+
+#include <asm/mach-ralink/ralink_regs.h>
+
+#include "ralink_soc_eth.h"
+#include "esw_rt3052.h"
+#include "mdio.h"
++#include "ralink_ethtool.h"
+
+#define TX_TIMEOUT (2 * HZ)
+#define MAX_RX_LENGTH 1536
++#define FE_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
++#define FE_RX_HLEN (FE_RX_OFFSET + VLAN_ETH_HLEN + VLAN_HLEN + \
++ ETH_FCS_LEN)
+#define DMA_DUMMY_DESC 0xffffffff
++#define FE_DEFAULT_MSG_ENABLE \
++ (NETIF_MSG_DRV | \
++ NETIF_MSG_PROBE | \
++ NETIF_MSG_LINK | \
++ NETIF_MSG_TIMER | \
++ NETIF_MSG_IFDOWN | \
++ NETIF_MSG_IFUP | \
++ NETIF_MSG_RX_ERR | \
++ NETIF_MSG_TX_ERR)
++
++#define TX_DMA_DESP2_DEF (TX_DMA_LS0 | TX_DMA_DONE)
++#define TX_DMA_DESP4_DEF (TX_DMA_QN(3) | TX_DMA_PN(1))
++#define NEXT_TX_DESP_IDX(X) (((X) + 1) & (NUM_DMA_DESC - 1))
++#define NEXT_RX_DESP_IDX(X) (((X) + 1) & (NUM_DMA_DESC - 1))
++
++static int fe_msg_level = -1;
++module_param_named(msg_level, fe_msg_level, int, 0);
++MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
+
+static const u32 fe_reg_table_default[FE_REG_COUNT] = {
+ [FE_REG_PDMA_GLO_CFG] = FE_PDMA_GLO_CFG,
+ [FE_REG_RX_CALC_IDX0] = FE_RX_CALC_IDX0,
+ [FE_REG_FE_INT_ENABLE] = FE_FE_INT_ENABLE,
+ [FE_REG_FE_INT_STATUS] = FE_FE_INT_STATUS,
++ [FE_REG_FE_DMA_VID_BASE] = FE_DMA_VID0,
++ [FE_REG_FE_COUNTER_BASE] = FE_GDMA1_TX_GBCNT,
+};
+
+static const u32 *fe_reg_table = fe_reg_table_default;
+ return __raw_readl(fe_base + reg);
+}
+
-+static inline void fe_reg_w32(u32 val, enum fe_reg reg)
++void fe_reg_w32(u32 val, enum fe_reg reg)
+{
+ fe_w32(val, fe_reg_table[reg]);
+}
+
-+static inline u32 fe_reg_r32(enum fe_reg reg)
++u32 fe_reg_r32(enum fe_reg reg)
+{
+ return fe_r32(fe_reg_table[reg]);
+}
+ return ret;
+}
+
-+static struct sk_buff* fe_alloc_skb(struct fe_priv *priv)
++static inline int fe_max_frag_size(int mtu)
+{
-+ struct sk_buff *skb;
++ return SKB_DATA_ALIGN(FE_RX_HLEN + mtu) +
++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
++}
+
-+ skb = netdev_alloc_skb(priv->netdev, MAX_RX_LENGTH + NET_IP_ALIGN);
-+ if (!skb)
-+ return NULL;
++static inline int fe_max_buf_size(int frag_size)
++{
++ return frag_size - FE_RX_HLEN -
++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
++}
++
++static void fe_clean_rx(struct fe_priv *priv)
++{
++ int i;
+
-+ skb_reserve(skb, NET_IP_ALIGN);
++ if (priv->rx_data) {
++ for (i = 0; i < NUM_DMA_DESC; i++)
++ if (priv->rx_data[i]) {
++ if (priv->rx_dma && priv->rx_dma[i].rxd1)
++ dma_unmap_single(&priv->netdev->dev,
++ priv->rx_dma[i].rxd1,
++ priv->rx_buf_size,
++ DMA_FROM_DEVICE);
++ put_page(virt_to_head_page(priv->rx_data[i]));
++ }
++
++ kfree(priv->rx_data);
++ priv->rx_data = NULL;
++ }
+
-+ return skb;
++ if (priv->rx_dma) {
++ dma_free_coherent(&priv->netdev->dev,
++ NUM_DMA_DESC * sizeof(*priv->rx_dma),
++ priv->rx_dma,
++ priv->rx_phys);
++ priv->rx_dma = NULL;
++ }
+}
+
+static int fe_alloc_rx(struct fe_priv *priv)
+{
-+ int size = NUM_DMA_DESC * sizeof(struct fe_rx_dma);
++ struct net_device *netdev = priv->netdev;
+ int i;
+
-+ priv->rx_dma = dma_alloc_coherent(&priv->netdev->dev, size,
-+ &priv->rx_phys, GFP_ATOMIC);
-+ if (!priv->rx_dma)
-+ return -ENOMEM;
-+
-+ memset(priv->rx_dma, 0, size);
++ priv->rx_data = kcalloc(NUM_DMA_DESC, sizeof(*priv->rx_data),
++ GFP_KERNEL);
++ if (!priv->rx_data)
++ goto no_rx_mem;
+
+ for (i = 0; i < NUM_DMA_DESC; i++) {
-+ priv->rx_skb[i] = fe_alloc_skb(priv);
-+ if (!priv->rx_skb[i])
-+ return -ENOMEM;
++ priv->rx_data[i] = netdev_alloc_frag(priv->frag_size);
++ if (!priv->rx_data[i])
++ goto no_rx_mem;
+ }
+
++ priv->rx_dma = dma_alloc_coherent(&netdev->dev,
++ NUM_DMA_DESC * sizeof(*priv->rx_dma),
++ &priv->rx_phys,
++ GFP_ATOMIC | __GFP_ZERO);
++ if (!priv->rx_dma)
++ goto no_rx_mem;
++
+ for (i = 0; i < NUM_DMA_DESC; i++) {
-+ dma_addr_t dma_addr = dma_map_single(&priv->netdev->dev,
-+ priv->rx_skb[i]->data,
-+ MAX_RX_LENGTH,
-+ DMA_FROM_DEVICE);
++ dma_addr_t dma_addr = dma_map_single(&netdev->dev,
++ priv->rx_data[i] + FE_RX_OFFSET,
++ priv->rx_buf_size,
++ DMA_FROM_DEVICE);
++ if (unlikely(dma_mapping_error(&netdev->dev, dma_addr)))
++ goto no_rx_mem;
+ priv->rx_dma[i].rxd1 = (unsigned int) dma_addr;
+
+ if (priv->soc->rx_dma)
-+ priv->soc->rx_dma(priv, i, MAX_RX_LENGTH);
++ priv->soc->rx_dma(priv, i, priv->rx_buf_size);
+ else
+ priv->rx_dma[i].rxd2 = RX_DMA_LSO;
+ }
+ fe_reg_w32(FE_PST_DRX_IDX0, FE_REG_PDMA_RST_CFG);
+
+ return 0;
++
++no_rx_mem:
++ return -ENOMEM;
++}
++
++static void fe_clean_tx(struct fe_priv *priv)
++{
++ int i;
++
++ if (priv->tx_skb) {
++ for (i = 0; i < NUM_DMA_DESC; i++) {
++ if (priv->tx_skb[i])
++ dev_kfree_skb_any(priv->tx_skb[i]);
++ }
++ kfree(priv->tx_skb);
++ priv->tx_skb = NULL;
++ }
++
++ if (priv->tx_dma) {
++ dma_free_coherent(&priv->netdev->dev,
++ NUM_DMA_DESC * sizeof(*priv->tx_dma),
++ priv->tx_dma,
++ priv->tx_phys);
++ priv->tx_dma = NULL;
++ }
+}
+
+static int fe_alloc_tx(struct fe_priv *priv)
+{
-+ int size = NUM_DMA_DESC * sizeof(struct fe_tx_dma);
+ int i;
+
+ priv->tx_free_idx = 0;
+
-+ priv->tx_dma = dma_alloc_coherent(&priv->netdev->dev, size,
-+ &priv->tx_phys, GFP_ATOMIC);
-+ if (!priv->tx_dma)
-+ return -ENOMEM;
++ priv->tx_skb = kcalloc(NUM_DMA_DESC, sizeof(*priv->tx_skb),
++ GFP_KERNEL);
++ if (!priv->tx_skb)
++ goto no_tx_mem;
+
-+ memset(priv->tx_dma, 0, size);
++ priv->tx_dma = dma_alloc_coherent(&priv->netdev->dev,
++ NUM_DMA_DESC * sizeof(*priv->tx_dma),
++ &priv->tx_phys,
++ GFP_ATOMIC | __GFP_ZERO);
++ if (!priv->tx_dma)
++ goto no_tx_mem;
+
+ for (i = 0; i < NUM_DMA_DESC; i++) {
+ if (priv->soc->tx_dma) {
+ priv->soc->tx_dma(priv, i, NULL);
+ continue;
+ }
-+
-+ priv->tx_dma[i].txd2 = TX_DMA_LSO | TX_DMA_DONE;
-+ priv->tx_dma[i].txd4 = TX_DMA_QN(3) | TX_DMA_PN(1);
++ priv->tx_dma[i].txd2 = TX_DMA_DESP2_DEF;
+ }
++ wmb();
+
+ fe_reg_w32(priv->tx_phys, FE_REG_TX_BASE_PTR0);
+ fe_reg_w32(NUM_DMA_DESC, FE_REG_TX_MAX_CNT0);
+ fe_reg_w32(FE_PST_DTX_IDX0, FE_REG_PDMA_RST_CFG);
+
+ return 0;
++
++no_tx_mem:
++ return -ENOMEM;
++}
++
++static int fe_init_dma(struct fe_priv *priv)
++{
++ int err;
++
++ err = fe_alloc_tx(priv);
++ if (err)
++ return err;
++
++ err = fe_alloc_rx(priv);
++ if (err)
++ return err;
++
++ return 0;
+}
+
+static void fe_free_dma(struct fe_priv *priv)
+{
-+ int i;
++ fe_clean_tx(priv);
++ fe_clean_rx(priv);
+
-+ for (i = 0; i < NUM_DMA_DESC; i++) {
-+ if (priv->rx_skb[i]) {
-+ dma_unmap_single(&priv->netdev->dev, priv->rx_dma[i].rxd1,
-+ MAX_RX_LENGTH, DMA_FROM_DEVICE);
-+ dev_kfree_skb_any(priv->rx_skb[i]);
-+ priv->rx_skb[i] = NULL;
-+ }
++ netdev_reset_queue(priv->netdev);
++}
+
-+ if (priv->tx_skb[i]) {
-+ dev_kfree_skb_any(priv->tx_skb[i]);
-+ priv->tx_skb[i] = NULL;
-+ }
-+ }
++static inline void txd_unmap_single(struct device *dev, struct fe_tx_dma *txd)
++{
++ if (txd->txd1 && TX_DMA_GET_PLEN0(txd->txd2))
++ dma_unmap_single(dev, txd->txd1,
++ TX_DMA_GET_PLEN0(txd->txd2),
++ DMA_TO_DEVICE);
++}
+
-+ if (priv->rx_dma) {
-+ int size = NUM_DMA_DESC * sizeof(struct fe_rx_dma);
-+ dma_free_coherent(&priv->netdev->dev, size, priv->rx_dma,
-+ priv->rx_phys);
++static inline void txd_unmap_page0(struct device *dev, struct fe_tx_dma *txd)
++{
++ if (txd->txd1 && TX_DMA_GET_PLEN0(txd->txd2))
++ dma_unmap_page(dev, txd->txd1,
++ TX_DMA_GET_PLEN0(txd->txd2),
++ DMA_TO_DEVICE);
++}
++
++static inline void txd_unmap_page1(struct device *dev, struct fe_tx_dma *txd)
++{
++ if (txd->txd3 && TX_DMA_GET_PLEN1(txd->txd2))
++ dma_unmap_page(dev, txd->txd3,
++ TX_DMA_GET_PLEN1(txd->txd2),
++ DMA_TO_DEVICE);
++}
++
++void fe_stats_update(struct fe_priv *priv)
++{
++ struct fe_hw_stats *hwstats = priv->hw_stats;
++ unsigned int base = fe_reg_table[FE_REG_FE_COUNTER_BASE];
++
++ u64_stats_update_begin(&hwstats->syncp);
++
++ hwstats->tx_bytes += fe_r32(base);
++ hwstats->tx_packets += fe_r32(base + 0x04);
++ hwstats->tx_skip += fe_r32(base + 0x08);
++ hwstats->tx_collisions += fe_r32(base + 0x0c);
++ hwstats->rx_bytes += fe_r32(base + 0x20);
++ hwstats->rx_packets += fe_r32(base + 0x24);
++ hwstats->rx_overflow += fe_r32(base + 0x28);
++ hwstats->rx_fcs_errors += fe_r32(base + 0x2c);
++ hwstats->rx_short_errors += fe_r32(base + 0x30);
++ hwstats->rx_long_errors += fe_r32(base + 0x34);
++ hwstats->rx_checksum_errors += fe_r32(base + 0x38);
++ hwstats->rx_flow_control_packets += fe_r32(base + 0x3c);
++
++ u64_stats_update_end(&hwstats->syncp);
++}
++
++static struct rtnl_link_stats64 *fe_get_stats64(struct net_device *dev,
++ struct rtnl_link_stats64 *storage)
++{
++ struct fe_priv *priv = netdev_priv(dev);
++ struct fe_hw_stats *hwstats = priv->hw_stats;
++ unsigned int base = fe_reg_table[FE_REG_FE_COUNTER_BASE];
++ unsigned int start;
++
++ if (!base) {
++ netdev_stats_to_stats64(storage, &dev->stats);
++ return storage;
+ }
+
-+ if (priv->tx_dma) {
-+ int size = NUM_DMA_DESC * sizeof(struct fe_tx_dma);
-+ dma_free_coherent(&priv->netdev->dev, size, priv->tx_dma,
-+ priv->tx_phys);
++ if (netif_running(dev) && netif_device_present(dev)) {
++ if (spin_trylock(&hwstats->stats_lock)) {
++ fe_stats_update(priv);
++ spin_unlock(&hwstats->stats_lock);
++ }
+ }
+
-+ netdev_reset_queue(priv->netdev);
++ do {
++ start = u64_stats_fetch_begin_bh(&hwstats->syncp);
++ storage->rx_packets = hwstats->rx_packets;
++ storage->tx_packets = hwstats->tx_packets;
++ storage->rx_bytes = hwstats->rx_bytes;
++ storage->tx_bytes = hwstats->tx_bytes;
++ storage->collisions = hwstats->tx_collisions;
++ storage->rx_length_errors = hwstats->rx_short_errors +
++ hwstats->rx_long_errors;
++ storage->rx_over_errors = hwstats->rx_overflow;
++ storage->rx_crc_errors = hwstats->rx_fcs_errors;
++ storage->rx_errors = hwstats->rx_checksum_errors;
++ storage->tx_aborted_errors = hwstats->tx_skip;
++ } while (u64_stats_fetch_retry_bh(&hwstats->syncp, start));
++
++ storage->tx_errors = priv->netdev->stats.tx_errors;
++ storage->rx_dropped = priv->netdev->stats.rx_dropped;
++ storage->tx_dropped = priv->netdev->stats.tx_dropped;
++
++ return storage;
+}
+
-+static void fe_start_tso(struct sk_buff *skb, struct net_device *dev, unsigned int nr_frags, int idx)
++static int fe_tx_map_dma(struct sk_buff *skb, struct net_device *dev,
++ int idx)
+{
-+ struct fe_priv *priv = netdev_priv(dev);
++ struct fe_priv *priv = netdev_priv(dev);
+ struct skb_frag_struct *frag;
-+ int i;
++ struct fe_tx_dma *txd;
++ dma_addr_t mapped_addr;
++ unsigned int nr_frags;
++ u32 def_txd4, txd2;
++ int i, j, unmap_idx, tx_num;
++
++ txd = &priv->tx_dma[idx];
++ nr_frags = skb_shinfo(skb)->nr_frags;
++ tx_num = 1 + (nr_frags >> 1);
++
++ /* init tx descriptor */
++ if (priv->soc->tx_dma)
++ priv->soc->tx_dma(priv, idx, skb);
++ else
++ txd->txd4 = TX_DMA_DESP4_DEF;
++ def_txd4 = txd->txd4;
++
++ /* use dma_unmap_single to free it */
++ txd->txd4 |= priv->soc->tx_udf_bit;
++
++ /* TX Checksum offload */
++ if (skb->ip_summed == CHECKSUM_PARTIAL)
++ txd->txd4 |= TX_DMA_CHKSUM;
+
++ /* VLAN header offload */
++ if (vlan_tx_tag_present(skb)) {
++ txd->txd4 |= TX_DMA_INS_VLAN |
++ ((vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT) << 4) |
++ (vlan_tx_tag_get(skb) & 0xF);
++ }
++
++ /* TSO: fill MSS info in tcp checksum field */
++ if (skb_is_gso(skb)) {
++ if (skb_cow_head(skb, 0)) {
++ netif_warn(priv, tx_err, dev,
++ "GSO expand head fail.\n");
++ goto err_out;
++ }
++ if (skb_shinfo(skb)->gso_type &
++ (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
++ txd->txd4 |= TX_DMA_TSO;
++ tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
++ }
++ }
++
++ mapped_addr = dma_map_single(&dev->dev, skb->data,
++ skb_headlen(skb), DMA_TO_DEVICE);
++ if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
++ goto err_out;
++ txd->txd1 = mapped_addr;
++ txd2 = TX_DMA_PLEN0(skb_headlen(skb));
++
++ /* TX SG offload */
++ j = idx;
+ for (i = 0; i < nr_frags; i++) {
-+ dma_addr_t mapped_addr;
+
+ frag = &skb_shinfo(skb)->frags[i];
-+ mapped_addr = skb_frag_dma_map(&dev->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE);
-+ if (i % 2) {
-+ idx = (idx + 1) % NUM_DMA_DESC;
-+ priv->tx_dma[idx].txd1 = mapped_addr;
-+ if (i == nr_frags - 1)
-+ priv->tx_dma[idx].txd2 = TX_DMA_LSO | TX_DMA_PLEN0(frag->size);
-+ else
-+ priv->tx_dma[idx].txd2 = TX_DMA_PLEN0(frag->size);
++ mapped_addr = skb_frag_dma_map(&dev->dev, frag, 0,
++ skb_frag_size(frag), DMA_TO_DEVICE);
++ if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
++ goto err_dma;
++
++ if (i & 0x1) {
++ j = NEXT_TX_DESP_IDX(j);
++ txd = &priv->tx_dma[j];
++ txd->txd1 = mapped_addr;
++ txd2 = TX_DMA_PLEN0(frag->size);
++ txd->txd4 = def_txd4;
+ } else {
-+ priv->tx_dma[idx].txd3 = mapped_addr;
-+ if (i == nr_frags - 1)
-+ priv->tx_dma[idx].txd2 |= TX_DMA_LS1 | TX_DMA_PLEN1(frag->size);
-+ else
-+ priv->tx_dma[idx].txd2 |= TX_DMA_PLEN1(frag->size);
++ txd->txd3 = mapped_addr;
++ txd2 |= TX_DMA_PLEN1(frag->size);
++ if (i != (nr_frags -1))
++ txd->txd2 = txd2;
++ priv->tx_skb[j] = (struct sk_buff *) DMA_DUMMY_DESC;
++ }
++ }
++
++ /* set last segment */
++ if (nr_frags & 0x1)
++ txd->txd2 = (txd2 | TX_DMA_LS1);
++ else
++ txd->txd2 = (txd2 | TX_DMA_LS0);
++
++ /* store skb to cleanup */
++ priv->tx_skb[j] = skb;
++
++ wmb();
++ j = NEXT_TX_DESP_IDX(j);
++ fe_reg_w32(j, FE_REG_TX_CTX_IDX0);
++
++ return 0;
++
++err_dma:
++ /* unmap dma */
++ txd = &priv->tx_dma[idx];
++ txd_unmap_single(&dev->dev, txd);
++
++ j = idx;
++ unmap_idx = i;
++ for (i = 0; i < unmap_idx; i++) {
++ if (i & 0x1) {
++ j = NEXT_TX_DESP_IDX(j);
++ txd = &priv->tx_dma[j];
++ txd_unmap_page0(&dev->dev, txd);
++ } else {
++ txd_unmap_page1(&dev->dev, txd);
++ }
++ }
++
++err_out:
++ /* reinit descriptors and skb */
++ j = idx;
++ for (i = 0; i < tx_num; i++) {
++ priv->tx_dma[j].txd2 = TX_DMA_DESP2_DEF;
++ priv->tx_skb[j] = NULL;
++ j = NEXT_TX_DESP_IDX(j);
++ }
++ wmb();
++
++ return -1;
++}
++
++static inline int fe_skb_padto(struct sk_buff *skb, struct fe_priv *priv) {
++ unsigned int len;
++ int ret;
++
++ ret = 0;
++ if (unlikely(skb->len < VLAN_ETH_ZLEN)) {
++ if ((priv->flags & FE_FLAG_PADDING_64B) &&
++ !(priv->flags & FE_FLAG_PADDING_BUG))
++ return ret;
++
++ if (vlan_tx_tag_present(skb))
++ len = ETH_ZLEN;
++ else if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
++ len = VLAN_ETH_ZLEN;
++ else if(!(priv->flags & FE_FLAG_PADDING_64B))
++ len = ETH_ZLEN;
++ else
++ return ret;
++
++ if (skb->len < len) {
++ if ((ret = skb_pad(skb, len - skb->len)) < 0)
++ return ret;
++ skb->len = len;
++ skb_set_tail_pointer(skb, len);
+ }
+ }
++
++ return ret;
++}
++
++static inline u32 fe_empty_txd(struct fe_priv *priv, u32 tx_fill_idx)
++{
++ return (u32)(NUM_DMA_DESC - ((tx_fill_idx - priv->tx_free_idx) &
++ (NUM_DMA_DESC - 1)));
+}
+
+static int fe_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
-+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+ struct fe_priv *priv = netdev_priv(dev);
-+ dma_addr_t mapped_addr;
-+ u32 tx_next, tx, tx_num = 1;
-+ int i;
++ struct net_device_stats *stats = &dev->stats;
++ u32 tx;
++ int tx_num;
+
-+ if (priv->soc->min_pkt_len) {
-+ if (skb->len < priv->soc->min_pkt_len) {
-+ if (skb_padto(skb, priv->soc->min_pkt_len)) {
-+ printk(KERN_ERR
-+ "fe_eth: skb_padto failed\n");
-+ kfree_skb(skb);
-+ return 0;
-+ }
-+ skb_put(skb, priv->soc->min_pkt_len - skb->len);
-+ }
++ if (fe_skb_padto(skb, priv)) {
++ netif_warn(priv, tx_err, dev, "tx padding failed!\n");
++ return NETDEV_TX_OK;
+ }
+
-+ dev->trans_start = jiffies;
-+ mapped_addr = dma_map_single(&priv->netdev->dev, skb->data,
-+ skb->len, DMA_TO_DEVICE);
-+
+ spin_lock(&priv->page_lock);
-+
++ tx_num = 1 + (skb_shinfo(skb)->nr_frags >> 1);
+ tx = fe_reg_r32(FE_REG_TX_CTX_IDX0);
-+ if (priv->soc->tso && nr_frags)
-+ tx_num += nr_frags >> 1;
-+ tx_next = (tx + tx_num) % NUM_DMA_DESC;
-+ if ((priv->tx_skb[tx]) || (priv->tx_skb[tx_next]) ||
-+ !(priv->tx_dma[tx].txd2 & TX_DMA_DONE) ||
-+ !(priv->tx_dma[tx_next].txd2 & TX_DMA_DONE))
++ if (unlikely(fe_empty_txd(priv, tx) <= tx_num))
+ {
++ netif_stop_queue(dev);
+ spin_unlock(&priv->page_lock);
-+ dev->stats.tx_dropped++;
++ netif_err(priv, tx_queued,dev,
++ "Tx Ring full when queue awake!\n");
++ return NETDEV_TX_BUSY;
++ }
++
++ if (fe_tx_map_dma(skb, dev, tx) < 0) {
+ kfree_skb(skb);
+
-+ return NETDEV_TX_OK;
++ stats->tx_dropped++;
++ } else {
++ netdev_sent_queue(dev, skb->len);
++ skb_tx_timestamp(skb);
++
++ stats->tx_packets++;
++ stats->tx_bytes += skb->len;
+ }
+
-+ if (priv->soc->tso) {
-+ int t = tx_num;
++ spin_unlock(&priv->page_lock);
+
-+ priv->tx_skb[(tx + t - 1) % NUM_DMA_DESC] = skb;
-+ while (--t)
-+ priv->tx_skb[(tx + t - 1) % NUM_DMA_DESC] = (struct sk_buff *) DMA_DUMMY_DESC;
-+ } else {
-+ priv->tx_skb[tx] = skb;
++ return NETDEV_TX_OK;
++}
++
++static inline void fe_rx_vlan(struct sk_buff *skb)
++{
++ struct ethhdr *ehdr;
++ u16 vlanid;
++
++ if (!__vlan_get_tag(skb, &vlanid)) {
++ /* pop the vlan tag */
++ ehdr = (struct ethhdr *)skb->data;
++ memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
++ skb_pull(skb, VLAN_HLEN);
++ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
+ }
-+ priv->tx_dma[tx].txd1 = (unsigned int) mapped_addr;
-+ wmb();
++}
+
-+ priv->tx_dma[tx].txd4 &= ~0x80;
-+ if (priv->soc->tx_dma)
-+ priv->soc->tx_dma(priv, tx, skb);
-+ else
-+ priv->tx_dma[tx].txd2 = TX_DMA_LSO | TX_DMA_PLEN0(skb->len);
++static int fe_poll_rx(struct napi_struct *napi, int budget,
++ struct fe_priv *priv)
++{
++ struct net_device *netdev = priv->netdev;
++ struct net_device_stats *stats = &netdev->stats;
++ struct fe_soc_data *soc = priv->soc;
++ u32 checksum_bit;
++ int idx = fe_reg_r32(FE_REG_RX_CALC_IDX0);
++ struct sk_buff *skb;
++ u8 *data, *new_data;
++ struct fe_rx_dma *rxd;
++ int done = 0;
++ bool rx_vlan = netdev->features & NETIF_F_HW_VLAN_CTAG_RX;
+
-+ if (skb->ip_summed == CHECKSUM_PARTIAL)
-+ priv->tx_dma[tx].txd4 |= TX_DMA_CHKSUM;
++ if (netdev->features & NETIF_F_RXCSUM)
++ checksum_bit = soc->checksum_bit;
+ else
-+ priv->tx_dma[tx].txd4 &= ~TX_DMA_CHKSUM;
-+
-+ if (priv->soc->tso)
-+ fe_start_tso(skb, dev, nr_frags, tx);
-+
-+ if (priv->soc->tso && (skb_shinfo(skb)->gso_segs > 1)) {
-+ struct iphdr *iph = NULL;
-+ struct tcphdr *th = NULL;
-+ struct ipv6hdr *ip6h = NULL;
-+
-+ ip6h = (struct ipv6hdr *) skb_network_header(skb);
-+ iph = (struct iphdr *) skb_network_header(skb);
-+ if ((iph->version == 4) && (iph->protocol == IPPROTO_TCP)) {
-+ th = (struct tcphdr *)skb_transport_header(skb);
-+ priv->tx_dma[tx].txd4 |= BIT(28);
-+ th->check = htons(skb_shinfo(skb)->gso_size);
-+ dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE);
-+ } else if ((ip6h->version == 6) && (ip6h->nexthdr == NEXTHDR_TCP)) {
-+ th = (struct tcphdr *)skb_transport_header(skb);
-+ priv->tx_dma[tx].txd4 |= BIT(28);
-+ th->check = htons(skb_shinfo(skb)->gso_size);
-+ dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE);
-+ }
-+ }
++ checksum_bit = 0;
+
-+ for (i = 0; i < tx_num; i++)
-+ dma_cache_sync(NULL, &priv->tx_dma[tx + i], sizeof(struct fe_tx_dma), DMA_TO_DEVICE);
++ while (done < budget) {
++ unsigned int pktlen;
++ dma_addr_t dma_addr;
++ idx = NEXT_RX_DESP_IDX(idx);
++ rxd = &priv->rx_dma[idx];
++ data = priv->rx_data[idx];
+
-+ dev->stats.tx_packets++;
-+ dev->stats.tx_bytes += skb->len;
++ if (!(rxd->rxd2 & RX_DMA_DONE))
++ break;
+
-+ wmb();
-+ fe_reg_w32(tx_next, FE_REG_TX_CTX_IDX0);
-+ netdev_sent_queue(dev, skb->len);
++ /* alloc new buffer */
++ new_data = netdev_alloc_frag(priv->frag_size);
++ if (unlikely(!new_data)) {
++ stats->rx_dropped++;
++ goto release_desc;
++ }
++ dma_addr = dma_map_single(&netdev->dev,
++ new_data + FE_RX_OFFSET,
++ priv->rx_buf_size,
++ DMA_FROM_DEVICE);
++ if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
++ put_page(virt_to_head_page(new_data));
++ goto release_desc;
++ }
+
-+ spin_unlock(&priv->page_lock);
++ /* receive data */
++ skb = build_skb(data, priv->frag_size);
++ if (unlikely(!skb)) {
++ put_page(virt_to_head_page(new_data));
++ goto release_desc;
++ }
++ skb_reserve(skb, FE_RX_OFFSET);
++
++ dma_unmap_single(&netdev->dev, rxd->rxd1,
++ priv->rx_buf_size, DMA_FROM_DEVICE);
++ pktlen = RX_DMA_PLEN0(rxd->rxd2);
++ skb_put(skb, pktlen);
++ skb->dev = netdev;
++ if (rxd->rxd4 & checksum_bit) {
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ } else {
++ skb_checksum_none_assert(skb);
++ }
++ if (rx_vlan)
++ fe_rx_vlan(skb);
++ skb->protocol = eth_type_trans(skb, netdev);
+
-+ return NETDEV_TX_OK;
-+}
++ stats->rx_packets++;
++ stats->rx_bytes += pktlen;
+
-+static int fe_poll_rx(struct napi_struct *napi, int budget)
-+{
-+ struct fe_priv *priv = container_of(napi, struct fe_priv, rx_napi);
-+ int idx = fe_reg_r32(FE_REG_RX_CALC_IDX0);
-+ int complete = 0;
-+ int rx = 0;
++ napi_gro_receive(napi, skb);
+
-+ while ((rx < budget) && !complete) {
-+ idx = (idx + 1) % NUM_DMA_DESC;
++ priv->rx_data[idx] = new_data;
++ rxd->rxd1 = (unsigned int) dma_addr;
+
-+ if (priv->rx_dma[idx].rxd2 & RX_DMA_DONE) {
-+ struct sk_buff *new_skb = fe_alloc_skb(priv);
++release_desc:
++ if (soc->rx_dma)
++ soc->rx_dma(priv, idx, priv->rx_buf_size);
++ else
++ rxd->rxd2 = RX_DMA_LSO;
+
-+ if (new_skb) {
-+ int pktlen = RX_DMA_PLEN0(priv->rx_dma[idx].rxd2);
-+ dma_addr_t dma_addr;
++ wmb();
++ fe_reg_w32(idx, FE_REG_RX_CALC_IDX0);
++ done++;
++ }
+
-+ dma_unmap_single(&priv->netdev->dev, priv->rx_dma[idx].rxd1,
-+ MAX_RX_LENGTH, DMA_FROM_DEVICE);
++ return done;
++}
+
-+ skb_put(priv->rx_skb[idx], pktlen);
-+ priv->rx_skb[idx]->dev = priv->netdev;
-+ priv->rx_skb[idx]->protocol = eth_type_trans(priv->rx_skb[idx], priv->netdev);
-+ if (priv->rx_dma[idx].rxd4 & priv->soc->checksum_bit)
-+ priv->rx_skb[idx]->ip_summed = CHECKSUM_UNNECESSARY;
-+ else
-+ priv->rx_skb[idx]->ip_summed = CHECKSUM_NONE;
-+ priv->netdev->stats.rx_packets++;
-+ priv->netdev->stats.rx_bytes += pktlen;
++static int fe_poll_tx(struct fe_priv *priv, int budget)
++{
++ struct net_device *netdev = priv->netdev;
++ struct device *dev = &netdev->dev;
++ unsigned int bytes_compl = 0;
++ struct sk_buff *skb;
++ struct fe_tx_dma *txd;
++ int done = 0, idx;
++ u32 udf_bit = priv->soc->tx_udf_bit;
+
-+#ifdef CONFIG_INET_LRO
-+ if (priv->soc->get_skb_header && priv->rx_skb[idx]->ip_summed == CHECKSUM_UNNECESSARY)
-+ lro_receive_skb(&priv->lro_mgr, priv->rx_skb[idx], NULL);
-+ else
-+#endif
-+ netif_receive_skb(priv->rx_skb[idx]);
++ idx = priv->tx_free_idx;
++ while (done < budget) {
++ txd = &priv->tx_dma[idx];
++ skb = priv->tx_skb[idx];
+
-+ priv->rx_skb[idx] = new_skb;
++ if (!(txd->txd2 & TX_DMA_DONE) || !skb)
++ break;
+
-+ dma_addr = dma_map_single(&priv->netdev->dev,
-+ new_skb->data,
-+ MAX_RX_LENGTH,
-+ DMA_FROM_DEVICE);
-+ priv->rx_dma[idx].rxd1 = (unsigned int) dma_addr;
-+ wmb();
-+ } else {
-+ priv->netdev->stats.rx_dropped++;
-+ }
++ txd_unmap_page1(dev, txd);
+
-+ if (priv->soc->rx_dma)
-+ priv->soc->rx_dma(priv, idx, MAX_RX_LENGTH);
-+ else
-+ priv->rx_dma[idx].rxd2 = RX_DMA_LSO;
-+ fe_reg_w32(idx, FE_REG_RX_CALC_IDX0);
++ if (txd->txd4 & udf_bit)
++ txd_unmap_single(dev, txd);
++ else
++ txd_unmap_page0(dev, txd);
+
-+ rx++;
-+ } else {
-+ complete = 1;
++ if (skb != (struct sk_buff *) DMA_DUMMY_DESC) {
++ bytes_compl += skb->len;
++ dev_kfree_skb_any(skb);
++ done++;
+ }
++ priv->tx_skb[idx] = NULL;
++ idx = NEXT_TX_DESP_IDX(idx);
+ }
++ priv->tx_free_idx = idx;
+
-+#ifdef CONFIG_INET_LRO
-+ if (priv->soc->get_skb_header)
-+ lro_flush_all(&priv->lro_mgr);
-+#endif
-+ if (complete) {
-+ napi_complete(&priv->rx_napi);
-+ fe_int_enable(priv->soc->rx_dly_int);
++ if (!done)
++ return 0;
++
++ netdev_completed_queue(netdev, done, bytes_compl);
++ if (unlikely(netif_queue_stopped(netdev) &&
++ netif_carrier_ok(netdev))) {
++ netif_wake_queue(netdev);
+ }
+
-+ return rx;
++ return done;
+}
+
-+static void fe_tx_housekeeping(unsigned long ptr)
++static int fe_poll(struct napi_struct *napi, int budget)
+{
-+ struct net_device *dev = (struct net_device*)ptr;
-+ struct fe_priv *priv = netdev_priv(dev);
-+ unsigned int bytes_compl = 0;
-+ unsigned int pkts_compl = 0;
-+
-+ spin_lock(&priv->page_lock);
-+ while (1) {
-+ struct fe_tx_dma *txd;
++ struct fe_priv *priv = container_of(napi, struct fe_priv, rx_napi);
++ struct fe_hw_stats *hwstat = priv->hw_stats;
++ int tx_done, rx_done;
++ u32 status, mask;
++ u32 tx_intr, rx_intr;
+
-+ txd = &priv->tx_dma[priv->tx_free_idx];
++ status = fe_reg_r32(FE_REG_FE_INT_STATUS);
++ tx_intr = priv->soc->tx_dly_int;
++ rx_intr = priv->soc->rx_dly_int;
++ tx_done = rx_done = 0;
++
++poll_again:
++ if (status & tx_intr) {
++ tx_done += fe_poll_tx(priv, budget - tx_done);
++ if (tx_done < budget) {
++ fe_reg_w32(tx_intr, FE_REG_FE_INT_STATUS);
++ }
++ status = fe_reg_r32(FE_REG_FE_INT_STATUS);
++ }
+
-+ if (!(txd->txd2 & TX_DMA_DONE) || !(priv->tx_skb[priv->tx_free_idx]))
-+ break;
++ if (status & rx_intr) {
++ rx_done += fe_poll_rx(napi, budget - rx_done, priv);
++ if (rx_done < budget) {
++ fe_reg_w32(rx_intr, FE_REG_FE_INT_STATUS);
++ }
++ }
+
-+ if (priv->tx_skb[priv->tx_free_idx] != (struct sk_buff *) DMA_DUMMY_DESC) {
-+ bytes_compl += priv->tx_skb[priv->tx_free_idx]->len;
-+ dev_kfree_skb_irq(priv->tx_skb[priv->tx_free_idx]);
++ if (unlikely(hwstat && (status & FE_CNT_GDM_AF))) {
++ if (spin_trylock(&hwstat->stats_lock)) {
++ fe_stats_update(priv);
++ spin_unlock(&hwstat->stats_lock);
+ }
-+ pkts_compl++;
-+ priv->tx_skb[priv->tx_free_idx] = NULL;
-+ priv->tx_free_idx++;
-+ if (priv->tx_free_idx >= NUM_DMA_DESC)
-+ priv->tx_free_idx = 0;
++ fe_reg_w32(FE_CNT_GDM_AF, FE_REG_FE_INT_STATUS);
++ }
++
++ if (unlikely(netif_msg_intr(priv))) {
++ mask = fe_reg_r32(FE_REG_FE_INT_ENABLE);
++ netdev_info(priv->netdev,
++ "done tx %d, rx %d, intr 0x%x/0x%x\n",
++ tx_done, rx_done, status, mask);
+ }
+
-+ netdev_completed_queue(priv->netdev, pkts_compl, bytes_compl);
-+ spin_unlock(&priv->page_lock);
++ if ((tx_done < budget) && (rx_done < budget)) {
++ status = fe_reg_r32(FE_REG_FE_INT_STATUS);
++ if (status & (tx_intr | rx_intr )) {
++ goto poll_again;
++ }
++ napi_complete(napi);
++ fe_int_enable(tx_intr | rx_intr);
++ }
+
-+ fe_int_enable(priv->soc->tx_dly_int);
++ return rx_done;
+}
+
+static void fe_tx_timeout(struct net_device *dev)
+{
+ struct fe_priv *priv = netdev_priv(dev);
+
-+ tasklet_schedule(&priv->tx_tasklet);
+ priv->netdev->stats.tx_errors++;
-+ netdev_err(dev, "transmit timed out, waking up the queue\n");
++ netif_err(priv, tx_err, dev,
++ "transmit timed out, waking up the queue\n");
++ netif_info(priv, drv, dev, ": dma_cfg:%08x, free_idx:%d, " \
++ "dma_ctx_idx=%u, dma_crx_idx=%u\n",
++ fe_reg_r32(FE_REG_PDMA_GLO_CFG), priv->tx_free_idx,
++ fe_reg_r32(FE_REG_TX_CTX_IDX0),
++ fe_reg_r32(FE_REG_RX_CALC_IDX0));
+ netif_wake_queue(dev);
+}
+
+static irqreturn_t fe_handle_irq(int irq, void *dev)
+{
+ struct fe_priv *priv = netdev_priv(dev);
-+ unsigned int status;
-+ unsigned int mask;
++ u32 status, dly_int;
+
+ status = fe_reg_r32(FE_REG_FE_INT_STATUS);
-+ mask = fe_reg_r32(FE_REG_FE_INT_ENABLE);
+
-+ if (!(status & mask))
++ if (unlikely(!status))
+ return IRQ_NONE;
+
-+ if (status & priv->soc->rx_dly_int) {
-+ fe_int_disable(priv->soc->rx_dly_int);
++ dly_int = (priv->soc->rx_dly_int | priv->soc->tx_dly_int);
++ if (likely(status & dly_int)) {
++ fe_int_disable(dly_int);
+ napi_schedule(&priv->rx_napi);
++ } else {
++ fe_reg_w32(status, FE_REG_FE_INT_STATUS);
+ }
+
-+ if (status & priv->soc->tx_dly_int) {
-+ fe_int_disable(priv->soc->tx_dly_int);
-+ tasklet_schedule(&priv->tx_tasklet);
++ return IRQ_HANDLED;
++}
++
++int fe_set_clock_cycle(struct fe_priv *priv)
++{
++ unsigned long sysclk = priv->sysclk;
++
++ if (!sysclk) {
++ return -EINVAL;
+ }
+
-+ fe_reg_w32(status, FE_REG_FE_INT_STATUS);
++ sysclk /= FE_US_CYC_CNT_DIVISOR;
++ sysclk <<= FE_US_CYC_CNT_SHIFT;
+
-+ return IRQ_HANDLED;
++ fe_w32((fe_r32(FE_FE_GLO_CFG) &
++ ~(FE_US_CYC_CNT_MASK << FE_US_CYC_CNT_SHIFT)) |
++ sysclk,
++ FE_FE_GLO_CFG);
++ return 0;
++}
++
++void fe_fwd_config(struct fe_priv *priv)
++{
++ u32 fwd_cfg;
++
++ fwd_cfg = fe_r32(FE_GDMA1_FWD_CFG);
++
++ /* disable jumbo frame */
++ if (priv->flags & FE_FLAG_JUMBO_FRAME)
++ fwd_cfg &= ~FE_GDM1_JMB_EN;
++
++ /* set unicast/multicast/broadcast frame to cpu */
++ fwd_cfg &= ~0xffff;
++
++ fe_w32(fwd_cfg, FE_GDMA1_FWD_CFG);
++}
++
++static void fe_rxcsum_config(bool enable)
++{
++ if (enable)
++ fe_w32(fe_r32(FE_GDMA1_FWD_CFG) | (FE_GDM1_ICS_EN |
++ FE_GDM1_TCS_EN | FE_GDM1_UCS_EN),
++ FE_GDMA1_FWD_CFG);
++ else
++ fe_w32(fe_r32(FE_GDMA1_FWD_CFG) & ~(FE_GDM1_ICS_EN |
++ FE_GDM1_TCS_EN | FE_GDM1_UCS_EN),
++ FE_GDMA1_FWD_CFG);
++}
++
++static void fe_txcsum_config(bool enable)
++{
++ if (enable)
++ fe_w32(fe_r32(FE_CDMA_CSG_CFG) | (FE_ICS_GEN_EN |
++ FE_TCS_GEN_EN | FE_UCS_GEN_EN),
++ FE_CDMA_CSG_CFG);
++ else
++ fe_w32(fe_r32(FE_CDMA_CSG_CFG) & ~(FE_ICS_GEN_EN |
++ FE_TCS_GEN_EN | FE_UCS_GEN_EN),
++ FE_CDMA_CSG_CFG);
++}
++
++void fe_csum_config(struct fe_priv *priv)
++{
++ struct net_device *dev = priv_netdev(priv);
++
++ fe_txcsum_config((dev->features & NETIF_F_IP_CSUM));
++ fe_rxcsum_config((dev->features & NETIF_F_RXCSUM));
+}
+
+static int fe_hw_init(struct net_device *dev)
+{
+ struct fe_priv *priv = netdev_priv(dev);
-+ int err;
++ int i, err;
+
+ err = devm_request_irq(priv->device, dev->irq, fe_handle_irq, 0,
+ dev_name(priv->device), dev);
+ if (err)
+ return err;
+
-+ err = fe_alloc_rx(priv);
-+ if (!err)
-+ err = fe_alloc_tx(priv);
-+ if (err)
-+ return err;
-+
+ if (priv->soc->set_mac)
+ priv->soc->set_mac(priv, dev->dev_addr);
+ else
+
+ fe_int_disable(priv->soc->tx_dly_int | priv->soc->rx_dly_int);
+
-+ tasklet_init(&priv->tx_tasklet, fe_tx_housekeeping, (unsigned long)dev);
++	/* frame engine will push the VLAN tag according to the VIDX field in the Tx descriptor */
++ if (fe_reg_table[FE_REG_FE_DMA_VID_BASE])
++ for (i = 0; i < 16; i += 2)
++ fe_w32(((i + 1) << 16) + i,
++ fe_reg_table[FE_REG_FE_DMA_VID_BASE] +
++ (i * 2));
+
-+ if (priv->soc->fwd_config) {
-+ priv->soc->fwd_config(priv);
-+ } else {
-+ unsigned long sysclk = priv->sysclk;
-+
-+ if (!sysclk) {
-+ netdev_err(dev, "unable to get clock\n");
-+ return -EINVAL;
-+ }
-+
-+ sysclk /= FE_US_CYC_CNT_DIVISOR;
-+ sysclk <<= FE_US_CYC_CNT_SHIFT;
-+
-+ fe_w32((fe_r32(FE_FE_GLO_CFG) &
-+ ~(FE_US_CYC_CNT_MASK << FE_US_CYC_CNT_SHIFT)) | sysclk,
-+ FE_FE_GLO_CFG);
-+
-+ fe_w32(fe_r32(FE_GDMA1_FWD_CFG) & ~0xffff, FE_GDMA1_FWD_CFG);
-+ fe_w32(fe_r32(FE_GDMA1_FWD_CFG) | (FE_GDM1_ICS_EN | FE_GDM1_TCS_EN | FE_GDM1_UCS_EN),
-+ FE_GDMA1_FWD_CFG);
-+ fe_w32(fe_r32(FE_CDMA_CSG_CFG) | (FE_ICS_GEN_EN | FE_TCS_GEN_EN | FE_UCS_GEN_EN),
-+ FE_CDMA_CSG_CFG);
-+ fe_w32(FE_PSE_FQFC_CFG_INIT, FE_PSE_FQ_CFG);
-+ }
++ BUG_ON(!priv->soc->fwd_config);
++ if (priv->soc->fwd_config(priv))
++ netdev_err(dev, "unable to get clock\n");
+
+ fe_w32(1, FE_FE_RST_GL);
+ fe_w32(0, FE_FE_RST_GL);
+ struct fe_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+ u32 val;
++ int err;
++
++ err = fe_init_dma(priv);
++ if (err)
++ goto err_out;
+
+ spin_lock_irqsave(&priv->page_lock, flags);
+ napi_enable(&priv->rx_napi);
+ fe_int_enable(priv->soc->tx_dly_int | priv->soc->rx_dly_int);
+
+ return 0;
++
++err_out:
++ fe_free_dma(priv);
++ return err;
+}
+
+static int fe_stop(struct net_device *dev)
+{
+ struct fe_priv *priv = netdev_priv(dev);
+ unsigned long flags;
++ int i;
+
+ fe_int_disable(priv->soc->tx_dly_int | priv->soc->rx_dly_int);
+
-+ netif_stop_queue(dev);
++ netif_tx_disable(dev);
+
+ if (priv->phy)
+ priv->phy->stop(priv);
+ FE_REG_PDMA_GLO_CFG);
+ spin_unlock_irqrestore(&priv->page_lock, flags);
+
++	/* wait for the TX/RX DMA engines to go idle (up to ~100 ms) */
++ for (i = 0; i < 10; i++) {
++ if (fe_reg_r32(FE_REG_PDMA_GLO_CFG) &
++ (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)) {
++ msleep(10);
++ continue;
++ }
++ break;
++ }
++
++ fe_free_dma(priv);
++
+ return 0;
+}
+
+ if (err)
+ return err;
+
-+ if (priv->phy) {
-+ err = priv->phy->connect(priv);
-+ if (err)
-+ goto err_mdio_cleanup;
-+ }
-+
+ if (priv->soc->port_init)
+ for_each_child_of_node(priv->device->of_node, port)
+ if (of_device_is_compatible(port, "ralink,eth-port") && of_device_is_available(port))
+ priv->soc->port_init(priv, port);
+
++ if (priv->phy) {
++ err = priv->phy->connect(priv);
++ if (err)
++ goto err_phy_disconnect;
++ }
++
+ err = fe_hw_init(dev);
+ if (err)
+ goto err_phy_disconnect;
+err_phy_disconnect:
+ if (priv->phy)
+ priv->phy->disconnect(priv);
-+err_mdio_cleanup:
+ fe_mdio_cleanup(priv);
+
+ return err;
+{
+ struct fe_priv *priv = netdev_priv(dev);
+
-+ tasklet_kill(&priv->tx_tasklet);
-+
+ if (priv->phy)
+ priv->phy->disconnect(priv);
+ fe_mdio_cleanup(priv);
+
+ fe_reg_w32(0, FE_REG_FE_INT_ENABLE);
+ free_irq(dev->irq, dev);
++}
+
-+ fe_free_dma(priv);
++static int fe_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
++{
++ struct fe_priv *priv = netdev_priv(dev);
++
++ if (!priv->phy_dev)
++ return -ENODEV;
++
++ switch (cmd) {
++ case SIOCETHTOOL:
++ return phy_ethtool_ioctl(priv->phy_dev,
++ (void *) ifr->ifr_data);
++ case SIOCGMIIPHY:
++ case SIOCGMIIREG:
++ case SIOCSMIIREG:
++ return phy_mii_ioctl(priv->phy_dev, ifr, cmd);
++ default:
++ break;
++ }
++
++ return -EOPNOTSUPP;
++}
++
++static int fe_change_mtu(struct net_device *dev, int new_mtu)
++{
++ struct fe_priv *priv = netdev_priv(dev);
++ int frag_size, old_mtu;
++ u32 fwd_cfg;
++
++ if (!(priv->flags & FE_FLAG_JUMBO_FRAME))
++ return eth_change_mtu(dev, new_mtu);
++
++ frag_size = fe_max_frag_size(new_mtu);
++ if (new_mtu < 68 || frag_size > PAGE_SIZE)
++ return -EINVAL;
++
++ old_mtu = dev->mtu;
++ dev->mtu = new_mtu;
++
++ /* return early if the buffer sizes will not change */
++ if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
++ return 0;
++ if (old_mtu > ETH_DATA_LEN && new_mtu > ETH_DATA_LEN)
++ return 0;
++
++ if (new_mtu <= ETH_DATA_LEN) {
++ priv->frag_size = fe_max_frag_size(ETH_DATA_LEN);
++ priv->rx_buf_size = fe_max_buf_size(ETH_DATA_LEN);
++ } else {
++ priv->frag_size = PAGE_SIZE;
++ priv->rx_buf_size = fe_max_buf_size(PAGE_SIZE);
++ }
++
++ if (!netif_running(dev))
++ return 0;
++
++ fe_stop(dev);
++ fwd_cfg = fe_r32(FE_GDMA1_FWD_CFG);
++ if (new_mtu <= ETH_DATA_LEN)
++ fwd_cfg &= ~FE_GDM1_JMB_EN;
++ else {
++ fwd_cfg &= ~(FE_GDM1_JMB_LEN_MASK << FE_GDM1_JMB_LEN_SHIFT);
++ fwd_cfg |= (DIV_ROUND_UP(frag_size, 1024) <<
++ FE_GDM1_JMB_LEN_SHIFT) | FE_GDM1_JMB_EN;
++ }
++ fe_w32(fwd_cfg, FE_GDMA1_FWD_CFG);
++
++ return fe_open(dev);
+}
+
+static const struct net_device_ops fe_netdev_ops = {
+ .ndo_open = fe_open,
+ .ndo_stop = fe_stop,
+ .ndo_start_xmit = fe_start_xmit,
-+ .ndo_tx_timeout = fe_tx_timeout,
+ .ndo_set_mac_address = fe_set_mac_address,
-+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
++ .ndo_do_ioctl = fe_do_ioctl,
++ .ndo_change_mtu = fe_change_mtu,
++ .ndo_tx_timeout = fe_tx_timeout,
++ .ndo_get_stats64 = fe_get_stats64,
+};
+
+static int fe_probe(struct platform_device *pdev)
+{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ const struct of_device_id *match;
-+ struct fe_soc_data *soc = NULL;
++ struct fe_soc_data *soc;
+ struct net_device *netdev;
+ struct fe_priv *priv;
+ struct clk *sysclk;
+ match = of_match_device(of_fe_match, &pdev->dev);
+ soc = (struct fe_soc_data *) match->data;
+
-+ if (soc->init_data)
-+ soc->init_data(soc);
+ if (soc->reg_table)
+ fe_reg_table = soc->reg_table;
++ else
++ soc->reg_table = fe_reg_table;
+
+ fe_base = devm_request_and_ioremap(&pdev->dev, res);
-+ if (!fe_base)
-+ return -ENOMEM;
++ if (!fe_base) {
++ err = -EADDRNOTAVAIL;
++ goto err_out;
++ }
+
-+ netdev = alloc_etherdev(sizeof(struct fe_priv));
++ netdev = alloc_etherdev(sizeof(*priv));
+ if (!netdev) {
+ dev_err(&pdev->dev, "alloc_etherdev failed\n");
-+ return -ENOMEM;
++ err = -ENOMEM;
++ goto err_iounmap;
+ }
+
-+ strcpy(netdev->name, "eth%d");
++ SET_NETDEV_DEV(netdev, &pdev->dev);
+ netdev->netdev_ops = &fe_netdev_ops;
+ netdev->base_addr = (unsigned long) fe_base;
+ netdev->watchdog_timeo = TX_TIMEOUT;
-+ netdev->features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
-+
-+ if (fe_reg_table[FE_REG_FE_DMA_VID_BASE])
-+ netdev->features |= NETIF_F_HW_VLAN_CTAG_TX;
-+
-+ if (soc->tso) {
-+ dev_info(&pdev->dev, "Enabling TSO\n");
-+ netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_IPV6_CSUM;
-+ }
-+ netdev->hw_features = netdev->features;
+
+ netdev->irq = platform_get_irq(pdev, 0);
+ if (netdev->irq < 0) {
+ dev_err(&pdev->dev, "no IRQ resource found\n");
-+ kfree(netdev);
-+ return -ENXIO;
++ err = -ENXIO;
++ goto err_free_dev;
+ }
+
++ if (soc->init_data)
++ soc->init_data(soc, netdev);
++ /* fake NETIF_F_HW_VLAN_CTAG_RX for good GRO performance */
++ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
++ netdev->vlan_features = netdev->hw_features &
++ ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
++ netdev->features |= netdev->hw_features;
++
+ priv = netdev_priv(netdev);
-+ memset(priv, 0, sizeof(struct fe_priv));
+ spin_lock_init(&priv->page_lock);
++ if (fe_reg_table[FE_REG_FE_COUNTER_BASE]) {
++ priv->hw_stats = kzalloc(sizeof(*priv->hw_stats), GFP_KERNEL);
++ if (!priv->hw_stats) {
++ err = -ENOMEM;
++ goto err_free_dev;
++ }
++ spin_lock_init(&priv->hw_stats->stats_lock);
++ }
+
+ sysclk = devm_clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(sysclk))
+ priv->netdev = netdev;
+ priv->device = &pdev->dev;
+ priv->soc = soc;
++ priv->msg_enable = netif_msg_init(fe_msg_level, FE_DEFAULT_MSG_ENABLE);
++ priv->frag_size = fe_max_frag_size(ETH_DATA_LEN);
++ priv->rx_buf_size = fe_max_buf_size(ETH_DATA_LEN);
++ if (priv->frag_size > PAGE_SIZE) {
++ dev_err(&pdev->dev, "error frag size.\n");
++ err = -EINVAL;
++ goto err_free_dev;
++ }
++
++ netif_napi_add(netdev, &priv->rx_napi, fe_poll, 32);
++ fe_set_ethtool_ops(netdev);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "error bringing up device\n");
-+ kfree(netdev);
-+ return err;
-+ }
-+ netif_napi_add(netdev, &priv->rx_napi, fe_poll_rx, 32);
-+
-+#ifdef CONFIG_INET_LRO
-+ if (priv->soc->get_skb_header) {
-+ priv->lro_mgr.dev = netdev;
-+ memset(&priv->lro_mgr.stats, 0, sizeof(priv->lro_mgr.stats));
-+ priv->lro_mgr.features = LRO_F_NAPI;
-+ priv->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
-+ priv->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
-+ priv->lro_mgr.max_desc = ARRAY_SIZE(priv->lro_arr);
-+ priv->lro_mgr.max_aggr = 64;
-+ priv->lro_mgr.frag_align_pad = 0;
-+ priv->lro_mgr.lro_arr = priv->lro_arr;
-+ priv->lro_mgr.get_skb_header = priv->soc->get_skb_header;
++ goto err_free_dev;
+ }
-+#endif
+
+ platform_set_drvdata(pdev, netdev);
+
-+ netdev_info(netdev, "done loading\n");
++ netif_info(priv, probe, netdev, "ralink at 0x%08lx, irq %d\n",
++ netdev->base_addr, netdev->irq);
+
+ return 0;
++
++err_free_dev:
++ free_netdev(netdev);
++err_iounmap:
++ devm_iounmap(&pdev->dev, fe_base);
++err_out:
++ return err;
+}
+
+static int fe_remove(struct platform_device *pdev)
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct fe_priv *priv = netdev_priv(dev);
+
-+ netif_stop_queue(dev);
+ netif_napi_del(&priv->rx_napi);
++ if (priv->hw_stats)
++ kfree(priv->hw_stats);
+
+ unregister_netdev(dev);
+ free_netdev(dev);
++ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
+MODULE_DESCRIPTION("Ethernet driver for Ralink SoC");
++MODULE_VERSION(FE_DRV_VERSION);
Index: linux-3.14.18/drivers/net/ethernet/ralink/ralink_soc_eth.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-3.14.18/drivers/net/ethernet/ralink/ralink_soc_eth.h 2014-10-28 10:43:11.894131220 +0100
-@@ -0,0 +1,384 @@
++++ linux-3.14.18/drivers/net/ethernet/ralink/ralink_soc_eth.h 2014-10-29 20:25:55.433357519 +0100
+@@ -0,0 +1,448 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+#include <linux/netdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/phy.h>
-+#include <linux/inet_lro.h>
-+
++#include <linux/ethtool.h>
+
+enum fe_reg {
+ FE_REG_PDMA_GLO_CFG = 0,
+ FE_REG_FE_INT_ENABLE,
+ FE_REG_FE_INT_STATUS,
+ FE_REG_FE_DMA_VID_BASE,
++ FE_REG_FE_COUNTER_BASE,
+ FE_REG_COUNT
+};
+
-+#define NUM_DMA_DESC 0x100
++#define FE_DRV_VERSION "0.1.0"
++
++/* power of 2 to let NEXT_TX_DESP_IDX work */
++#define NUM_DMA_DESC (1 << 7)
++#define MAX_DMA_DESC 0xfff
+
+#define FE_DELAY_EN_INT 0x80
+#define FE_DELAY_MAX_INT 0x04
+#define FE_DELAY_MAX_TOUT 0x04
++#define FE_DELAY_TIME 20
+#define FE_DELAY_CHAN (((FE_DELAY_EN_INT | FE_DELAY_MAX_INT) << 8) | FE_DELAY_MAX_TOUT)
+#define FE_DELAY_INIT ((FE_DELAY_CHAN << 16) | FE_DELAY_CHAN)
+#define FE_PSE_FQFC_CFG_INIT 0x80504000
++#define FE_PSE_FQFC_CFG_256Q 0xff908000
+
+/* interrupt bits */
+#define FE_CNT_PPE_AF BIT(31)
+#define FE_MDIO_CFG_TX_CLK_SKEW_INV 3
+
+/* uni-cast port */
++#define FE_GDM1_JMB_LEN_MASK 0xf
++#define FE_GDM1_JMB_LEN_SHIFT 28
+#define FE_GDM1_ICS_EN BIT(22)
+#define FE_GDM1_TCS_EN BIT(21)
+#define FE_GDM1_UCS_EN BIT(20)
+#define TX_DMA_PLEN0_MASK ((0x3fff) << 16)
+#define TX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16)
+#define TX_DMA_PLEN1(_x) ((_x) & 0x3fff)
++#define TX_DMA_GET_PLEN0(_x) (((_x) >> 16 ) & 0x3fff)
++#define TX_DMA_GET_PLEN1(_x) ((_x) & 0x3fff)
+#define TX_DMA_LS1 BIT(14)
-+#define TX_DMA_LSO BIT(30)
++#define TX_DMA_LS0 BIT(30)
+#define TX_DMA_DONE BIT(31)
++
++#define TX_DMA_INS_VLAN BIT(7)
++#define TX_DMA_INS_PPPOE BIT(12)
+#define TX_DMA_QN(_x) ((_x) << 16)
+#define TX_DMA_PN(_x) ((_x) << 24)
+#define TX_DMA_QN_MASK TX_DMA_QN(0x7)
+#define TX_DMA_PN_MASK TX_DMA_PN(0x7)
++#define TX_DMA_UDF BIT(20)
+#define TX_DMA_CHKSUM (0x7 << 29)
++#define TX_DMA_TSO BIT(28)
++
++/* frame engine counters */
++#define FE_PPE_AC_BCNT0 (FE_CMTABLE_OFFSET + 0x00)
++#define FE_GDMA1_TX_GBCNT (FE_CMTABLE_OFFSET + 0x300)
++#define FE_GDMA2_TX_GBCNT (FE_GDMA1_TX_GBCNT + 0x40)
++
++/* phy device flags */
++#define FE_PHY_FLAG_PORT BIT(0)
++#define FE_PHY_FLAG_ATTACH BIT(1)
+
+struct fe_tx_dma {
+ unsigned int txd1;
+ unsigned char mac[6];
+ const u32 *reg_table;
+
-+ void (*init_data)(struct fe_soc_data *data);
++ void (*init_data)(struct fe_soc_data *data, struct net_device *netdev);
+ void (*reset_fe)(void);
+ void (*set_mac)(struct fe_priv *priv, unsigned char *mac);
-+ void (*fwd_config)(struct fe_priv *priv);
++ int (*fwd_config)(struct fe_priv *priv);
+ void (*tx_dma)(struct fe_priv *priv, int idx, struct sk_buff *skb);
+ void (*rx_dma)(struct fe_priv *priv, int idx, int len);
+ int (*switch_init)(struct fe_priv *priv);
+ int (*mdio_write)(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val);
+ int (*mdio_read)(struct mii_bus *bus, int phy_addr, int phy_reg);
+ void (*mdio_adjust_link)(struct fe_priv *priv, int port);
-+ int (*get_skb_header)(struct sk_buff *skb, void **iphdr, void **tcph, u64 *hdr_flags, void *priv);
+
+ void *swpriv;
+ u32 pdma_glo_cfg;
+ u32 rx_dly_int;
+ u32 tx_dly_int;
+ u32 checksum_bit;
-+ u32 tso;
++ u32 tx_udf_bit;
++};
+
-+ int min_pkt_len;
++#define FE_FLAG_PADDING_64B BIT(0)
++#define FE_FLAG_PADDING_BUG BIT(1)
++#define FE_FLAG_JUMBO_FRAME BIT(2)
++
++#define FE_STAT_REG_DECLARE \
++ _FE(tx_bytes) \
++ _FE(tx_packets) \
++ _FE(tx_skip) \
++ _FE(tx_collisions) \
++ _FE(rx_bytes) \
++ _FE(rx_packets) \
++ _FE(rx_overflow) \
++ _FE(rx_fcs_errors) \
++ _FE(rx_short_errors) \
++ _FE(rx_long_errors) \
++ _FE(rx_checksum_errors) \
++ _FE(rx_flow_control_packets)
++
++struct fe_hw_stats
++{
++ spinlock_t stats_lock;
++ struct u64_stats_sync syncp;
++#define _FE(x) u64 x;
++FE_STAT_REG_DECLARE
++#undef _FE
+};
+
+struct fe_priv
+
+ struct fe_soc_data *soc;
+ struct net_device *netdev;
++ u32 msg_enable;
++ u32 flags;
++
+ struct device *device;
+ unsigned long sysclk;
+
++ u16 frag_size;
++ u16 rx_buf_size;
+ struct fe_rx_dma *rx_dma;
-+ struct napi_struct rx_napi;
-+ struct sk_buff *rx_skb[NUM_DMA_DESC];
++ u8 **rx_data;
+ dma_addr_t rx_phys;
++ struct napi_struct rx_napi;
+
+ struct fe_tx_dma *tx_dma;
-+ struct tasklet_struct tx_tasklet;
-+ struct sk_buff *tx_skb[NUM_DMA_DESC];
++ struct sk_buff **tx_skb;
+ dma_addr_t tx_phys;
+ unsigned int tx_free_idx;
+
+ struct fe_phy *phy;
+ struct mii_bus *mii_bus;
-+ int mii_irq[PHY_MAX_ADDR];
++ struct phy_device *phy_dev;
++ u32 phy_flags;
+
+ int link[8];
+
-+ struct net_lro_mgr lro_mgr;
-+ struct net_lro_desc lro_arr[8];
++ struct fe_hw_stats *hw_stats;
+};
+
+extern const struct of_device_id of_fe_match[];
+void fe_w32(u32 val, unsigned reg);
+u32 fe_r32(unsigned reg);
+
++int fe_set_clock_cycle(struct fe_priv *priv);
++void fe_csum_config(struct fe_priv *priv);
++void fe_stats_update(struct fe_priv *priv);
++void fe_fwd_config(struct fe_priv *priv);
++void fe_reg_w32(u32 val, enum fe_reg reg);
++u32 fe_reg_r32(enum fe_reg reg);
++
++static inline void *priv_netdev(struct fe_priv *priv)
++{
++ return (char *)priv - ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
++}
++
+#endif /* FE_ETH_H */
Index: linux-3.14.18/drivers/net/ethernet/ralink/soc_mt7620.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-3.14.18/drivers/net/ethernet/ralink/soc_mt7620.c 2014-10-28 10:43:11.894131220 +0100
-@@ -0,0 +1,172 @@
++++ linux-3.14.18/drivers/net/ethernet/ralink/soc_mt7620.c 2014-10-29 20:25:55.433357519 +0100
+@@ -0,0 +1,164 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+#define MT7620A_RESET_ESW BIT(23)
+#define MT7620_L4_VALID BIT(23)
+
++#define MT7620_TX_DMA_UDF BIT(15)
++#define TX_DMA_FP_BMAP ((0xff) << 19)
++
+#define SYSC_REG_RESET_CTRL 0x34
-+#define MAX_RX_LENGTH 1536
+
+#define CDMA_ICS_EN BIT(2)
+#define CDMA_UCS_EN BIT(1)
+#define GDMA_TCS_EN BIT(21)
+#define GDMA_UCS_EN BIT(20)
+
-+static const u32 rt5350_reg_table[FE_REG_COUNT] = {
++/* frame engine counters */
++#define MT7620_REG_MIB_OFFSET 0x1000
++#define MT7620_PPE_AC_BCNT0 (MT7620_REG_MIB_OFFSET + 0x00)
++#define MT7620_GDM1_TX_GBCNT (MT7620_REG_MIB_OFFSET + 0x300)
++#define MT7620_GDM2_TX_GBCNT (MT7620_GDM1_TX_GBCNT + 0x40)
++
++static const u32 mt7620_reg_table[FE_REG_COUNT] = {
+ [FE_REG_PDMA_GLO_CFG] = RT5350_PDMA_GLO_CFG,
+ [FE_REG_PDMA_RST_CFG] = RT5350_PDMA_RST_CFG,
+ [FE_REG_DLY_INT_CFG] = RT5350_DLY_INT_CFG,
+ [FE_REG_FE_INT_ENABLE] = RT5350_FE_INT_ENABLE,
+ [FE_REG_FE_INT_STATUS] = RT5350_FE_INT_STATUS,
+ [FE_REG_FE_DMA_VID_BASE] = MT7620_DMA_VID,
++ [FE_REG_FE_COUNTER_BASE] = MT7620_GDM1_TX_GBCNT,
+};
+
+static void mt7620_fe_reset(void)
+ rt_sysc_w32(0, SYSC_REG_RESET_CTRL);
+}
+
-+static void mt7620_fwd_config(struct fe_priv *priv)
++static void mt7620_rxcsum_config(bool enable)
+{
-+ int i;
-+
-+ /* frame engine will push VLAN tag regarding to VIDX feild in Tx desc. */
-+ for (i = 0; i < 16; i += 2)
-+ fe_w32(((i + 1) << 16) + i, MT7620_DMA_VID + (i * 2));
++ if (enable)
++ fe_w32(fe_r32(MT7620A_GDMA1_FWD_CFG) | (GDMA_ICS_EN |
++ GDMA_TCS_EN | GDMA_UCS_EN),
++ MT7620A_GDMA1_FWD_CFG);
++ else
++ fe_w32(fe_r32(MT7620A_GDMA1_FWD_CFG) & ~(GDMA_ICS_EN |
++ GDMA_TCS_EN | GDMA_UCS_EN),
++ MT7620A_GDMA1_FWD_CFG);
++}
+
-+ fe_w32(fe_r32(MT7620A_GDMA1_FWD_CFG) & ~7, MT7620A_GDMA1_FWD_CFG);
-+ fe_w32(fe_r32(MT7620A_GDMA1_FWD_CFG) | (GDMA_ICS_EN | GDMA_TCS_EN | GDMA_UCS_EN), MT7620A_GDMA1_FWD_CFG);
-+ fe_w32(fe_r32(MT7620A_CDMA_CSG_CFG) | (CDMA_ICS_EN | CDMA_UCS_EN | CDMA_TCS_EN), MT7620A_CDMA_CSG_CFG);
++static void mt7620_txcsum_config(bool enable)
++{
++ if (enable)
++ fe_w32(fe_r32(MT7620A_CDMA_CSG_CFG) | (CDMA_ICS_EN |
++ CDMA_UCS_EN | CDMA_TCS_EN),
++ MT7620A_CDMA_CSG_CFG);
++ else
++ fe_w32(fe_r32(MT7620A_CDMA_CSG_CFG) & ~(CDMA_ICS_EN |
++ CDMA_UCS_EN | CDMA_TCS_EN),
++ MT7620A_CDMA_CSG_CFG);
+}
+
-+static void mt7620_tx_dma(struct fe_priv *priv, int idx, struct sk_buff *skb)
++static int mt7620_fwd_config(struct fe_priv *priv)
+{
-+ unsigned int nr_frags = 0;
-+ unsigned int len = 0;
++ struct net_device *dev = priv_netdev(priv);
+
-+ if (skb) {
-+ nr_frags = skb_shinfo(skb)->nr_frags;
-+ len = skb->len - skb->data_len;
-+ }
++ fe_w32(fe_r32(MT7620A_GDMA1_FWD_CFG) & ~7, MT7620A_GDMA1_FWD_CFG);
+
-+ if (!skb)
-+ priv->tx_dma[idx].txd2 = TX_DMA_LSO | TX_DMA_DONE;
-+ else if (!nr_frags)
-+ priv->tx_dma[idx].txd2 = TX_DMA_LSO | TX_DMA_PLEN0(len);
-+ else
-+ priv->tx_dma[idx].txd2 = TX_DMA_PLEN0(len);
++ mt7620_txcsum_config((dev->features & NETIF_F_IP_CSUM));
++ mt7620_rxcsum_config((dev->features & NETIF_F_RXCSUM));
+
-+ if(skb && vlan_tx_tag_present(skb))
-+ priv->tx_dma[idx].txd4 = 0x80 | (vlan_tx_tag_get(skb) >> 13) << 4 | (vlan_tx_tag_get(skb) & 0xF);
-+ else
-+ priv->tx_dma[idx].txd4 = 0;
++ return 0;
++}
++
++static void mt7620_tx_dma(struct fe_priv *priv, int idx, struct sk_buff *skb)
++{
++ priv->tx_dma[idx].txd4 = 0;
+}
+
+static void mt7620_rx_dma(struct fe_priv *priv, int idx, int len)
+ priv->rx_dma[idx].rxd2 = RX_DMA_PLEN0(len);
+}
+
-+#ifdef CONFIG_INET_LRO
-+static int
-+mt7620_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph,
-+ u64 *hdr_flags, void *_priv)
++static void mt7620_init_data(struct fe_soc_data *data,
++ struct net_device *netdev)
+{
-+ struct iphdr *iph = NULL;
-+ int vhdr_len = 0;
-+
-+ /*
-+ * Make sure that this packet is Ethernet II, is not VLAN
-+ * tagged, is IPv4, has a valid IP header, and is TCP.
-+ */
-+ if (skb->protocol == 0x0081)
-+ vhdr_len = VLAN_HLEN;
-+
-+ iph = (struct iphdr *)(skb->data + vhdr_len);
-+ if(iph->protocol != IPPROTO_TCP)
-+ return -1;
-+
-+ *iphdr = iph;
-+ *tcph = skb->data + (iph->ihl << 2) + vhdr_len;
-+ *hdr_flags = LRO_IPV4 | LRO_TCP;
++ struct fe_priv *priv = netdev_priv(netdev);
+
-+ return 0;
-+}
-+#endif
++ priv->flags = FE_FLAG_PADDING_64B;
++ netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
++ NETIF_F_HW_VLAN_CTAG_TX;
+
-+static void mt7620_init_data(struct fe_soc_data *data)
-+{
+ if (mt7620_get_eco() >= 5)
-+ data->tso = 1;
++ netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
++ NETIF_F_IPV6_CSUM;
+}
+
+static struct fe_soc_data mt7620_data = {
+ .switch_init = mt7620_gsw_probe,
+ .switch_config = mt7620_gsw_config,
+ .port_init = mt7620_port_init,
-+ .min_pkt_len = 0,
-+ .reg_table = rt5350_reg_table,
++ .reg_table = mt7620_reg_table,
+ .pdma_glo_cfg = FE_PDMA_SIZE_16DWORDS | MT7620A_DMA_2B_OFFSET,
+ .rx_dly_int = RT5350_RX_DLY_INT,
+ .tx_dly_int = RT5350_TX_DLY_INT,
+ .checksum_bit = MT7620_L4_VALID,
++ .tx_udf_bit = MT7620_TX_DMA_UDF,
+ .has_carrier = mt7620a_has_carrier,
+ .mdio_read = mt7620_mdio_read,
+ .mdio_write = mt7620_mdio_write,
+ .mdio_adjust_link = mt7620_mdio_link_adjust,
-+#ifdef CONFIG_INET_LRO
-+ .get_skb_header = mt7620_get_skb_header,
-+#endif
+};
+
+const struct of_device_id of_fe_match[] = {
Index: linux-3.14.18/drivers/net/ethernet/ralink/soc_rt2880.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-3.14.18/drivers/net/ethernet/ralink/soc_rt2880.c 2014-10-28 10:43:11.894131220 +0100
-@@ -0,0 +1,52 @@
++++ linux-3.14.18/drivers/net/ethernet/ralink/soc_rt2880.c 2014-10-29 20:25:55.433357519 +0100
+@@ -0,0 +1,81 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+#define SYSC_REG_RESET_CTRL 0x034
+#define RT2880_RESET_FE BIT(18)
+
++static void rt2880_init_data(struct fe_soc_data *data,
++ struct net_device *netdev)
++{
++ struct fe_priv *priv = netdev_priv(netdev);
++
++ priv->flags = FE_FLAG_PADDING_64B | FE_FLAG_PADDING_BUG |
++ FE_FLAG_JUMBO_FRAME;
++ netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_TX;
++	/* hardware checksum offload appears to be buggy on this SoC; keep it disabled */
++ //netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
++}
++
+void rt2880_fe_reset(void)
+{
+ rt_sysc_w32(RT2880_RESET_FE, SYSC_REG_RESET_CTRL);
+}
+
++static int rt2880_fwd_config(struct fe_priv *priv)
++{
++ int ret;
++
++ ret = fe_set_clock_cycle(priv);
++ if (ret)
++ return ret;
++
++ fe_fwd_config(priv);
++ fe_w32(FE_PSE_FQFC_CFG_INIT, FE_PSE_FQ_CFG);
++ fe_csum_config(priv);
++
++ return ret;
++}
++
+struct fe_soc_data rt2880_data = {
+ .mac = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
++ .init_data = rt2880_init_data,
+ .reset_fe = rt2880_fe_reset,
-+ .min_pkt_len = 64,
-+ .pdma_glo_cfg = FE_PDMA_SIZE_4DWORDS,
++ .fwd_config = rt2880_fwd_config,
++ .pdma_glo_cfg = FE_PDMA_SIZE_8DWORDS,
+ .checksum_bit = RX_DMA_L4VALID,
++ .tx_udf_bit = TX_DMA_UDF,
+ .rx_dly_int = FE_RX_DLY_INT,
+ .tx_dly_int = FE_TX_DLY_INT,
+ .mdio_read = rt2880_mdio_read,
Index: linux-3.14.18/drivers/net/ethernet/ralink/soc_rt305x.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-3.14.18/drivers/net/ethernet/ralink/soc_rt305x.c 2014-10-28 10:43:11.894131220 +0100
-@@ -0,0 +1,113 @@
++++ linux-3.14.18/drivers/net/ethernet/ralink/soc_rt305x.c 2014-10-29 20:25:55.433357519 +0100
+@@ -0,0 +1,161 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+#include <linux/module.h>
+
+#include <asm/mach-ralink/ralink_regs.h>
++#include <asm/mach-ralink/rt305x.h>
+
+#include "ralink_soc_eth.h"
++#include "mdio_rt2880.h"
+
+#define RT305X_RESET_FE BIT(21)
+#define RT305X_RESET_ESW BIT(23)
+ [FE_REG_FE_DMA_VID_BASE] = 0,
+};
+
++static void rt305x_init_data(struct fe_soc_data *data,
++ struct net_device *netdev)
++{
++ struct fe_priv *priv = netdev_priv(netdev);
++
++ priv->flags = FE_FLAG_PADDING_64B | FE_FLAG_PADDING_BUG;
++ netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
++ NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX;
++}
++
++static int rt3050_fwd_config(struct fe_priv *priv)
++{
++ int ret;
++
++ if (soc_is_rt3052()) {
++ ret = fe_set_clock_cycle(priv);
++ if (ret)
++ return ret;
++ }
++
++ fe_fwd_config(priv);
++ if (!soc_is_rt3352())
++ fe_w32(FE_PSE_FQFC_CFG_INIT, FE_PSE_FQ_CFG);
++ fe_csum_config(priv);
++
++ return 0;
++}
++
+static void rt305x_fe_reset(void)
+{
+ rt_sysc_w32(RT305X_RESET_FE, SYSC_REG_RESET_CTRL);
+ rt_sysc_w32(0, SYSC_REG_RESET_CTRL);
+}
+
++static void rt5350_init_data(struct fe_soc_data *data,
++ struct net_device *netdev)
++{
++ netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM;
++}
++
+static void rt5350_set_mac(struct fe_priv *priv, unsigned char *mac)
+{
+ unsigned long flags;
+ spin_unlock_irqrestore(&priv->page_lock, flags);
+}
+
-+static void rt5350_fwd_config(struct fe_priv *priv)
++static void rt5350_rxcsum_config(bool enable)
+{
-+ unsigned long sysclk = priv->sysclk;
++ if (enable)
++ fe_w32(fe_r32(RT5350_SDM_CFG) | (RT5350_SDM_ICS_EN |
++ RT5350_SDM_TCS_EN | RT5350_SDM_UCS_EN),
++ RT5350_SDM_CFG);
++ else
++ fe_w32(fe_r32(RT5350_SDM_CFG) & ~(RT5350_SDM_ICS_EN |
++ RT5350_SDM_TCS_EN | RT5350_SDM_UCS_EN),
++ RT5350_SDM_CFG);
++}
+
-+ if (sysclk) {
-+ sysclk /= FE_US_CYC_CNT_DIVISOR;
-+ sysclk <<= FE_US_CYC_CNT_SHIFT;
++static int rt5350_fwd_config(struct fe_priv *priv)
++{
++ struct net_device *dev = priv_netdev(priv);
+
-+ fe_w32((fe_r32(FE_FE_GLO_CFG) &
-+ ~(FE_US_CYC_CNT_MASK << FE_US_CYC_CNT_SHIFT)) | sysclk,
-+ FE_FE_GLO_CFG);
-+ }
++ rt5350_rxcsum_config((dev->features & NETIF_F_RXCSUM));
+
-+ fe_w32(fe_r32(RT5350_SDM_CFG) & ~0xffff, RT5350_SDM_CFG);
-+ fe_w32(fe_r32(RT5350_SDM_CFG) | RT5350_SDM_ICS_EN | RT5350_SDM_TCS_EN | RT5350_SDM_UCS_EN,
-+ RT5350_SDM_CFG);
++ return 0;
++}
++
++static void rt5350_tx_dma(struct fe_priv *priv, int idx, struct sk_buff *skb)
++{
++ priv->tx_dma[idx].txd4 = 0;
+}
+
+static void rt5350_fe_reset(void)
+
+static struct fe_soc_data rt3050_data = {
+ .mac = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
++ .init_data = rt305x_init_data,
+ .reset_fe = rt305x_fe_reset,
-+ .min_pkt_len = 64,
-+ .pdma_glo_cfg = FE_PDMA_SIZE_4DWORDS,
++ .fwd_config = rt3050_fwd_config,
++ .pdma_glo_cfg = FE_PDMA_SIZE_8DWORDS,
+ .checksum_bit = RX_DMA_L4VALID,
++ .tx_udf_bit = TX_DMA_UDF,
+ .rx_dly_int = FE_RX_DLY_INT,
+ .tx_dly_int = FE_TX_DLY_INT,
+};
+
+static struct fe_soc_data rt5350_data = {
+ .mac = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
++ .init_data = rt5350_init_data,
+ .reg_table = rt5350_reg_table,
+ .reset_fe = rt5350_fe_reset,
+ .set_mac = rt5350_set_mac,
+ .fwd_config = rt5350_fwd_config,
-+ .min_pkt_len = 64,
-+ .pdma_glo_cfg = FE_PDMA_SIZE_4DWORDS,
++ .tx_dma = rt5350_tx_dma,
++ .pdma_glo_cfg = FE_PDMA_SIZE_8DWORDS,
+ .checksum_bit = RX_DMA_L4VALID,
++ .tx_udf_bit = TX_DMA_UDF,
+ .rx_dly_int = RT5350_RX_DLY_INT,
+ .tx_dly_int = RT5350_TX_DLY_INT,
+};
Index: linux-3.14.18/drivers/net/ethernet/ralink/soc_rt3883.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
-+++ linux-3.14.18/drivers/net/ethernet/ralink/soc_rt3883.c 2014-10-28 10:43:11.894131220 +0100
-@@ -0,0 +1,60 @@
++++ linux-3.14.18/drivers/net/ethernet/ralink/soc_rt3883.c 2014-10-29 20:25:55.433357519 +0100
+@@ -0,0 +1,88 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ rt_sysc_w32(t, RT3883_SYSC_REG_RSTCTRL);
+}
+
++static int rt3883_fwd_config(struct fe_priv *priv)
++{
++ int ret;
++
++ ret = fe_set_clock_cycle(priv);
++ if (ret)
++ return ret;
++
++ fe_fwd_config(priv);
++ fe_w32(FE_PSE_FQFC_CFG_256Q, FE_PSE_FQ_CFG);
++ fe_csum_config(priv);
++
++ return 0;
++}
++
++static void rt3883_init_data(struct fe_soc_data *data,
++ struct net_device *netdev)
++{
++ struct fe_priv *priv = netdev_priv(netdev);
++
++ priv->flags = FE_FLAG_PADDING_64B | FE_FLAG_PADDING_BUG |
++ FE_FLAG_JUMBO_FRAME;
++ netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
++ NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX;
++}
++
+static struct fe_soc_data rt3883_data = {
+ .mac = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
++ .init_data = rt3883_init_data,
+ .reset_fe = rt3883_fe_reset,
-+ .min_pkt_len = 64,
-+ .pdma_glo_cfg = FE_PDMA_SIZE_4DWORDS,
++ .fwd_config = rt3883_fwd_config,
++ .pdma_glo_cfg = FE_PDMA_SIZE_8DWORDS,
+ .rx_dly_int = FE_RX_DLY_INT,
+ .tx_dly_int = FE_TX_DLY_INT,
+ .checksum_bit = RX_DMA_L4VALID,
++ .tx_udf_bit = TX_DMA_UDF,
+ .mdio_read = rt2880_mdio_read,
+ .mdio_write = rt2880_mdio_write,
+ .mdio_adjust_link = rt2880_mdio_link_adjust,
+
+MODULE_DEVICE_TABLE(of, of_fe_match);
+
+Index: linux-3.14.18/drivers/net/ethernet/ralink/ralink_ethtool.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.14.18/drivers/net/ethernet/ralink/ralink_ethtool.c 2014-10-29 20:25:55.433357519 +0100
+@@ -0,0 +1,262 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * Copyright (C) 2009-2013 Michael Lee <igvtee@gmail.com>
++ */
++
++#include "ralink_soc_eth.h"
++
++static const char fe_gdma_str[][ETH_GSTRING_LEN] = {
++#define _FE(x...) # x,
++FE_STAT_REG_DECLARE
++#undef _FE
++};
++
++static int fe_get_settings(struct net_device *dev,
++ struct ethtool_cmd *cmd)
++{
++ struct fe_priv *priv = netdev_priv(dev);
++ int err;
++
++ if (!priv->phy_dev)
++ goto out_gset;
++
++ if (priv->phy_flags == FE_PHY_FLAG_ATTACH) {
++ err = phy_read_status(priv->phy_dev);
++ if (err)
++ goto out_gset;
++ }
++
++ return phy_ethtool_gset(priv->phy_dev, cmd);
++
++out_gset:
++ return -ENODEV;
++}
++
++static int fe_set_settings(struct net_device *dev,
++ struct ethtool_cmd *cmd)
++{
++ struct fe_priv *priv = netdev_priv(dev);
++
++ if (!priv->phy_dev)
++ goto out_sset;
++
++ if (cmd->phy_address != priv->phy_dev->addr) {
++ if (priv->phy->phy_node[cmd->phy_address]) {
++ priv->phy_dev = priv->phy->phy[cmd->phy_address];
++ priv->phy_flags = FE_PHY_FLAG_PORT;
++ } else if (priv->mii_bus &&
++ priv->mii_bus->phy_map[cmd->phy_address]) {
++ priv->phy_dev = priv->mii_bus->phy_map[cmd->phy_address];
++ priv->phy_flags = FE_PHY_FLAG_ATTACH;
++ } else
++ goto out_sset;
++ }
++
++ return phy_ethtool_sset(priv->phy_dev, cmd);
++
++out_sset:
++ return -ENODEV;
++}
++
++static void fe_get_drvinfo(struct net_device *dev,
++ struct ethtool_drvinfo *info)
++{
++ struct fe_priv *priv = netdev_priv(dev);
++ struct fe_soc_data *soc = priv->soc;
++
++ strlcpy(info->driver, priv->device->driver->name, sizeof(info->driver));
++ strlcpy(info->version, FE_DRV_VERSION, sizeof(info->version));
++ strlcpy(info->bus_info, dev_name(priv->device), sizeof(info->bus_info));
++
++ if (soc->reg_table[FE_REG_FE_COUNTER_BASE])
++ info->n_stats = ARRAY_SIZE(fe_gdma_str);
++}
++
++static u32 fe_get_msglevel(struct net_device *dev)
++{
++ struct fe_priv *priv = netdev_priv(dev);
++
++ return priv->msg_enable;
++}
++
++static void fe_set_msglevel(struct net_device *dev, u32 value)
++{
++ struct fe_priv *priv = netdev_priv(dev);
++
++ priv->msg_enable = value;
++}
++
++static int fe_nway_reset(struct net_device *dev)
++{
++ struct fe_priv *priv = netdev_priv(dev);
++
++ if (!priv->phy_dev)
++ goto out_nway_reset;
++
++ return genphy_restart_aneg(priv->phy_dev);
++
++out_nway_reset:
++ return -EOPNOTSUPP;
++}
++
++static u32 fe_get_link(struct net_device *dev)
++{
++ struct fe_priv *priv = netdev_priv(dev);
++ int err;
++
++ if (!priv->phy_dev)
++ goto out_get_link;
++
++ if (priv->phy_flags == FE_PHY_FLAG_ATTACH) {
++ err = genphy_update_link(priv->phy_dev);
++ if (err)
++ goto out_get_link;
++ }
++
++ return priv->phy_dev->link;
++
++out_get_link:
++ return ethtool_op_get_link(dev);
++}
++
++static void fe_get_ringparam(struct net_device *dev,
++ struct ethtool_ringparam *ring)
++{
++ ring->rx_max_pending = MAX_DMA_DESC;
++ ring->tx_max_pending = MAX_DMA_DESC;
++ ring->rx_pending = NUM_DMA_DESC;
++ ring->tx_pending = NUM_DMA_DESC;
++}
++
++static int fe_get_coalesce(struct net_device *dev,
++ struct ethtool_coalesce *coal)
++{
++ u32 delay_cfg = fe_reg_r32(FE_REG_DLY_INT_CFG);
++
++ coal->rx_coalesce_usecs = (delay_cfg & 0xff) * FE_DELAY_TIME;
++ coal->rx_max_coalesced_frames = ((delay_cfg >> 8) & 0x7f);
++ coal->use_adaptive_rx_coalesce = (delay_cfg >> 15) & 0x1;
++
++ coal->tx_coalesce_usecs = ((delay_cfg >> 16) & 0xff) * FE_DELAY_TIME;
++ coal->tx_max_coalesced_frames = ((delay_cfg >> 24) & 0x7f);
++ coal->use_adaptive_tx_coalesce = (delay_cfg >> 31) & 0x1;
++
++ return 0;
++}
++
++static int fe_set_coalesce(struct net_device *dev,
++ struct ethtool_coalesce *coal)
++{
++ u32 delay_cfg;
++ u32 rx_usecs, tx_usecs;
++ u32 rx_frames, tx_frames;
++
++ if (!coal->use_adaptive_rx_coalesce || !coal->use_adaptive_tx_coalesce)
++ return -EINVAL;
++
++ rx_usecs = DIV_ROUND_UP(coal->rx_coalesce_usecs, FE_DELAY_TIME);
++ rx_frames = coal->rx_max_coalesced_frames;
++ tx_usecs = DIV_ROUND_UP(coal->tx_coalesce_usecs, FE_DELAY_TIME);
++ tx_frames = coal->tx_max_coalesced_frames;
++
++ if (((tx_usecs == 0) && (tx_frames == 0)) ||
++ ((rx_usecs == 0) && (rx_frames == 0)))
++ return -EINVAL;
++
++ if (rx_usecs > 0xff) rx_usecs = 0xff;
++ if (rx_frames > 0x7f) rx_frames = 0x7f;
++ if (tx_usecs > 0xff) tx_usecs = 0xff;
++ if (tx_frames > 0x7f) tx_frames = 0x7f;
++
++ delay_cfg = ((((FE_DELAY_EN_INT | tx_frames) << 8) | tx_usecs) << 16) |
++ (((FE_DELAY_EN_INT | rx_frames) << 8) | rx_usecs);
++
++ fe_reg_w32(delay_cfg, FE_REG_DLY_INT_CFG);
++
++ return 0;
++}
++
++static void fe_get_strings(struct net_device *dev, u32 stringset, u8 *data)
++{
++ switch (stringset) {
++ case ETH_SS_STATS:
++ memcpy(data, *fe_gdma_str, sizeof(fe_gdma_str));
++ break;
++ }
++}
++
++static int fe_get_sset_count(struct net_device *dev, int sset)
++{
++ switch (sset) {
++ case ETH_SS_STATS:
++ return ARRAY_SIZE(fe_gdma_str);
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
++static void fe_get_ethtool_stats(struct net_device *dev,
++ struct ethtool_stats *stats, u64 *data)
++{
++ struct fe_priv *priv = netdev_priv(dev);
++ struct fe_hw_stats *hwstats = priv->hw_stats;
++ u64 *data_src, *data_dst;
++ unsigned int start;
++ int i;
++
++ if (netif_running(dev) && netif_device_present(dev)) {
++ if (spin_trylock(&hwstats->stats_lock)) {
++ fe_stats_update(priv);
++ spin_unlock(&hwstats->stats_lock);
++ }
++ }
++
++ do {
++ data_src = &hwstats->tx_bytes;
++ data_dst = data;
++ start = u64_stats_fetch_begin_bh(&hwstats->syncp);
++
++ for (i = 0; i < ARRAY_SIZE(fe_gdma_str); i++)
++ *data_dst++ = *data_src++;
++
++ } while (u64_stats_fetch_retry_bh(&hwstats->syncp, start));
++}
++
++static struct ethtool_ops fe_ethtool_ops = {
++ .get_settings = fe_get_settings,
++ .set_settings = fe_set_settings,
++ .get_drvinfo = fe_get_drvinfo,
++ .get_msglevel = fe_get_msglevel,
++ .set_msglevel = fe_set_msglevel,
++ .nway_reset = fe_nway_reset,
++ .get_link = fe_get_link,
++ .get_ringparam = fe_get_ringparam,
++ .get_coalesce = fe_get_coalesce,
++ .set_coalesce = fe_set_coalesce,
++};
++
++void fe_set_ethtool_ops(struct net_device *netdev)
++{
++ struct fe_priv *priv = netdev_priv(netdev);
++ struct fe_soc_data *soc = priv->soc;
++
++ if (soc->reg_table[FE_REG_FE_COUNTER_BASE]) {
++ fe_ethtool_ops.get_strings = fe_get_strings;
++ fe_ethtool_ops.get_sset_count = fe_get_sset_count;
++ fe_ethtool_ops.get_ethtool_stats = fe_get_ethtool_stats;
++ }
++
++ SET_ETHTOOL_OPS(netdev, &fe_ethtool_ops);
++}
+Index: linux-3.14.18/drivers/net/ethernet/ralink/ralink_ethtool.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-3.14.18/drivers/net/ethernet/ralink/ralink_ethtool.h 2014-10-29 20:25:55.433357519 +0100
+@@ -0,0 +1,25 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; version 2 of the License
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * Copyright (C) 2009-2013 Michael Lee <igvtee@gmail.com>
++ */
++
++#ifndef FE_ETHTOOL_H
++#define FE_ETHTOOL_H
++
++#include <linux/ethtool.h>
++
++void fe_set_ethtool_ops(struct net_device *netdev);
++
++#endif /* FE_ETHTOOL_H */