usleep_range(10, 20);
}
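+/* Pulse the optional "fe" reset line to bring the frame engine into a
+ * known state. Used as the fallback when the SoC supplies no reset_fe
+ * hook of its own.
+ */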
+void fe_reset_fe(struct fe_priv *priv)
+{
+ if (!priv->rst_fe)
+ return;
+
+ reset_control_assert(priv->rst_fe);
+ usleep_range(60, 120);
+ reset_control_deassert(priv->rst_fe);
+ usleep_range(60, 120);
+}
+
static inline void fe_int_disable(u32 mask)
{
fe_reg_w32(fe_reg_r32(FE_REG_FE_INT_ENABLE) & ~mask,
for (i = 0; i < ring->rx_ring_size; i++)
if (ring->rx_data[i]) {
if (ring->rx_dma && ring->rx_dma[i].rxd1)
- dma_unmap_single(&priv->netdev->dev,
+ dma_unmap_single(priv->dev,
ring->rx_dma[i].rxd1,
ring->rx_buf_size,
DMA_FROM_DEVICE);
}
if (ring->rx_dma) {
- dma_free_coherent(&priv->netdev->dev,
+ dma_free_coherent(priv->dev,
ring->rx_ring_size * sizeof(*ring->rx_dma),
ring->rx_dma,
ring->rx_phys);
static int fe_alloc_rx(struct fe_priv *priv)
{
- struct net_device *netdev = priv->netdev;
struct fe_rx_ring *ring = &priv->rx_ring;
int i, pad;
goto no_rx_mem;
}
- ring->rx_dma = dma_alloc_coherent(&netdev->dev,
+ ring->rx_dma = dma_alloc_coherent(priv->dev,
ring->rx_ring_size * sizeof(*ring->rx_dma),
&ring->rx_phys,
GFP_ATOMIC | __GFP_ZERO);
else
pad = NET_IP_ALIGN;
for (i = 0; i < ring->rx_ring_size; i++) {
- dma_addr_t dma_addr = dma_map_single(&netdev->dev,
+ dma_addr_t dma_addr = dma_map_single(priv->dev,
ring->rx_data[i] + NET_SKB_PAD + pad,
ring->rx_buf_size,
DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&netdev->dev, dma_addr)))
+ if (unlikely(dma_mapping_error(priv->dev, dma_addr)))
goto no_rx_mem;
ring->rx_dma[i].rxd1 = (unsigned int)dma_addr;
static void fe_clean_tx(struct fe_priv *priv)
{
int i;
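+ /* Map DMA against the platform device; the netdev's embedded
+ * struct device has no DMA configuration of its own.
+ */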
- struct device *dev = &priv->netdev->dev;
+ struct device *dev = priv->dev;
struct fe_tx_ring *ring = &priv->tx_ring;
if (ring->tx_buf) {
if (!ring->tx_buf)
goto no_tx_mem;
- ring->tx_dma = dma_alloc_coherent(&priv->netdev->dev,
+ ring->tx_dma = dma_alloc_coherent(priv->dev,
ring->tx_ring_size * sizeof(*ring->tx_dma),
&ring->tx_phys,
GFP_ATOMIC | __GFP_ZERO);
{
struct fe_priv *priv = netdev_priv(dev);
struct fe_map_state st = {
- .dev = &dev->dev,
+ .dev = priv->dev,
.ring_idx = ring->tx_next_idx,
};
struct sk_buff *head = skb;
/* TX SG offload */
nr_frags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < nr_frags; i++) {
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
frag = &skb_shinfo(skb)->frags[i];
if (fe_tx_dma_map_page(ring, &st, skb_frag_page(frag),
- frag->page_offset, skb_frag_size(frag)))
+ skb_frag_off(frag), skb_frag_size(frag)))
goto err_dma;
}
netif_wake_queue(dev);
}
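+ /* Ring the TX doorbell only when the queue is stopped or the stack
+ * signals no further frames are pending (netdev_xmit_more()).
+ */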
- if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !head->xmit_more)
+ if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !netdev_xmit_more())
fe_reg_w32(ring->tx_next_idx, FE_REG_TX_CTX_IDX0);
return 0;
j = ring->tx_next_idx;
for (i = 0; i < tx_num; i++) {
/* unmap dma */
- fe_txd_unmap(&dev->dev, &ring->tx_buf[j]);
+ fe_txd_unmap(priv->dev, &ring->tx_buf[j]);
ring->tx_dma[j].txd2 = TX_DMA_DESP2_DEF;
j = NEXT_TX_DESP_IDX(j);
{
struct sk_buff *head = skb;
int i, nfrags = 0;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
next_frag:
nfrags++;
if (skb_is_gso(skb)) {
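+ /* A GSO frag may span several descriptors: one per
+ * TX_DMA_BUF_LEN-sized chunk.
+ */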
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
- nfrags += DIV_ROUND_UP(frag->size, TX_DMA_BUF_LEN);
+ nfrags += DIV_ROUND_UP(skb_frag_size(frag), TX_DMA_BUF_LEN);
}
} else {
nfrags += skb_shinfo(skb)->nr_frags;
stats->rx_dropped++;
goto release_desc;
}
- dma_addr = dma_map_single(&netdev->dev,
+ dma_addr = dma_map_single(priv->dev,
new_data + NET_SKB_PAD + pad,
ring->rx_buf_size,
DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
+ if (unlikely(dma_mapping_error(priv->dev, dma_addr))) {
skb_free_frag(new_data);
goto release_desc;
}
}
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
- dma_unmap_single(&netdev->dev, trxd.rxd1,
+ dma_unmap_single(priv->dev, trxd.rxd1,
ring->rx_buf_size, DMA_FROM_DEVICE);
pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev;
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
RX_DMA_VID(trxd.rxd3));
-#ifdef CONFIG_NET_RALINK_OFFLOAD
- if (mtk_offload_check_rx(priv, skb, trxd.rxd4) == 0) {
-#endif
- stats->rx_packets++;
- stats->rx_bytes += pktlen;
- napi_gro_receive(napi, skb);
-#ifdef CONFIG_NET_RALINK_OFFLOAD
- } else {
- dev_kfree_skb(skb);
- }
-#endif
+ stats->rx_packets++;
+ stats->rx_bytes += pktlen;
+
+ napi_gro_receive(napi, skb);
ring->rx_data[idx] = new_data;
rxd->rxd1 = (unsigned int)dma_addr;
int *tx_again)
{
struct net_device *netdev = priv->netdev;
- struct device *dev = &netdev->dev;
unsigned int bytes_compl = 0;
struct sk_buff *skb;
struct fe_tx_buf *tx_buf;
done++;
budget--;
}
- fe_txd_unmap(dev, tx_buf);
+ fe_txd_unmap(priv->dev, tx_buf);
idx = NEXT_TX_DESP_IDX(idx);
}
ring->tx_free_idx = idx;
return rx_done;
}
-static void fe_tx_timeout(struct net_device *dev)
+static void fe_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct fe_priv *priv = netdev_priv(dev);
struct fe_tx_ring *ring = &priv->tx_ring;
napi_enable(&priv->rx_napi);
fe_int_enable(priv->soc->tx_int | priv->soc->rx_int);
netif_start_queue(dev);
-#ifdef CONFIG_NET_RALINK_OFFLOAD
- mtk_ppe_probe(priv);
-#endif
return 0;
}
fe_free_dma(priv);
-#ifdef CONFIG_NET_RALINK_OFFLOAD
- mtk_ppe_remove(priv);
-#endif
-
return 0;
}
{
struct fe_priv *priv = netdev_priv(dev);
struct device_node *port;
- const char *mac_addr;
int err;
- priv->soc->reset_fe();
+ if (priv->soc->reset_fe)
+ priv->soc->reset_fe(priv);
+ else
+ fe_reset_fe(priv);
+
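+ /* Bring up the embedded switch core; propagate -EPROBE_DEFER so
+ * the probe is retried once the switch driver is ready.
+ */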
- if (priv->soc->switch_init)
- if (priv->soc->switch_init(priv)) {
+ if (priv->soc->switch_init) {
+ err = priv->soc->switch_init(priv);
+ if (err) {
+ if (err == -EPROBE_DEFER)
+ return err;
netdev_err(dev, "failed to initialize switch core\n");
return -ENODEV;
}
+ }
fe_reset_phy(priv);
- mac_addr = of_get_mac_address(priv->dev->of_node);
- if (mac_addr)
- ether_addr_copy(dev->dev_addr, mac_addr);
+ of_get_mac_address(priv->dev->of_node, dev->dev_addr);
/* If the mac address is invalid, use random mac address */
if (!is_valid_ether_addr(dev->dev_addr)) {
return fe_open(dev);
}
-#ifdef CONFIG_NET_RALINK_OFFLOAD
-static int
-fe_flow_offload(enum flow_offload_type type, struct flow_offload *flow,
- struct flow_offload_hw_path *src,
- struct flow_offload_hw_path *dest)
-{
- struct fe_priv *priv;
-
- if (src->dev != dest->dev)
- return -EINVAL;
-
- priv = netdev_priv(src->dev);
-
- return mtk_flow_offload(priv, type, flow, src, dest);
-}
-#endif
-
static const struct net_device_ops fe_netdev_ops = {
.ndo_init = fe_init,
.ndo_uninit = fe_uninit,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = fe_poll_controller,
#endif
-#ifdef CONFIG_NET_RALINK_OFFLOAD
- .ndo_flow_offload = fe_flow_offload,
-#endif
};
static void fe_reset_pending(struct fe_priv *priv)
struct clk *sysclk;
int err, napi_weight;
- device_reset(&pdev->dev);
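+ /* A failed system-controller reset is logged but treated as
+ * non-fatal; fe_init() resets the frame engine again later.
+ */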
+ err = device_reset(&pdev->dev);
+ if (err)
+ dev_err(&pdev->dev, "failed to reset device\n");
match = of_match_device(of_fe_match, &pdev->dev);
soc = (struct fe_soc_data *)match->data;
goto err_free_dev;
}
+ priv = netdev_priv(netdev);
+ spin_lock_init(&priv->page_lock);
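+ /* The "fe" reset line is optional; without it fe_reset_fe() is a
+ * no-op.
+ */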
+ priv->rst_fe = devm_reset_control_get(&pdev->dev, "fe");
+ if (IS_ERR(priv->rst_fe))
+ priv->rst_fe = NULL;
+
if (soc->init_data)
soc->init_data(soc, netdev);
netdev->vlan_features = netdev->hw_features &
if (fe_reg_table[FE_REG_FE_DMA_VID_BASE])
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
- priv = netdev_priv(netdev);
- spin_lock_init(&priv->page_lock);
if (fe_reg_table[FE_REG_FE_COUNTER_BASE]) {
priv->hw_stats = kzalloc(sizeof(*priv->hw_stats), GFP_KERNEL);
if (!priv->hw_stats) {