011fad2e61850ddb4e05a48f7a13ae7727ba5e65
[openwrt/staging/chunkeey.git] / target / linux / mediatek / patches-4.4 / 0071-net-mediatek-fix-TX-locking.patch
1 From b9df14f712866925856c0ffb2d899511c21e1b8a Mon Sep 17 00:00:00 2001
2 From: John Crispin <blogic@openwrt.org>
3 Date: Tue, 29 Mar 2016 17:20:01 +0200
4 Subject: [PATCH 71/78] net: mediatek: fix TX locking
5
6 Inside the TX path there is a lock inside the tx_map function. This is
7 however too late. The patch moves the lock to the start of the xmit
8 function right before the free count check of the DMA ring happens.
9 If we do not do this, the code becomes racy leading to TX stalls and
10 dropped packets. This happens as there are 2 netdevs running on the
11 same physical DMA ring.
12
13 Signed-off-by: John Crispin <blogic@openwrt.org>
14 ---
15 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 20 ++++++++++----------
16 1 file changed, 10 insertions(+), 10 deletions(-)
17
18 diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
19 index 26eeb1a..67b18f9 100644
20 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
21 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
22 @@ -536,7 +536,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
23 struct mtk_eth *eth = mac->hw;
24 struct mtk_tx_dma *itxd, *txd;
25 struct mtk_tx_buf *tx_buf;
26 - unsigned long flags;
27 dma_addr_t mapped_addr;
28 unsigned int nr_frags;
29 int i, n_desc = 1;
30 @@ -568,11 +567,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
31 if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
32 return -ENOMEM;
33
34 - /* normally we can rely on the stack not calling this more than once,
35 - * however we have 2 queues running ont he same ring so we need to lock
36 - * the ring access
37 - */
38 - spin_lock_irqsave(&eth->page_lock, flags);
39 WRITE_ONCE(itxd->txd1, mapped_addr);
40 tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
41 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
42 @@ -632,8 +626,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
43 WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
44 (!nr_frags * TX_DMA_LS0)));
45
46 - spin_unlock_irqrestore(&eth->page_lock, flags);
47 -
48 netdev_sent_queue(dev, skb->len);
49 skb_tx_timestamp(skb);
50
51 @@ -661,8 +653,6 @@ err_dma:
52 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
53 } while (itxd != txd);
54
55 - spin_unlock_irqrestore(&eth->page_lock, flags);
56 -
57 return -ENOMEM;
58 }
59
60 @@ -712,14 +702,22 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
61 struct mtk_eth *eth = mac->hw;
62 struct mtk_tx_ring *ring = &eth->tx_ring;
63 struct net_device_stats *stats = &dev->stats;
64 + unsigned long flags;
65 bool gso = false;
66 int tx_num;
67
68 + /* normally we can rely on the stack not calling this more than once,
69 + * however we have 2 queues running on the same ring so we need to lock
70 + * the ring access
71 + */
72 + spin_lock_irqsave(&eth->page_lock, flags);
73 +
74 tx_num = mtk_cal_txd_req(skb);
75 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
76 mtk_stop_queue(eth);
77 netif_err(eth, tx_queued, dev,
78 "Tx Ring full when queue awake!\n");
79 + spin_unlock_irqrestore(&eth->page_lock, flags);
80 return NETDEV_TX_BUSY;
81 }
82
83 @@ -747,10 +745,12 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
84 ring->thresh))
85 mtk_wake_queue(eth);
86 }
87 + spin_unlock_irqrestore(&eth->page_lock, flags);
88
89 return NETDEV_TX_OK;
90
91 drop:
92 + spin_unlock_irqrestore(&eth->page_lock, flags);
93 stats->tx_dropped++;
94 dev_kfree_skb(skb);
95 return NETDEV_TX_OK;
96 --
97 1.7.10.4
98