kernel: update kernel 4.4 to version 4.4.7
[openwrt/svn-archive/archive.git] target/linux/mediatek/patches-4.4/0071-net-mediatek-fix-TX-locking.patch
From b9df14f712866925856c0ffb2d899511c21e1b8a Mon Sep 17 00:00:00 2001
From: John Crispin <blogic@openwrt.org>
Date: Tue, 29 Mar 2016 17:20:01 +0200
Subject: [PATCH 71/81] net: mediatek: fix TX locking

The TX path takes its lock inside the tx_map function, which is too
late: by then the free-count check of the DMA ring has already run
unprotected. This patch moves the lock to the start of the xmit
function, right before that check. Without this the code is racy and
leads to TX stalls and dropped packets, because two netdevs run on the
same physical DMA ring.
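
For illustration only, here is a minimal sketch of the check-then-act
race the patch closes. The sketch_* names and the reduced ring
structure are hypothetical, not the driver's real API; the point is
that the lock must cover both the free-count check and the descriptor
consumption:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/spinlock.h>
    #include <linux/atomic.h>

    /* One ring shared by two netdevs, as in this driver. */
    struct sketch_ring {
        spinlock_t lock;        /* serializes both xmit paths */
        atomic_t free_count;    /* free TX descriptors on the shared ring */
    };

    static netdev_tx_t sketch_xmit(struct sk_buff *skb, struct net_device *dev)
    {
        struct sketch_ring *ring = netdev_priv(dev);
        unsigned long flags;

        /* Lock BEFORE the check: taken any later, both netdevs can
         * observe enough free descriptors and then race to consume
         * them, oversubscribing the ring.
         */
        spin_lock_irqsave(&ring->lock, flags);
        if (atomic_read(&ring->free_count) < 1) {
            spin_unlock_irqrestore(&ring->lock, flags);
            return NETDEV_TX_BUSY;      /* ring full; do not free the skb */
        }
        atomic_dec(&ring->free_count);  /* stand-in for mapping/queueing */
        spin_unlock_irqrestore(&ring->lock, flags);

        dev_kfree_skb(skb);             /* sketch only: no real DMA here */
        return NETDEV_TX_OK;
    }

In the patched driver the same role is played by eth->page_lock, taken
at the top of mtk_start_xmit and released on every exit path, including
the drop label.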

Signed-off-by: John Crispin <blogic@openwrt.org>
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -536,7 +536,6 @@ static int mtk_tx_map(struct sk_buff *sk
 	struct mtk_eth *eth = mac->hw;
 	struct mtk_tx_dma *itxd, *txd;
 	struct mtk_tx_buf *tx_buf;
-	unsigned long flags;
 	dma_addr_t mapped_addr;
 	unsigned int nr_frags;
 	int i, n_desc = 1;
@@ -568,11 +567,6 @@ static int mtk_tx_map(struct sk_buff *sk
 	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
 		return -ENOMEM;

-	/* normally we can rely on the stack not calling this more than once,
-	 * however we have 2 queues running ont he same ring so we need to lock
-	 * the ring access
-	 */
-	spin_lock_irqsave(&eth->page_lock, flags);
 	WRITE_ONCE(itxd->txd1, mapped_addr);
 	tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
 	dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
@@ -632,8 +626,6 @@ static int mtk_tx_map(struct sk_buff *sk
 	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
 				(!nr_frags * TX_DMA_LS0)));

-	spin_unlock_irqrestore(&eth->page_lock, flags);
-
 	netdev_sent_queue(dev, skb->len);
 	skb_tx_timestamp(skb);

@@ -661,8 +653,6 @@ err_dma:
 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
 	} while (itxd != txd);

-	spin_unlock_irqrestore(&eth->page_lock, flags);
-
 	return -ENOMEM;
 }

@@ -712,14 +702,22 @@ static int mtk_start_xmit(struct sk_buff
 	struct mtk_eth *eth = mac->hw;
 	struct mtk_tx_ring *ring = &eth->tx_ring;
 	struct net_device_stats *stats = &dev->stats;
+	unsigned long flags;
 	bool gso = false;
 	int tx_num;

+	/* normally we can rely on the stack not calling this more than once,
+	 * however we have 2 queues running on the same ring so we need to lock
+	 * the ring access
+	 */
+	spin_lock_irqsave(&eth->page_lock, flags);
+
 	tx_num = mtk_cal_txd_req(skb);
 	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
 		mtk_stop_queue(eth);
 		netif_err(eth, tx_queued, dev,
 			  "Tx Ring full when queue awake!\n");
+		spin_unlock_irqrestore(&eth->page_lock, flags);
 		return NETDEV_TX_BUSY;
 	}

@@ -747,10 +745,12 @@ static int mtk_start_xmit(struct sk_buff
 			     ring->thresh))
 			mtk_wake_queue(eth);
 	}
+	spin_unlock_irqrestore(&eth->page_lock, flags);

 	return NETDEV_TX_OK;

 drop:
+	spin_unlock_irqrestore(&eth->page_lock, flags);
 	stats->tx_dropped++;
 	dev_kfree_skb(skb);
 	return NETDEV_TX_OK;