3431b119c0b760348fe87dc6ae7c1f6de831e4ad
[openwrt/openwrt.git] / target / linux / mediatek / patches-4.14 / 0027-net-next-mediatek-fix-DQL-support.patch
1 From f974e397b806f7b16d11cc1542538616291924f1 Mon Sep 17 00:00:00 2001
2 From: John Crispin <john@phrozen.org>
3 Date: Sat, 23 Apr 2016 11:57:21 +0200
4 Subject: [PATCH 27/57] net-next: mediatek: fix DQL support
5
6 The MTK ethernet core has 2 MACs both sitting on the same DMA ring. The
7 current code will assign the TX traffic of each MAC to its own DQL. This
8 results in the amount of data that DQL says is in the queue being
9 incorrect, as the data from multiple devices is in fact enqueued. This
10 makes any decision based on these values non-deterministic. Fix this by
11 tracking all TX traffic, regardless of the MAC it belongs to, in the DQL
12 of all devices using the DMA.
13
14 Signed-off-by: John Crispin <john@phrozen.org>
15 ---
16 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 35 +++++++++++++++++------------
17 1 file changed, 21 insertions(+), 14 deletions(-)
18
19 Index: linux-4.14.11/drivers/net/ethernet/mediatek/mtk_eth_soc.c
20 ===================================================================
21 --- linux-4.14.11.orig/drivers/net/ethernet/mediatek/mtk_eth_soc.c
22 +++ linux-4.14.11/drivers/net/ethernet/mediatek/mtk_eth_soc.c
23 @@ -779,7 +779,16 @@ static int mtk_tx_map(struct sk_buff *sk
24 WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
25 (!nr_frags * TX_DMA_LS0)));
26
27 - netdev_sent_queue(dev, skb->len);
28 + /* we have a single DMA ring so BQL needs to be updated for all devices
29 + * sitting on this ring
30 + */
31 + for (i = 0; i < MTK_MAC_COUNT; i++) {
32 + if (!eth->netdev[i])
33 + continue;
34 +
35 + netdev_sent_queue(eth->netdev[i], skb->len);
36 + }
37 +
38 skb_tx_timestamp(skb);
39
40 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
41 @@ -1076,20 +1085,17 @@ static int mtk_poll_tx(struct mtk_eth *e
42 struct mtk_tx_dma *desc;
43 struct sk_buff *skb;
44 struct mtk_tx_buf *tx_buf;
45 - unsigned int done[MTK_MAX_DEVS];
46 - unsigned int bytes[MTK_MAX_DEVS];
47 + int total = 0, done = 0;
48 + unsigned int bytes = 0;
49 u32 cpu, dma;
50 - int total = 0, i;
51 -
52 - memset(done, 0, sizeof(done));
53 - memset(bytes, 0, sizeof(bytes));
54 + int i;
55
56 cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
57 dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
58
59 desc = mtk_qdma_phys_to_virt(ring, cpu);
60
61 - while ((cpu != dma) && budget) {
62 + while ((cpu != dma) && (done < budget)) {
63 u32 next_cpu = desc->txd2;
64 int mac = 0;
65
66 @@ -1106,9 +1112,8 @@ static int mtk_poll_tx(struct mtk_eth *e
67 break;
68
69 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
70 - bytes[mac] += skb->len;
71 - done[mac]++;
72 - budget--;
73 + bytes += skb->len;
74 + done++;
75 }
76 mtk_tx_unmap(eth, tx_buf);
77
78 @@ -1120,11 +1125,13 @@ static int mtk_poll_tx(struct mtk_eth *e
79
80 mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
81
82 + /* we have a single DMA ring so BQL needs to be updated for all devices
83 + * sitting on this ring
84 + */
85 for (i = 0; i < MTK_MAC_COUNT; i++) {
86 - if (!eth->netdev[i] || !done[i])
87 + if (!eth->netdev[i])
88 continue;
89 - netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
90 - total += done[i];
91 + netdev_completed_queue(eth->netdev[i], done, bytes);
92 }
93
94 if (mtk_queue_stopped(eth) &&