From 5afceece38fa30e3c71e7ed9ac62aa70ba8cfbb1 Mon Sep 17 00:00:00 2001
From: John Crispin <john@phrozen.org>
Date: Fri, 16 Jun 2017 10:00:30 +0200
Subject: [PATCH 47/57] net-next: mediatek: split IRQ register locking into TX
 and RX

Originally the driver only utilized the new QDMA engine. The current code
still assumes this is the case when locking the IRQ mask register. Since
RX now runs on the old-style PDMA engine, we can add a second lock. This
patch reduces IRQ latency, as the TX and RX paths no longer need to wait
on each other under heavy load.
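
The mask update must stay locked in the first place because it is a
read-modify-write on a shared register. As a minimal sketch of the
pattern (not part of the patch: mtk_r32()/mtk_w32(), struct mtk_eth and
the register names are the driver's own, while the condensed helper is
hypothetical), the four new helpers below all reduce to:

  #include <linux/spinlock.h>

  /* One spinlock per IRQ mask register: the TX path (QDMA mask) and
   * the RX path (PDMA mask) touch different registers, so they no
   * longer serialize against each other.
   */
  static inline void mtk_irq_mask_rmw(struct mtk_eth *eth,
  				      spinlock_t *lock, unsigned int reg,
  				      u32 set, u32 clear)
  {
  	unsigned long flags;
  	u32 val;

  	/* Without the lock, two CPUs could both read the old mask
  	 * and one of the two updates would be lost.
  	 */
  	spin_lock_irqsave(lock, flags);
  	val = mtk_r32(eth, reg);
  	mtk_w32(eth, (val | set) & ~clear, reg);
  	spin_unlock_irqrestore(lock, flags);
  }

For example, the TX enable path would correspond to
mtk_irq_mask_rmw(eth, &eth->tx_irq_lock, MTK_QDMA_INT_MASK,
MTK_TX_DONE_INT, 0).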

Signed-off-by: John Crispin <john@phrozen.org>
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 79 ++++++++++++++++++-----------
 drivers/net/ethernet/mediatek/mtk_eth_soc.h |  5 +-
 2 files changed, 54 insertions(+), 30 deletions(-)

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -372,28 +372,48 @@ static void mtk_mdio_cleanup(struct mtk_
 	mdiobus_unregister(eth->mii_bus);
 }
 
-static inline void mtk_irq_disable(struct mtk_eth *eth,
-				   unsigned reg, u32 mask)
+static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
 {
 	unsigned long flags;
 	u32 val;
 
-	spin_lock_irqsave(&eth->irq_lock, flags);
-	val = mtk_r32(eth, reg);
-	mtk_w32(eth, val & ~mask, reg);
-	spin_unlock_irqrestore(&eth->irq_lock, flags);
+	spin_lock_irqsave(&eth->tx_irq_lock, flags);
+	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
+	mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
+	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
 }
 
-static inline void mtk_irq_enable(struct mtk_eth *eth,
-				  unsigned reg, u32 mask)
+static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
 {
 	unsigned long flags;
 	u32 val;
 
-	spin_lock_irqsave(&eth->irq_lock, flags);
-	val = mtk_r32(eth, reg);
-	mtk_w32(eth, val | mask, reg);
-	spin_unlock_irqrestore(&eth->irq_lock, flags);
+	spin_lock_irqsave(&eth->tx_irq_lock, flags);
+	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
+	mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
+	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
+}
+
+static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
+{
+	unsigned long flags;
+	u32 val;
+
+	spin_lock_irqsave(&eth->rx_irq_lock, flags);
+	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
+	mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
+	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
+}
+
+static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
+{
+	unsigned long flags;
+	u32 val;
+
+	spin_lock_irqsave(&eth->rx_irq_lock, flags);
+	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
+	mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
+	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
 }
 
 static int mtk_set_mac_address(struct net_device *dev, void *p)
@@ -1116,7 +1136,7 @@ static int mtk_napi_tx(struct napi_struc
 		return budget;
 
 	napi_complete(napi);
-	mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
+	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
 
 	return tx_done;
 }
@@ -1150,7 +1170,7 @@ poll_again:
 		goto poll_again;
 	}
 	napi_complete(napi);
-	mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
 
 	return rx_done + budget - remain_budget;
 }
@@ -1699,7 +1719,7 @@ static irqreturn_t mtk_handle_irq_rx(int
 
 	if (likely(napi_schedule_prep(&eth->rx_napi))) {
 		__napi_schedule(&eth->rx_napi);
-		mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+		mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
 	}
 
 	return IRQ_HANDLED;
@@ -1711,7 +1731,7 @@ static irqreturn_t mtk_handle_irq_tx(int
 
 	if (likely(napi_schedule_prep(&eth->tx_napi))) {
 		__napi_schedule(&eth->tx_napi);
-		mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
+		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
 	}
 
 	return IRQ_HANDLED;
@@ -1723,11 +1743,11 @@ static void mtk_poll_controller(struct n
 	struct mtk_mac *mac = netdev_priv(dev);
 	struct mtk_eth *eth = mac->hw;
 
-	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
-	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
 	mtk_handle_irq_rx(eth->irq[2], dev);
-	mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
-	mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
 }
 #endif
 
@@ -1770,8 +1790,8 @@ static int mtk_open(struct net_device *d
 
 		napi_enable(&eth->tx_napi);
 		napi_enable(&eth->rx_napi);
-		mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
-		mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+		mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
 	}
 	atomic_inc(&eth->dma_refcnt);
 
@@ -1816,8 +1836,8 @@ static int mtk_stop(struct net_device *d
 	if (!atomic_dec_and_test(&eth->dma_refcnt))
 		return 0;
 
-	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
-	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
+	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
 	napi_disable(&eth->tx_napi);
 	napi_disable(&eth->rx_napi);
 
@@ -1911,8 +1931,8 @@ static int mtk_hw_init(struct mtk_eth *e
 	mtk_w32(eth, 0, MTK_PDMA_DELAY_INT);
 	mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
 #endif
-	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
-	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
+	mtk_tx_irq_disable(eth, ~0);
+	mtk_rx_irq_disable(eth, ~0);
 	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
 	mtk_w32(eth, 0, MTK_RST_GL);
 
@@ -1983,8 +2003,8 @@ static void mtk_uninit(struct net_device
 	phy_disconnect(dev->phydev);
 	if (of_phy_is_fixed_link(mac->of_node))
 		of_phy_deregister_fixed_link(mac->of_node);
-	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
-	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
+	mtk_tx_irq_disable(eth, ~0);
+	mtk_rx_irq_disable(eth, ~0);
 }
 
 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -2442,7 +2462,8 @@ static int mtk_probe(struct platform_dev
 		return PTR_ERR(eth->base);
 
 	spin_lock_init(&eth->page_lock);
-	spin_lock_init(&eth->irq_lock);
+	spin_lock_init(&eth->tx_irq_lock);
+	spin_lock_init(&eth->rx_irq_lock);
 
 	eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
 						      "mediatek,ethsys");
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -526,6 +526,8 @@ struct mtk_rx_ring {
  * @dev:		The device pointer
  * @base:		The mapped register i/o base
  * @page_lock:		Make sure that register operations are atomic
+ * @tx_irq_lock:	Make sure that IRQ register operations are atomic
+ * @rx_irq_lock:	Make sure that IRQ register operations are atomic
  * @dummy_dev:		we run 2 netdevs on 1 physical DMA ring and need a
  *			dummy for NAPI to work
  * @netdev:		The netdev instances
@@ -555,7 +557,8 @@ struct mtk_eth {
 	struct device			*dev;
 	void __iomem			*base;
 	spinlock_t			page_lock;
-	spinlock_t			irq_lock;
+	spinlock_t			tx_irq_lock;
+	spinlock_t			rx_irq_lock;
 	struct net_device		dummy_dev;
 	struct net_device		*netdev[MTK_MAX_DEVS];
 	struct mtk_mac			*mac[MTK_MAX_DEVS];