28183b9098e1a86bcf9eafefa7e2302abb23692b
[openwrt/openwrt.git] / target / linux / mediatek / patches-4.4 / 0088-net-next-mediatek-add-support-for-IRQ-grouping.patch
1 From 41b4500871ab5b1ef27c6fb49ffd8aac8c7e5009 Mon Sep 17 00:00:00 2001
2 From: John Crispin <blogic@openwrt.org>
3 Date: Wed, 23 Mar 2016 18:31:48 +0100
4 Subject: [PATCH 88/91] net-next: mediatek: add support for IRQ grouping
5
6 The ethernet core has 3 IRQs. Using the IRQ grouping registers we are able
7 to separate TX and RX IRQs, which allows us to service them on separate
8 cores. This patch splits the irq handler into 2 separate functions, one for
9 TX and another for RX. The TX housekeeping is split out of the NAPI handler.
10 Instead we use a dedicated NAPI instance to handle the TX housekeeping.
11
12 Signed-off-by: John Crispin <blogic@openwrt.org>
13 ---
14 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 164 ++++++++++++++++++---------
15 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 16 ++-
16 2 files changed, 124 insertions(+), 56 deletions(-)
17
18 diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
19 index f86d551..6557026 100644
20 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
21 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
22 @@ -790,7 +790,7 @@ drop:
23 }
24
25 static int mtk_poll_rx(struct napi_struct *napi, int budget,
26 - struct mtk_eth *eth, u32 rx_intr)
27 + struct mtk_eth *eth)
28 {
29 struct mtk_rx_ring *ring = &eth->rx_ring;
30 int idx = ring->calc_idx;
31 @@ -878,19 +878,18 @@ release_desc:
32 }
33
34 if (done < budget)
35 - mtk_w32(eth, rx_intr, MTK_QMTK_INT_STATUS);
36 + mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);
37
38 return done;
39 }
40
41 -static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
42 +static int mtk_poll_tx(struct mtk_eth *eth, int budget)
43 {
44 struct mtk_tx_ring *ring = &eth->tx_ring;
45 struct mtk_tx_dma *desc;
46 struct sk_buff *skb;
47 struct mtk_tx_buf *tx_buf;
48 - int total = 0, done = 0;
49 - unsigned int bytes = 0;
50 + unsigned int bytes = 0, done = 0;
51 u32 cpu, dma;
52 static int condition;
53 int i;
54 @@ -944,63 +943,80 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
55 }
56
57 /* read hw index again make sure no new tx packet */
58 - if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
59 - *tx_again = true;
60 - else
61 + if (cpu == dma && cpu == mtk_r32(eth, MTK_QTX_DRX_PTR))
62 mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
63
64 - if (!total)
65 - return 0;
66 -
67 if (atomic_read(&ring->free_count) > ring->thresh)
68 mtk_wake_queue(eth);
69
70 - return total;
71 + return done;
72 }
73
74 -static int mtk_poll(struct napi_struct *napi, int budget)
75 +static void mtk_handle_status_irq(struct mtk_eth *eth)
76 {
77 - struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
78 - u32 status, status2, mask, tx_intr, rx_intr, status_intr;
79 - int tx_done, rx_done;
80 - bool tx_again = false;
81 -
82 - status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
83 - status2 = mtk_r32(eth, MTK_INT_STATUS2);
84 - tx_intr = MTK_TX_DONE_INT;
85 - rx_intr = MTK_RX_DONE_INT;
86 - status_intr = (MTK_GDM1_AF | MTK_GDM2_AF);
87 - tx_done = 0;
88 - rx_done = 0;
89 - tx_again = 0;
90 -
91 - if (status & tx_intr)
92 - tx_done = mtk_poll_tx(eth, budget, &tx_again);
93 -
94 - if (status & rx_intr)
95 - rx_done = mtk_poll_rx(napi, budget, eth, rx_intr);
96 + u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
97 + u32 status_intr = (MTK_GDM1_AF | MTK_GDM2_AF);
98
99 if (unlikely(status2 & status_intr)) {
100 mtk_stats_update(eth);
101 mtk_w32(eth, status_intr, MTK_INT_STATUS2);
102 }
103 +}
104
105 +static int mtk_napi_tx(struct napi_struct *napi, int budget)
106 +{
107 + struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
108 + u32 status, mask;
109 + int tx_done = 0;
110 +
111 + mtk_handle_status_irq(eth);
112 +
113 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
114 + tx_done = mtk_poll_tx(eth, budget);
115 if (unlikely(netif_msg_intr(eth))) {
116 mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
117 - netdev_info(eth->netdev[0],
118 - "done tx %d, rx %d, intr 0x%08x/0x%x\n",
119 - tx_done, rx_done, status, mask);
120 + dev_info(eth->dev,
121 + "done tx %d, intr 0x%08x/0x%x\n",
122 + tx_done, status, mask);
123 }
124
125 - if (tx_again || rx_done == budget)
126 + if (tx_done == budget)
127 return budget;
128
129 status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
130 - if (status & (tx_intr | rx_intr))
131 + if (status & MTK_TX_DONE_INT)
132 return budget;
133
134 napi_complete(napi);
135 - mtk_irq_enable(eth, tx_intr | rx_intr);
136 + mtk_irq_enable(eth, MTK_TX_DONE_INT);
137 +
138 + return tx_done;
139 +}
140 +
141 +static int mtk_napi_rx(struct napi_struct *napi, int budget)
142 +{
143 + struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
144 + u32 status, mask;
145 + int rx_done = 0;
146 +
147 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
148 + rx_done = mtk_poll_rx(napi, budget, eth);
149 + if (unlikely(netif_msg_intr(eth))) {
150 + mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
151 + dev_info(eth->dev,
152 + "done rx %d, intr 0x%08x/0x%x\n",
153 + rx_done, status, mask);
154 + }
155 +
156 + if (rx_done == budget)
157 + return budget;
158 +
159 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
160 + if (status & MTK_RX_DONE_INT)
161 + return budget;
162 +
163 + napi_complete(napi);
164 + mtk_irq_enable(eth, MTK_RX_DONE_INT);
165
166 return rx_done;
167 }
168 @@ -1237,22 +1253,44 @@ static void mtk_tx_timeout(struct net_device *dev)
169 schedule_work(&eth->pending_work);
170 }
171
172 -static irqreturn_t mtk_handle_irq(int irq, void *_eth)
173 +static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
174 {
175 struct mtk_eth *eth = _eth;
176 u32 status;
177
178 status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
179 + status &= ~MTK_TX_DONE_INT;
180 +
181 if (unlikely(!status))
182 return IRQ_NONE;
183
184 - if (likely(status & (MTK_RX_DONE_INT | MTK_TX_DONE_INT))) {
185 + if (status & MTK_RX_DONE_INT) {
186 if (likely(napi_schedule_prep(&eth->rx_napi)))
187 __napi_schedule(&eth->rx_napi);
188 - } else {
189 - mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
190 + mtk_irq_disable(eth, MTK_RX_DONE_INT);
191 + }
192 + mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
193 +
194 + return IRQ_HANDLED;
195 +}
196 +
197 +static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
198 +{
199 + struct mtk_eth *eth = _eth;
200 + u32 status;
201 +
202 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
203 + status &= ~MTK_RX_DONE_INT;
204 +
205 + if (unlikely(!status))
206 + return IRQ_NONE;
207 +
208 + if (status & MTK_TX_DONE_INT) {
209 + if (likely(napi_schedule_prep(&eth->tx_napi)))
210 + __napi_schedule(&eth->tx_napi);
211 + mtk_irq_disable(eth, MTK_TX_DONE_INT);
212 }
213 - mtk_irq_disable(eth, (MTK_RX_DONE_INT | MTK_TX_DONE_INT));
214 + mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
215
216 return IRQ_HANDLED;
217 }
218 @@ -1265,7 +1303,7 @@ static void mtk_poll_controller(struct net_device *dev)
219 u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT;
220
221 mtk_irq_disable(eth, int_mask);
222 - mtk_handle_irq(dev->irq, dev);
223 + mtk_handle_irq(dev->irq[0], dev);
224 mtk_irq_enable(eth, int_mask);
225 }
226 #endif
227 @@ -1301,6 +1339,7 @@ static int mtk_open(struct net_device *dev)
228 if (err)
229 return err;
230
231 + napi_enable(&eth->tx_napi);
232 napi_enable(&eth->rx_napi);
233 mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
234 }
235 @@ -1349,6 +1388,7 @@ static int mtk_stop(struct net_device *dev)
236 return 0;
237
238 mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
239 + napi_disable(&eth->tx_napi);
240 napi_disable(&eth->rx_napi);
241
242 mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
243 @@ -1386,7 +1426,11 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
244 /* Enable RX VLan Offloading */
245 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
246
247 - err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
248 + err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
249 + dev_name(eth->dev), eth);
250 + if (err)
251 + return err;
252 + err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
253 dev_name(eth->dev), eth);
254 if (err)
255 return err;
256 @@ -1402,7 +1446,11 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
257 mtk_w32(eth, 0, MTK_RST_GL);
258
259 /* FE int grouping */
260 - mtk_w32(eth, 0, MTK_FE_INT_GRP);
261 + mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
262 + mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
263 + mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
264 + mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
265 + mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
266
267 for (i = 0; i < 2; i++) {
268 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
269 @@ -1450,7 +1498,9 @@ static void mtk_uninit(struct net_device *dev)
270 phy_disconnect(mac->phy_dev);
271 mtk_mdio_cleanup(eth);
272 mtk_irq_disable(eth, ~0);
273 - free_irq(dev->irq, dev);
274 + free_irq(eth->irq[0], dev);
275 + free_irq(eth->irq[1], dev);
276 + free_irq(eth->irq[2], dev);
277 }
278
279 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
280 @@ -1725,10 +1775,10 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
281 dev_err(eth->dev, "error bringing up device\n");
282 goto free_netdev;
283 }
284 - eth->netdev[id]->irq = eth->irq;
285 + eth->netdev[id]->irq = eth->irq[0];
286 netif_info(eth, probe, eth->netdev[id],
287 "mediatek frame engine at 0x%08lx, irq %d\n",
288 - eth->netdev[id]->base_addr, eth->netdev[id]->irq);
289 + eth->netdev[id]->base_addr, eth->irq[0]);
290
291 return 0;
292
293 @@ -1745,6 +1795,7 @@ static int mtk_probe(struct platform_device *pdev)
294 struct mtk_soc_data *soc;
295 struct mtk_eth *eth;
296 int err;
297 + int i;
298
299 match = of_match_device(of_mtk_match, &pdev->dev);
300 soc = (struct mtk_soc_data *)match->data;
301 @@ -1780,10 +1831,12 @@ static int mtk_probe(struct platform_device *pdev)
302 return PTR_ERR(eth->rstc);
303 }
304
305 - eth->irq = platform_get_irq(pdev, 0);
306 - if (eth->irq < 0) {
307 - dev_err(&pdev->dev, "no IRQ resource found\n");
308 - return -ENXIO;
309 + for (i = 0; i < 3; i++) {
310 + eth->irq[i] = platform_get_irq(pdev, i);
311 + if (eth->irq[i] < 0) {
312 + dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
313 + return -ENXIO;
314 + }
315 }
316
317 eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif");
318 @@ -1824,7 +1877,9 @@ static int mtk_probe(struct platform_device *pdev)
319 * for NAPI to work
320 */
321 init_dummy_netdev(&eth->dummy_dev);
322 - netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
323 + netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
324 + MTK_NAPI_WEIGHT);
325 + netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
326 MTK_NAPI_WEIGHT);
327
328 platform_set_drvdata(pdev, eth);
329 @@ -1845,6 +1900,7 @@ static int mtk_remove(struct platform_device *pdev)
330 clk_disable_unprepare(eth->clk_gp1);
331 clk_disable_unprepare(eth->clk_gp2);
332
333 + netif_napi_del(&eth->tx_napi);
334 netif_napi_del(&eth->rx_napi);
335 mtk_cleanup(eth);
336 platform_set_drvdata(pdev, NULL);
337 diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
338 index 8220275..bf158f8 100644
339 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
340 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
341 @@ -68,6 +68,10 @@
342 /* Unicast Filter MAC Address Register - High */
343 #define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
344
345 +/* PDMA Interrupt grouping registers */
346 +#define MTK_PDMA_INT_GRP1 0xa50
347 +#define MTK_PDMA_INT_GRP2 0xa54
348 +
349 /* QDMA TX Queue Configuration Registers */
350 #define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
351 #define QDMA_RES_THRES 4
352 @@ -124,6 +128,11 @@
353 #define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
354 MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
355
356 +/* QDMA Interrupt grouping registers */
357 +#define MTK_QDMA_INT_GRP1 0x1a20
358 +#define MTK_QDMA_INT_GRP2 0x1a24
359 +#define MTK_RLS_DONE_INT BIT(0)
360 +
361 /* QDMA Interrupt Status Register */
362 #define MTK_QDMA_INT_MASK 0x1A1C
363
364 @@ -355,7 +364,8 @@ struct mtk_rx_ring {
365 * @dma_refcnt: track how many netdevs are using the DMA engine
366 * @tx_ring: Pointer to the memore holding info about the TX ring
367 * @rx_ring: Pointer to the memore holding info about the RX ring
368 - * @rx_napi: The NAPI struct
369 + * @tx_napi: The TX NAPI struct
370 + * @rx_napi: The RX NAPI struct
371 * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
372 * @phy_scratch_ring: physical address of scratch_ring
373 * @scratch_head: The scratch memory that scratch_ring points to.
374 @@ -376,7 +386,7 @@ struct mtk_eth {
375 struct net_device dummy_dev;
376 struct net_device *netdev[MTK_MAX_DEVS];
377 struct mtk_mac *mac[MTK_MAX_DEVS];
378 - int irq;
379 + int irq[3];
380 u32 msg_enable;
381 unsigned long sysclk;
382 struct regmap *ethsys;
383 @@ -384,6 +394,7 @@ struct mtk_eth {
384 atomic_t dma_refcnt;
385 struct mtk_tx_ring tx_ring;
386 struct mtk_rx_ring rx_ring;
387 + struct napi_struct tx_napi;
388 struct napi_struct rx_napi;
389 struct mtk_tx_dma *scratch_ring;
390 dma_addr_t phy_scratch_ring;
391 @@ -394,6 +405,7 @@ struct mtk_eth {
392 struct clk *clk_gp2;
393 struct mii_bus *mii_bus;
394 struct work_struct pending_work;
395 +
396 };
397
398 /* struct mtk_mac - the structure that holds the info about the MACs of the
399 --
400 1.7.10.4
401