mediatek: update patches
[openwrt/staging/chunkeey.git] / target / linux / mediatek / patches-4.4 / 0088-net-next-mediatek-add-support-for-IRQ-grouping.patch
1 From f43e3aaaacaaf0482f0aaa6fbad03572f3a0c614 Mon Sep 17 00:00:00 2001
2 From: John Crispin <blogic@openwrt.org>
3 Date: Wed, 23 Mar 2016 18:31:48 +0100
4 Subject: [PATCH 88/91] net-next: mediatek: add support for IRQ grouping
5
6 The ethernet core has 3 IRQs. Using the IRQ grouping registers we are able
7 to separate TX and RX IRQs, which allows us to service them on separate
8 cores. This patch splits the irq handler into 2 separate functions, one for
9 TX and another for RX. The TX housekeeping is split out of the NAPI handler.
10 Instead we use a tasklet to handle housekeeping.
11
12 Signed-off-by: John Crispin <blogic@openwrt.org>
13 ---
14 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 165 ++++++++++++++++++---------
15 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 16 ++-
16 2 files changed, 125 insertions(+), 56 deletions(-)
17
18 diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
19 index f821820..b5e364c 100644
20 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
21 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
22 @@ -789,7 +789,7 @@ drop:
23 }
24
25 static int mtk_poll_rx(struct napi_struct *napi, int budget,
26 - struct mtk_eth *eth, u32 rx_intr)
27 + struct mtk_eth *eth)
28 {
29 struct mtk_rx_ring *ring = &eth->rx_ring;
30 int idx = ring->calc_idx;
31 @@ -877,19 +877,18 @@ release_desc:
32 }
33
34 if (done < budget)
35 - mtk_w32(eth, rx_intr, MTK_QMTK_INT_STATUS);
36 + mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);
37
38 return done;
39 }
40
41 -static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
42 +static int mtk_poll_tx(struct mtk_eth *eth, int budget)
43 {
44 struct mtk_tx_ring *ring = &eth->tx_ring;
45 struct mtk_tx_dma *desc;
46 struct sk_buff *skb;
47 struct mtk_tx_buf *tx_buf;
48 - int total = 0, done = 0;
49 - unsigned int bytes = 0;
50 + unsigned int bytes = 0, done = 0;
51 u32 cpu, dma;
52 static int condition;
53 int i;
54 @@ -941,64 +940,82 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
55 continue;
56 netdev_completed_queue(eth->netdev[i], done, bytes);
57 }
58 +
59 /* read hw index again make sure no new tx packet */
60 - if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
61 - *tx_again = true;
62 - else
63 + if (cpu == dma && cpu == mtk_r32(eth, MTK_QTX_DRX_PTR))
64 mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
65
66 - if (!total)
67 - return 0;
68 -
69 if (atomic_read(&ring->free_count) > ring->thresh)
70 mtk_wake_queue(eth);
71
72 - return total;
73 + return done;
74 }
75
76 -static int mtk_poll(struct napi_struct *napi, int budget)
77 +static void mtk_handle_status_irq(struct mtk_eth *eth)
78 {
79 - struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
80 - u32 status, status2, mask, tx_intr, rx_intr, status_intr;
81 - int tx_done, rx_done;
82 - bool tx_again = false;
83 -
84 - status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
85 - status2 = mtk_r32(eth, MTK_INT_STATUS2);
86 - tx_intr = MTK_TX_DONE_INT;
87 - rx_intr = MTK_RX_DONE_INT;
88 - status_intr = (MTK_GDM1_AF | MTK_GDM2_AF);
89 - tx_done = 0;
90 - rx_done = 0;
91 - tx_again = 0;
92 -
93 - if (status & tx_intr)
94 - tx_done = mtk_poll_tx(eth, budget, &tx_again);
95 -
96 - if (status & rx_intr)
97 - rx_done = mtk_poll_rx(napi, budget, eth, rx_intr);
98 + u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
99 + u32 status_intr = (MTK_GDM1_AF | MTK_GDM2_AF);
100
101 if (unlikely(status2 & status_intr)) {
102 mtk_stats_update(eth);
103 mtk_w32(eth, status_intr, MTK_INT_STATUS2);
104 }
105 +}
106 +
107 +static int mtk_napi_tx(struct napi_struct *napi, int budget)
108 +{
109 + struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
110 + u32 status, mask;
111 + int tx_done = 0;
112 +
113 + mtk_handle_status_irq(eth);
114 +
115 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
116 + tx_done = mtk_poll_tx(eth, budget);
117 + if (unlikely(netif_msg_intr(eth))) {
118 + mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
119 + dev_info(eth->dev,
120 + "done tx %d, intr 0x%08x/0x%x\n",
121 + tx_done, status, mask);
122 + }
123 +
124 + if (tx_done == budget)
125 + return budget;
126 +
127 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
128 + if (status & MTK_TX_DONE_INT)
129 + return budget;
130 +
131 + napi_complete(napi);
132 + mtk_irq_enable(eth, MTK_TX_DONE_INT);
133 +
134 + return tx_done;
135 +}
136 +
137 +static int mtk_napi_rx(struct napi_struct *napi, int budget)
138 +{
139 + struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
140 + u32 status, mask;
141 + int rx_done = 0;
142
143 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
144 + rx_done = mtk_poll_rx(napi, budget, eth);
145 if (unlikely(netif_msg_intr(eth))) {
146 mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
147 - netdev_info(eth->netdev[0],
148 - "done tx %d, rx %d, intr 0x%08x/0x%x\n",
149 - tx_done, rx_done, status, mask);
150 + dev_info(eth->dev,
151 + "done rx %d, intr 0x%08x/0x%x\n",
152 + rx_done, status, mask);
153 }
154
155 - if (tx_again || rx_done == budget)
156 + if (rx_done == budget)
157 return budget;
158
159 status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
160 - if (status & (tx_intr | rx_intr))
161 + if (status & MTK_RX_DONE_INT)
162 return budget;
163
164 napi_complete(napi);
165 - mtk_irq_enable(eth, tx_intr | rx_intr);
166 + mtk_irq_enable(eth, MTK_RX_DONE_INT);
167
168 return rx_done;
169 }
170 @@ -1235,22 +1252,44 @@ static void mtk_tx_timeout(struct net_device *dev)
171 schedule_work(&eth->pending_work);
172 }
173
174 -static irqreturn_t mtk_handle_irq(int irq, void *_eth)
175 +static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
176 {
177 struct mtk_eth *eth = _eth;
178 u32 status;
179
180 status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
181 + status &= ~MTK_TX_DONE_INT;
182 +
183 if (unlikely(!status))
184 return IRQ_NONE;
185
186 - if (likely(status & (MTK_RX_DONE_INT | MTK_TX_DONE_INT))) {
187 + if (status & MTK_RX_DONE_INT) {
188 if (likely(napi_schedule_prep(&eth->rx_napi)))
189 __napi_schedule(&eth->rx_napi);
190 - } else {
191 - mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
192 + mtk_irq_disable(eth, MTK_RX_DONE_INT);
193 }
194 - mtk_irq_disable(eth, (MTK_RX_DONE_INT | MTK_TX_DONE_INT));
195 + mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
196 +
197 + return IRQ_HANDLED;
198 +}
199 +
200 +static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
201 +{
202 + struct mtk_eth *eth = _eth;
203 + u32 status;
204 +
205 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
206 + status &= ~MTK_RX_DONE_INT;
207 +
208 + if (unlikely(!status))
209 + return IRQ_NONE;
210 +
211 + if (status & MTK_TX_DONE_INT) {
212 + if (likely(napi_schedule_prep(&eth->tx_napi)))
213 + __napi_schedule(&eth->tx_napi);
214 + mtk_irq_disable(eth, MTK_TX_DONE_INT);
215 + }
216 + mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
217
218 return IRQ_HANDLED;
219 }
220 @@ -1263,7 +1302,7 @@ static void mtk_poll_controller(struct net_device *dev)
221 u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT;
222
223 mtk_irq_disable(eth, int_mask);
224 - mtk_handle_irq(dev->irq, dev);
225 + mtk_handle_irq(dev->irq[0], dev);
226 mtk_irq_enable(eth, int_mask);
227 }
228 #endif
229 @@ -1299,6 +1338,7 @@ static int mtk_open(struct net_device *dev)
230 if (err)
231 return err;
232
233 + napi_enable(&eth->tx_napi);
234 napi_enable(&eth->rx_napi);
235 mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
236 }
237 @@ -1347,6 +1387,7 @@ static int mtk_stop(struct net_device *dev)
238 return 0;
239
240 mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
241 + napi_disable(&eth->tx_napi);
242 napi_disable(&eth->rx_napi);
243
244 mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
245 @@ -1384,7 +1425,11 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
246 /* Enable RX VLan Offloading */
247 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
248
249 - err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
250 + err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
251 + dev_name(eth->dev), eth);
252 + if (err)
253 + return err;
254 + err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
255 dev_name(eth->dev), eth);
256 if (err)
257 return err;
258 @@ -1400,7 +1445,11 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
259 mtk_w32(eth, 0, MTK_RST_GL);
260
261 /* FE int grouping */
262 - mtk_w32(eth, 0, MTK_FE_INT_GRP);
263 + mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
264 + mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
265 + mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
266 + mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
267 + mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
268
269 for (i = 0; i < 2; i++) {
270 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
271 @@ -1448,7 +1497,9 @@ static void mtk_uninit(struct net_device *dev)
272 phy_disconnect(mac->phy_dev);
273 mtk_mdio_cleanup(eth);
274 mtk_irq_disable(eth, ~0);
275 - free_irq(dev->irq, dev);
276 + free_irq(eth->irq[0], dev);
277 + free_irq(eth->irq[1], dev);
278 + free_irq(eth->irq[2], dev);
279 }
280
281 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
282 @@ -1723,10 +1774,10 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
283 dev_err(eth->dev, "error bringing up device\n");
284 goto free_netdev;
285 }
286 - eth->netdev[id]->irq = eth->irq;
287 + eth->netdev[id]->irq = eth->irq[0];
288 netif_info(eth, probe, eth->netdev[id],
289 "mediatek frame engine at 0x%08lx, irq %d\n",
290 - eth->netdev[id]->base_addr, eth->netdev[id]->irq);
291 + eth->netdev[id]->base_addr, eth->irq[0]);
292
293 return 0;
294
295 @@ -1743,6 +1794,7 @@ static int mtk_probe(struct platform_device *pdev)
296 struct mtk_soc_data *soc;
297 struct mtk_eth *eth;
298 int err;
299 + int i;
300
301 match = of_match_device(of_mtk_match, &pdev->dev);
302 soc = (struct mtk_soc_data *)match->data;
303 @@ -1778,10 +1830,12 @@ static int mtk_probe(struct platform_device *pdev)
304 return PTR_ERR(eth->rstc);
305 }
306
307 - eth->irq = platform_get_irq(pdev, 0);
308 - if (eth->irq < 0) {
309 - dev_err(&pdev->dev, "no IRQ resource found\n");
310 - return -ENXIO;
311 + for (i = 0; i < 3; i++) {
312 + eth->irq[i] = platform_get_irq(pdev, i);
313 + if (eth->irq[i] < 0) {
314 + dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
315 + return -ENXIO;
316 + }
317 }
318
319 eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif");
320 @@ -1822,7 +1876,9 @@ static int mtk_probe(struct platform_device *pdev)
321 * for NAPI to work
322 */
323 init_dummy_netdev(&eth->dummy_dev);
324 - netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
325 + netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
326 + MTK_NAPI_WEIGHT);
327 + netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
328 MTK_NAPI_WEIGHT);
329
330 platform_set_drvdata(pdev, eth);
331 @@ -1843,6 +1899,7 @@ static int mtk_remove(struct platform_device *pdev)
332 clk_disable_unprepare(eth->clk_gp1);
333 clk_disable_unprepare(eth->clk_gp2);
334
335 + netif_napi_del(&eth->tx_napi);
336 netif_napi_del(&eth->rx_napi);
337 mtk_cleanup(eth);
338 platform_set_drvdata(pdev, NULL);
339 diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
340 index 8220275..bf158f8 100644
341 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
342 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
343 @@ -68,6 +68,10 @@
344 /* Unicast Filter MAC Address Register - High */
345 #define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
346
347 +/* PDMA Interrupt grouping registers */
348 +#define MTK_PDMA_INT_GRP1 0xa50
349 +#define MTK_PDMA_INT_GRP2 0xa54
350 +
351 /* QDMA TX Queue Configuration Registers */
352 #define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
353 #define QDMA_RES_THRES 4
354 @@ -124,6 +128,11 @@
355 #define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
356 MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
357
358 +/* QDMA Interrupt grouping registers */
359 +#define MTK_QDMA_INT_GRP1 0x1a20
360 +#define MTK_QDMA_INT_GRP2 0x1a24
361 +#define MTK_RLS_DONE_INT BIT(0)
362 +
363 /* QDMA Interrupt Status Register */
364 #define MTK_QDMA_INT_MASK 0x1A1C
365
366 @@ -355,7 +364,8 @@ struct mtk_rx_ring {
367 * @dma_refcnt: track how many netdevs are using the DMA engine
368 * @tx_ring: Pointer to the memore holding info about the TX ring
369 * @rx_ring: Pointer to the memore holding info about the RX ring
370 - * @rx_napi: The NAPI struct
371 + * @tx_napi: The TX NAPI struct
372 + * @rx_napi: The RX NAPI struct
373 * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
374 * @phy_scratch_ring: physical address of scratch_ring
375 * @scratch_head: The scratch memory that scratch_ring points to.
376 @@ -376,7 +386,7 @@ struct mtk_eth {
377 struct net_device dummy_dev;
378 struct net_device *netdev[MTK_MAX_DEVS];
379 struct mtk_mac *mac[MTK_MAX_DEVS];
380 - int irq;
381 + int irq[3];
382 u32 msg_enable;
383 unsigned long sysclk;
384 struct regmap *ethsys;
385 @@ -384,6 +394,7 @@ struct mtk_eth {
386 atomic_t dma_refcnt;
387 struct mtk_tx_ring tx_ring;
388 struct mtk_rx_ring rx_ring;
389 + struct napi_struct tx_napi;
390 struct napi_struct rx_napi;
391 struct mtk_tx_dma *scratch_ring;
392 dma_addr_t phy_scratch_ring;
393 @@ -394,6 +405,7 @@ struct mtk_eth {
394 struct clk *clk_gp2;
395 struct mii_bus *mii_bus;
396 struct work_struct pending_work;
397 +
398 };
399
400 /* struct mtk_mac - the structure that holds the info about the MACs of the
401 --
402 1.7.10.4
403