1 From f43e3aaaacaaf0482f0aaa6fbad03572f3a0c614 Mon Sep 17 00:00:00 2001
2 From: John Crispin <blogic@openwrt.org>
3 Date: Wed, 23 Mar 2016 18:31:48 +0100
4 Subject: [PATCH 88/91] net-next: mediatek: add support for IRQ grouping
6 The ethernet core has 3 IRQs. Using the IRQ grouping registers we are able
7 to separate TX and RX IRQs, which allows us to service them on separate
8 cores. This patch splits the irq handler into 2 separate functions, one for
9 TX and another for RX. The TX housekeeping is split out of the NAPI handler.
10 Instead we use a tasklet to handle housekeeping.
12 Signed-off-by: John Crispin <blogic@openwrt.org>
14 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 165 ++++++++++++++++++---------
15 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 16 ++-
16 2 files changed, 125 insertions(+), 56 deletions(-)
18 diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
19 index f821820..b5e364c 100644
20 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
21 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
22 @@ -789,7 +789,7 @@ drop:
25 static int mtk_poll_rx(struct napi_struct *napi, int budget,
26 - struct mtk_eth *eth, u32 rx_intr)
27 + struct mtk_eth *eth)
29 struct mtk_rx_ring *ring = ð->rx_ring;
30 int idx = ring->calc_idx;
31 @@ -877,19 +877,18 @@ release_desc:
35 - mtk_w32(eth, rx_intr, MTK_QMTK_INT_STATUS);
36 + mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);
41 -static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
42 +static int mtk_poll_tx(struct mtk_eth *eth, int budget)
44 struct mtk_tx_ring *ring = ð->tx_ring;
45 struct mtk_tx_dma *desc;
47 struct mtk_tx_buf *tx_buf;
48 - int total = 0, done = 0;
49 - unsigned int bytes = 0;
50 + unsigned int bytes = 0, done = 0;
54 @@ -941,64 +940,82 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
56 netdev_completed_queue(eth->netdev[i], done, bytes);
59 /* read hw index again make sure no new tx packet */
60 - if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
63 + if (cpu == dma && cpu == mtk_r32(eth, MTK_QTX_DRX_PTR))
64 mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
69 if (atomic_read(&ring->free_count) > ring->thresh)
76 -static int mtk_poll(struct napi_struct *napi, int budget)
77 +static void mtk_handle_status_irq(struct mtk_eth *eth)
79 - struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
80 - u32 status, status2, mask, tx_intr, rx_intr, status_intr;
81 - int tx_done, rx_done;
82 - bool tx_again = false;
84 - status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
85 - status2 = mtk_r32(eth, MTK_INT_STATUS2);
86 - tx_intr = MTK_TX_DONE_INT;
87 - rx_intr = MTK_RX_DONE_INT;
88 - status_intr = (MTK_GDM1_AF | MTK_GDM2_AF);
93 - if (status & tx_intr)
94 - tx_done = mtk_poll_tx(eth, budget, &tx_again);
96 - if (status & rx_intr)
97 - rx_done = mtk_poll_rx(napi, budget, eth, rx_intr);
98 + u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
99 + u32 status_intr = (MTK_GDM1_AF | MTK_GDM2_AF);
101 if (unlikely(status2 & status_intr)) {
102 mtk_stats_update(eth);
103 mtk_w32(eth, status_intr, MTK_INT_STATUS2);
107 +static int mtk_napi_tx(struct napi_struct *napi, int budget)
109 + struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
113 + mtk_handle_status_irq(eth);
115 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
116 + tx_done = mtk_poll_tx(eth, budget);
117 + if (unlikely(netif_msg_intr(eth))) {
118 + mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
120 + "done tx %d, intr 0x%08x/0x%x\n",
121 + tx_done, status, mask);
124 + if (tx_done == budget)
127 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
128 + if (status & MTK_TX_DONE_INT)
131 + napi_complete(napi);
132 + mtk_irq_enable(eth, MTK_TX_DONE_INT);
137 +static int mtk_napi_rx(struct napi_struct *napi, int budget)
139 + struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
143 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
144 + rx_done = mtk_poll_rx(napi, budget, eth);
145 if (unlikely(netif_msg_intr(eth))) {
146 mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
147 - netdev_info(eth->netdev[0],
148 - "done tx %d, rx %d, intr 0x%08x/0x%x\n",
149 - tx_done, rx_done, status, mask);
151 + "done rx %d, intr 0x%08x/0x%x\n",
152 + rx_done, status, mask);
155 - if (tx_again || rx_done == budget)
156 + if (rx_done == budget)
159 status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
160 - if (status & (tx_intr | rx_intr))
161 + if (status & MTK_RX_DONE_INT)
165 - mtk_irq_enable(eth, tx_intr | rx_intr);
166 + mtk_irq_enable(eth, MTK_RX_DONE_INT);
170 @@ -1235,22 +1252,44 @@ static void mtk_tx_timeout(struct net_device *dev)
171 schedule_work(ð->pending_work);
174 -static irqreturn_t mtk_handle_irq(int irq, void *_eth)
175 +static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
177 struct mtk_eth *eth = _eth;
180 status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
181 + status &= ~MTK_TX_DONE_INT;
183 if (unlikely(!status))
186 - if (likely(status & (MTK_RX_DONE_INT | MTK_TX_DONE_INT))) {
187 + if (status & MTK_RX_DONE_INT) {
188 if (likely(napi_schedule_prep(ð->rx_napi)))
189 __napi_schedule(ð->rx_napi);
191 - mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
192 + mtk_irq_disable(eth, MTK_RX_DONE_INT);
194 - mtk_irq_disable(eth, (MTK_RX_DONE_INT | MTK_TX_DONE_INT));
195 + mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
197 + return IRQ_HANDLED;
200 +static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
202 + struct mtk_eth *eth = _eth;
205 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
206 + status &= ~MTK_RX_DONE_INT;
208 + if (unlikely(!status))
211 + if (status & MTK_TX_DONE_INT) {
212 + if (likely(napi_schedule_prep(ð->tx_napi)))
213 + __napi_schedule(ð->tx_napi);
214 + mtk_irq_disable(eth, MTK_TX_DONE_INT);
216 + mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
220 @@ -1263,7 +1302,7 @@ static void mtk_poll_controller(struct net_device *dev)
221 u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT;
223 mtk_irq_disable(eth, int_mask);
224 - mtk_handle_irq(dev->irq, dev);
225 + mtk_handle_irq(dev->irq[0], dev);
226 mtk_irq_enable(eth, int_mask);
229 @@ -1299,6 +1338,7 @@ static int mtk_open(struct net_device *dev)
233 + napi_enable(ð->tx_napi);
234 napi_enable(ð->rx_napi);
235 mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
237 @@ -1347,6 +1387,7 @@ static int mtk_stop(struct net_device *dev)
240 mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
241 + napi_disable(ð->tx_napi);
242 napi_disable(ð->rx_napi);
244 mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
245 @@ -1384,7 +1425,11 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
246 /* Enable RX VLan Offloading */
247 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
249 - err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
250 + err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
251 + dev_name(eth->dev), eth);
254 + err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
255 dev_name(eth->dev), eth);
258 @@ -1400,7 +1445,11 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
259 mtk_w32(eth, 0, MTK_RST_GL);
261 /* FE int grouping */
262 - mtk_w32(eth, 0, MTK_FE_INT_GRP);
263 + mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
264 + mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
265 + mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
266 + mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
267 + mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
269 for (i = 0; i < 2; i++) {
270 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
271 @@ -1448,7 +1497,9 @@ static void mtk_uninit(struct net_device *dev)
272 phy_disconnect(mac->phy_dev);
273 mtk_mdio_cleanup(eth);
274 mtk_irq_disable(eth, ~0);
275 - free_irq(dev->irq, dev);
276 + free_irq(eth->irq[0], dev);
277 + free_irq(eth->irq[1], dev);
278 + free_irq(eth->irq[2], dev);
281 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
282 @@ -1723,10 +1774,10 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
283 dev_err(eth->dev, "error bringing up device\n");
286 - eth->netdev[id]->irq = eth->irq;
287 + eth->netdev[id]->irq = eth->irq[0];
288 netif_info(eth, probe, eth->netdev[id],
289 "mediatek frame engine at 0x%08lx, irq %d\n",
290 - eth->netdev[id]->base_addr, eth->netdev[id]->irq);
291 + eth->netdev[id]->base_addr, eth->irq[0]);
295 @@ -1743,6 +1794,7 @@ static int mtk_probe(struct platform_device *pdev)
296 struct mtk_soc_data *soc;
301 match = of_match_device(of_mtk_match, &pdev->dev);
302 soc = (struct mtk_soc_data *)match->data;
303 @@ -1778,10 +1830,12 @@ static int mtk_probe(struct platform_device *pdev)
304 return PTR_ERR(eth->rstc);
307 - eth->irq = platform_get_irq(pdev, 0);
308 - if (eth->irq < 0) {
309 - dev_err(&pdev->dev, "no IRQ resource found\n");
311 + for (i = 0; i < 3; i++) {
312 + eth->irq[i] = platform_get_irq(pdev, i);
313 + if (eth->irq[i] < 0) {
314 + dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
319 eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif");
320 @@ -1822,7 +1876,9 @@ static int mtk_probe(struct platform_device *pdev)
323 init_dummy_netdev(ð->dummy_dev);
324 - netif_napi_add(ð->dummy_dev, ð->rx_napi, mtk_poll,
325 + netif_napi_add(ð->dummy_dev, ð->tx_napi, mtk_napi_tx,
327 + netif_napi_add(ð->dummy_dev, ð->rx_napi, mtk_napi_rx,
330 platform_set_drvdata(pdev, eth);
331 @@ -1843,6 +1899,7 @@ static int mtk_remove(struct platform_device *pdev)
332 clk_disable_unprepare(eth->clk_gp1);
333 clk_disable_unprepare(eth->clk_gp2);
335 + netif_napi_del(ð->tx_napi);
336 netif_napi_del(ð->rx_napi);
338 platform_set_drvdata(pdev, NULL);
339 diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
340 index 8220275..bf158f8 100644
341 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
342 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
344 /* Unicast Filter MAC Address Register - High */
345 #define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
347 +/* PDMA Interrupt grouping registers */
348 +#define MTK_PDMA_INT_GRP1 0xa50
349 +#define MTK_PDMA_INT_GRP2 0xa54
351 /* QDMA TX Queue Configuration Registers */
352 #define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
353 #define QDMA_RES_THRES 4
355 #define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
356 MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
358 +/* QDMA Interrupt grouping registers */
359 +#define MTK_QDMA_INT_GRP1 0x1a20
360 +#define MTK_QDMA_INT_GRP2 0x1a24
361 +#define MTK_RLS_DONE_INT BIT(0)
363 /* QDMA Interrupt Status Register */
364 #define MTK_QDMA_INT_MASK 0x1A1C
366 @@ -355,7 +364,8 @@ struct mtk_rx_ring {
367 * @dma_refcnt: track how many netdevs are using the DMA engine
368 * @tx_ring: Pointer to the memore holding info about the TX ring
369 * @rx_ring: Pointer to the memore holding info about the RX ring
370 - * @rx_napi: The NAPI struct
371 + * @tx_napi: The TX NAPI struct
372 + * @rx_napi: The RX NAPI struct
373 * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
374 * @phy_scratch_ring: physical address of scratch_ring
375 * @scratch_head: The scratch memory that scratch_ring points to.
376 @@ -376,7 +386,7 @@ struct mtk_eth {
377 struct net_device dummy_dev;
378 struct net_device *netdev[MTK_MAX_DEVS];
379 struct mtk_mac *mac[MTK_MAX_DEVS];
383 unsigned long sysclk;
384 struct regmap *ethsys;
385 @@ -384,6 +394,7 @@ struct mtk_eth {
387 struct mtk_tx_ring tx_ring;
388 struct mtk_rx_ring rx_ring;
389 + struct napi_struct tx_napi;
390 struct napi_struct rx_napi;
391 struct mtk_tx_dma *scratch_ring;
392 dma_addr_t phy_scratch_ring;
393 @@ -394,6 +405,7 @@ struct mtk_eth {
395 struct mii_bus *mii_bus;
396 struct work_struct pending_work;
400 /* struct mtk_mac - the structure that holds the info about the MACs of the