1 From 190df1a9dbf4d8809b7f991194ce60e47f2290a2 Mon Sep 17 00:00:00 2001
2 From: John Crispin <john@phrozen.org>
3 Date: Wed, 23 Mar 2016 18:31:48 +0100
4 Subject: [PATCH 096/102] net-next: mediatek: add support for IRQ grouping
6 The ethernet core has 3 IRQs. Using the IRQ grouping registers we are able
7 to separate TX and RX IRQs, which allows us to service them on separate
8 cores. This patch splits the irq handler into 2 separate functions, one for
9 TX and another for RX. The TX housekeeping is split out into its own NAPI
12 Signed-off-by: John Crispin <john@phrozen.org>
14 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 156 +++++++++++++++++----------
15 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 15 ++-
16 2 files changed, 111 insertions(+), 60 deletions(-)
18 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
19 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
20 @@ -905,14 +905,13 @@ release_desc:
24 -static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
25 +static int mtk_poll_tx(struct mtk_eth *eth, int budget)
27 struct mtk_tx_ring *ring = ð->tx_ring;
28 struct mtk_tx_dma *desc;
30 struct mtk_tx_buf *tx_buf;
31 - int total = 0, done = 0;
32 - unsigned int bytes = 0;
33 + unsigned int bytes = 0, done = 0;
37 @@ -964,63 +963,82 @@ static int mtk_poll_tx(struct mtk_eth *e
38 netdev_completed_queue(eth->netdev[i], done, bytes);
41 - /* read hw index again make sure no new tx packet */
42 - if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
45 - mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
50 if (mtk_queue_stopped(eth) &&
51 (atomic_read(&ring->free_count) > ring->thresh))
58 -static int mtk_poll(struct napi_struct *napi, int budget)
59 +static void mtk_handle_status_irq(struct mtk_eth *eth)
61 - struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
62 - u32 status, status2, mask;
63 - int tx_done, rx_done;
64 - bool tx_again = false;
66 - status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
67 - status2 = mtk_r32(eth, MTK_INT_STATUS2);
72 - if (status & MTK_TX_DONE_INT)
73 - tx_done = mtk_poll_tx(eth, budget, &tx_again);
75 - if (status & MTK_RX_DONE_INT)
76 - rx_done = mtk_poll_rx(napi, budget, eth);
77 + u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
79 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
80 mtk_stats_update(eth);
81 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
86 +static int mtk_napi_tx(struct napi_struct *napi, int budget)
88 + struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
92 + mtk_handle_status_irq(eth);
93 + mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
94 + tx_done = mtk_poll_tx(eth, budget);
96 if (unlikely(netif_msg_intr(eth))) {
97 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
98 mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
99 - netdev_info(eth->netdev[0],
100 - "done tx %d, rx %d, intr 0x%08x/0x%x\n",
101 - tx_done, rx_done, status, mask);
103 + "done tx %d, intr 0x%08x/0x%x\n",
104 + tx_done, status, mask);
107 - if (tx_again || rx_done == budget)
108 + if (tx_done == budget)
111 status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
112 - if (status & (tx_intr | rx_intr))
113 + if (status & MTK_TX_DONE_INT)
117 - mtk_irq_enable(eth, MTK_RX_DONE_INT | MTK_RX_DONE_INT);
118 + mtk_irq_enable(eth, MTK_TX_DONE_INT);
123 +static int mtk_napi_rx(struct napi_struct *napi, int budget)
125 + struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
129 + mtk_handle_status_irq(eth);
130 + mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);
131 + rx_done = mtk_poll_rx(napi, budget, eth);
133 + if (unlikely(netif_msg_intr(eth))) {
134 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
135 + mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
137 + "done rx %d, intr 0x%08x/0x%x\n",
138 + rx_done, status, mask);
141 + if (rx_done == budget)
144 + status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
145 + if (status & MTK_RX_DONE_INT)
148 + napi_complete(napi);
149 + mtk_irq_enable(eth, MTK_RX_DONE_INT);
153 @@ -1256,22 +1274,26 @@ static void mtk_tx_timeout(struct net_de
154 schedule_work(ð->pending_work);
157 -static irqreturn_t mtk_handle_irq(int irq, void *_eth)
158 +static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
160 struct mtk_eth *eth = _eth;
163 - status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
164 - if (unlikely(!status))
166 + if (likely(napi_schedule_prep(ð->rx_napi))) {
167 + __napi_schedule(ð->rx_napi);
168 + mtk_irq_disable(eth, MTK_RX_DONE_INT);
171 - if (likely(status & (MTK_RX_DONE_INT | MTK_TX_DONE_INT))) {
172 - if (likely(napi_schedule_prep(ð->rx_napi)))
173 - __napi_schedule(ð->rx_napi);
175 - mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
176 + return IRQ_HANDLED;
179 +static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
181 + struct mtk_eth *eth = _eth;
183 + if (likely(napi_schedule_prep(ð->tx_napi))) {
184 + __napi_schedule(ð->tx_napi);
185 + mtk_irq_disable(eth, MTK_TX_DONE_INT);
187 - mtk_irq_disable(eth, (MTK_RX_DONE_INT | MTK_TX_DONE_INT));
191 @@ -1284,7 +1306,7 @@ static void mtk_poll_controller(struct n
192 u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT;
194 mtk_irq_disable(eth, int_mask);
195 - mtk_handle_irq(dev->irq, dev);
196 + mtk_handle_irq_rx(eth->irq[2], dev);
197 mtk_irq_enable(eth, int_mask);
200 @@ -1320,6 +1342,7 @@ static int mtk_open(struct net_device *d
204 + napi_enable(ð->tx_napi);
205 napi_enable(ð->rx_napi);
206 mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
208 @@ -1368,6 +1391,7 @@ static int mtk_stop(struct net_device *d
211 mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
212 + napi_disable(ð->tx_napi);
213 napi_disable(ð->rx_napi);
215 mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
216 @@ -1405,7 +1429,11 @@ static int __init mtk_hw_init(struct mtk
217 /* Enable RX VLan Offloading */
218 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
220 - err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
221 + err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
222 + dev_name(eth->dev), eth);
225 + err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
226 dev_name(eth->dev), eth);
229 @@ -1421,7 +1449,11 @@ static int __init mtk_hw_init(struct mtk
230 mtk_w32(eth, 0, MTK_RST_GL);
232 /* FE int grouping */
233 - mtk_w32(eth, 0, MTK_FE_INT_GRP);
234 + mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
235 + mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
236 + mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
237 + mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
238 + mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
240 for (i = 0; i < 2; i++) {
241 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
242 @@ -1469,7 +1501,9 @@ static void mtk_uninit(struct net_device
243 phy_disconnect(mac->phy_dev);
244 mtk_mdio_cleanup(eth);
245 mtk_irq_disable(eth, ~0);
246 - free_irq(dev->irq, dev);
247 + free_irq(eth->irq[1], dev);
248 + free_irq(eth->irq[2], dev);
252 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
253 @@ -1744,10 +1778,10 @@ static int mtk_add_mac(struct mtk_eth *e
254 dev_err(eth->dev, "error bringing up device\n");
257 - eth->netdev[id]->irq = eth->irq;
258 + eth->netdev[id]->irq = eth->irq[0];
259 netif_info(eth, probe, eth->netdev[id],
260 "mediatek frame engine at 0x%08lx, irq %d\n",
261 - eth->netdev[id]->base_addr, eth->netdev[id]->irq);
262 + eth->netdev[id]->base_addr, eth->irq[0]);
266 @@ -1764,6 +1798,7 @@ static int mtk_probe(struct platform_dev
267 struct mtk_soc_data *soc;
272 match = of_match_device(of_mtk_match, &pdev->dev);
273 soc = (struct mtk_soc_data *)match->data;
274 @@ -1799,10 +1834,12 @@ static int mtk_probe(struct platform_dev
275 return PTR_ERR(eth->rstc);
278 - eth->irq = platform_get_irq(pdev, 0);
279 - if (eth->irq < 0) {
280 - dev_err(&pdev->dev, "no IRQ resource found\n");
282 + for (i = 0; i < 3; i++) {
283 + eth->irq[i] = platform_get_irq(pdev, i);
284 + if (eth->irq[i] < 0) {
285 + dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
290 eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif");
291 @@ -1843,7 +1880,9 @@ static int mtk_probe(struct platform_dev
294 init_dummy_netdev(ð->dummy_dev);
295 - netif_napi_add(ð->dummy_dev, ð->rx_napi, mtk_poll,
296 + netif_napi_add(ð->dummy_dev, ð->tx_napi, mtk_napi_tx,
298 + netif_napi_add(ð->dummy_dev, ð->rx_napi, mtk_napi_rx,
301 platform_set_drvdata(pdev, eth);
302 @@ -1864,6 +1903,7 @@ static int mtk_remove(struct platform_de
303 clk_disable_unprepare(eth->clk_gp1);
304 clk_disable_unprepare(eth->clk_gp2);
306 + netif_napi_del(ð->tx_napi);
307 netif_napi_del(ð->rx_napi);
309 platform_set_drvdata(pdev, NULL);
310 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
311 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
313 /* Unicast Filter MAC Address Register - High */
314 #define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
316 +/* PDMA Interrupt grouping registers */
317 +#define MTK_PDMA_INT_GRP1 0xa50
318 +#define MTK_PDMA_INT_GRP2 0xa54
320 /* QDMA TX Queue Configuration Registers */
321 #define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
322 #define QDMA_RES_THRES 4
324 #define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
325 MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
327 +/* QDMA Interrupt grouping registers */
328 +#define MTK_QDMA_INT_GRP1 0x1a20
329 +#define MTK_QDMA_INT_GRP2 0x1a24
330 +#define MTK_RLS_DONE_INT BIT(0)
332 /* QDMA Interrupt Status Register */
333 #define MTK_QDMA_INT_MASK 0x1A1C
335 @@ -356,7 +365,8 @@ struct mtk_rx_ring {
336 * @dma_refcnt: track how many netdevs are using the DMA engine
337 * @tx_ring: Pointer to the memore holding info about the TX ring
338 * @rx_ring: Pointer to the memore holding info about the RX ring
339 - * @rx_napi: The NAPI struct
340 + * @tx_napi: The TX NAPI struct
341 + * @rx_napi: The RX NAPI struct
342 * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
343 * @phy_scratch_ring: physical address of scratch_ring
344 * @scratch_head: The scratch memory that scratch_ring points to.
345 @@ -377,7 +387,7 @@ struct mtk_eth {
346 struct net_device dummy_dev;
347 struct net_device *netdev[MTK_MAX_DEVS];
348 struct mtk_mac *mac[MTK_MAX_DEVS];
352 unsigned long sysclk;
353 struct regmap *ethsys;
354 @@ -385,6 +395,7 @@ struct mtk_eth {
356 struct mtk_tx_ring tx_ring;
357 struct mtk_rx_ring rx_ring;
358 + struct napi_struct tx_napi;
359 struct napi_struct rx_napi;
360 struct mtk_tx_dma *scratch_ring;
361 dma_addr_t phy_scratch_ring;