kernel: update kernel 4.4 to version 4.4.12
[openwrt/openwrt.git] target/linux/mediatek/patches-4.4/0096-net-next-mediatek-add-support-for-IRQ-grouping.patch
From 190df1a9dbf4d8809b7f991194ce60e47f2290a2 Mon Sep 17 00:00:00 2001
From: John Crispin <john@phrozen.org>
Date: Wed, 23 Mar 2016 18:31:48 +0100
Subject: [PATCH 096/102] net-next: mediatek: add support for IRQ grouping

The ethernet core has 3 IRQs. Using the IRQ grouping registers we are
able to separate the TX and RX IRQs, which allows us to service them on
separate cores. This patch splits the IRQ handler into two separate
functions, one for TX and one for RX. The TX housekeeping is split out
into its own NAPI handler.

Signed-off-by: John Crispin <john@phrozen.org>
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 156 +++++++++++++++++----------
 drivers/net/ethernet/mediatek/mtk_eth_soc.h |  15 ++-
 2 files changed, 111 insertions(+), 60 deletions(-)

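The shape of the change is the standard split-NAPI arrangement: each hard
IRQ handler only masks its own interrupt source and schedules a dedicated
NAPI context, and the poll function re-enables the source once the work is
drained before the budget is exhausted. For reference, a minimal
self-contained sketch of that pattern follows; the demo_* names are
hypothetical stand-ins, not the mtk_eth_soc API, and the actual driver
changes are in the diff below.

/*
 * Minimal sketch of the split TX/RX NAPI pattern (illustrative only;
 * the demo_* names are hypothetical stand-ins, not the mtk_eth_soc API).
 */
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>

#define DEMO_TX_DONE	BIT(0)
#define DEMO_RX_DONE	BIT(1)

struct demo_priv {
	struct napi_struct tx_napi;	/* serviced from the TX IRQ line */
	struct napi_struct rx_napi;	/* serviced from the RX IRQ line */
};

/* Stand-ins for mtk_irq_disable()/mtk_irq_enable(): they would mask or
 * unmask bits in the hardware interrupt-enable register. */
static void demo_irq_mask(struct demo_priv *priv, u32 bits) { }
static void demo_irq_unmask(struct demo_priv *priv, u32 bits) { }

/* Hard IRQ handler for the TX line: mask the source, defer the real
 * work to NAPI; the RX handler is symmetrical. */
static irqreturn_t demo_irq_tx(int irq, void *dev_id)
{
	struct demo_priv *priv = dev_id;

	if (likely(napi_schedule_prep(&priv->tx_napi))) {
		__napi_schedule(&priv->tx_napi);
		demo_irq_mask(priv, DEMO_TX_DONE);
	}

	return IRQ_HANDLED;
}

/* NAPI poll for TX housekeeping: reclaim completed descriptors, then
 * re-enable the TX interrupt only once the work is fully drained. */
static int demo_napi_tx(struct napi_struct *napi, int budget)
{
	struct demo_priv *priv = container_of(napi, struct demo_priv, tx_napi);
	int done = 0;	/* a real driver reclaims up to 'budget' TX slots here */

	if (done == budget)
		return budget;	/* more work pending; stay scheduled */

	napi_complete(napi);
	demo_irq_unmask(priv, DEMO_TX_DONE);

	return done;
}

With the two IRQ lines split this way, pinning them to different CPUs
(e.g. via /proc/irq/*/smp_affinity) is what then lets TX and RX
housekeeping actually run on separate cores.
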
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -905,14 +905,13 @@ release_desc:
 	return done;
 }
 
-static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
+static int mtk_poll_tx(struct mtk_eth *eth, int budget)
 {
 	struct mtk_tx_ring *ring = &eth->tx_ring;
 	struct mtk_tx_dma *desc;
 	struct sk_buff *skb;
 	struct mtk_tx_buf *tx_buf;
-	int total = 0, done = 0;
-	unsigned int bytes = 0;
+	unsigned int bytes = 0, done = 0;
 	u32 cpu, dma;
 	static int condition;
 	int i;
@@ -964,63 +963,82 @@ static int mtk_poll_tx(struct mtk_eth *e
 		netdev_completed_queue(eth->netdev[i], done, bytes);
 	}
 
-	/* read hw index again make sure no new tx packet */
-	if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
-		*tx_again = true;
-	else
-		mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
-
-	if (!total)
-		return 0;
-
 	if (mtk_queue_stopped(eth) &&
 	    (atomic_read(&ring->free_count) > ring->thresh))
 		mtk_wake_queue(eth);
 
-	return total;
+	return done;
 }
 
-static int mtk_poll(struct napi_struct *napi, int budget)
+static void mtk_handle_status_irq(struct mtk_eth *eth)
 {
-	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
-	u32 status, status2, mask;
-	int tx_done, rx_done;
-	bool tx_again = false;
-
-	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
-	status2 = mtk_r32(eth, MTK_INT_STATUS2);
-	tx_done = 0;
-	rx_done = 0;
-	tx_again = 0;
-
-	if (status & MTK_TX_DONE_INT)
-		tx_done = mtk_poll_tx(eth, budget, &tx_again);
-
-	if (status & MTK_RX_DONE_INT)
-		rx_done = mtk_poll_rx(napi, budget, eth);
+	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
 
 	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
 		mtk_stats_update(eth);
 		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
 			MTK_INT_STATUS2);
 	}
+}
+
+static int mtk_napi_tx(struct napi_struct *napi, int budget)
+{
+	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
+	u32 status, mask;
+	int tx_done = 0;
+
+	mtk_handle_status_irq(eth);
+	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
+	tx_done = mtk_poll_tx(eth, budget);
 
 	if (unlikely(netif_msg_intr(eth))) {
+		status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
 		mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
-		netdev_info(eth->netdev[0],
-			    "done tx %d, rx %d, intr 0x%08x/0x%x\n",
-			    tx_done, rx_done, status, mask);
+		dev_info(eth->dev,
+			 "done tx %d, intr 0x%08x/0x%x\n",
+			 tx_done, status, mask);
 	}
 
-	if (tx_again || rx_done == budget)
+	if (tx_done == budget)
 		return budget;
 
 	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
-	if (status & (tx_intr | rx_intr))
+	if (status & MTK_TX_DONE_INT)
 		return budget;
 
 	napi_complete(napi);
-	mtk_irq_enable(eth, MTK_RX_DONE_INT | MTK_RX_DONE_INT);
+	mtk_irq_enable(eth, MTK_TX_DONE_INT);
+
+	return tx_done;
+}
+
+static int mtk_napi_rx(struct napi_struct *napi, int budget)
+{
+	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
+	u32 status, mask;
+	int rx_done = 0;
+
+	mtk_handle_status_irq(eth);
+	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);
+	rx_done = mtk_poll_rx(napi, budget, eth);
+
+	if (unlikely(netif_msg_intr(eth))) {
+		status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
+		mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
+		dev_info(eth->dev,
+			 "done rx %d, intr 0x%08x/0x%x\n",
+			 rx_done, status, mask);
+	}
+
+	if (rx_done == budget)
+		return budget;
+
+	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
+	if (status & MTK_RX_DONE_INT)
+		return budget;
+
+	napi_complete(napi);
+	mtk_irq_enable(eth, MTK_RX_DONE_INT);
 
 	return rx_done;
 }
@@ -1256,22 +1274,26 @@ static void mtk_tx_timeout(struct net_de
 	schedule_work(&eth->pending_work);
 }
 
-static irqreturn_t mtk_handle_irq(int irq, void *_eth)
+static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
 {
 	struct mtk_eth *eth = _eth;
-	u32 status;
 
-	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
-	if (unlikely(!status))
-		return IRQ_NONE;
+	if (likely(napi_schedule_prep(&eth->rx_napi))) {
+		__napi_schedule(&eth->rx_napi);
+		mtk_irq_disable(eth, MTK_RX_DONE_INT);
+	}
 
-	if (likely(status & (MTK_RX_DONE_INT | MTK_TX_DONE_INT))) {
-		if (likely(napi_schedule_prep(&eth->rx_napi)))
-			__napi_schedule(&eth->rx_napi);
-	} else {
-		mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
+{
+	struct mtk_eth *eth = _eth;
+
+	if (likely(napi_schedule_prep(&eth->tx_napi))) {
+		__napi_schedule(&eth->tx_napi);
+		mtk_irq_disable(eth, MTK_TX_DONE_INT);
 	}
-	mtk_irq_disable(eth, (MTK_RX_DONE_INT | MTK_TX_DONE_INT));
 
 	return IRQ_HANDLED;
 }
@@ -1284,7 +1306,7 @@ static void mtk_poll_controller(struct n
 	u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT;
 
 	mtk_irq_disable(eth, int_mask);
-	mtk_handle_irq(dev->irq, dev);
+	mtk_handle_irq(dev->irq[0], dev);
 	mtk_irq_enable(eth, int_mask);
 }
 #endif
@@ -1320,6 +1342,7 @@ static int mtk_open(struct net_device *d
 	if (err)
 		return err;
 
+	napi_enable(&eth->tx_napi);
 	napi_enable(&eth->rx_napi);
 	mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
 	}
@@ -1368,6 +1391,7 @@ static int mtk_stop(struct net_device *d
 		return 0;
 
 	mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
+	napi_disable(&eth->tx_napi);
 	napi_disable(&eth->rx_napi);
 
 	mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
@@ -1405,7 +1429,11 @@ static int __init mtk_hw_init(struct mtk
 	/* Enable RX VLan Offloading */
 	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
 
-	err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
+	err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
+			       dev_name(eth->dev), eth);
+	if (err)
+		return err;
+	err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
 			       dev_name(eth->dev), eth);
 	if (err)
 		return err;
@@ -1421,7 +1449,11 @@ static int __init mtk_hw_init(struct mtk
 	mtk_w32(eth, 0, MTK_RST_GL);
 
 	/* FE int grouping */
-	mtk_w32(eth, 0, MTK_FE_INT_GRP);
+	mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
+	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
+	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
+	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
+	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
 
 	for (i = 0; i < 2; i++) {
 		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
@@ -1469,7 +1501,9 @@ static void mtk_uninit(struct net_devic
 	phy_disconnect(mac->phy_dev);
 	mtk_mdio_cleanup(eth);
 	mtk_irq_disable(eth, ~0);
-	free_irq(dev->irq, dev);
+	free_irq(eth->irq[0], dev);
+	free_irq(eth->irq[1], dev);
+	free_irq(eth->irq[2], dev);
 }
 
 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -1744,10 +1778,10 @@ static int mtk_add_mac(struct mtk_eth *e
 		dev_err(eth->dev, "error bringing up device\n");
 		goto free_netdev;
 	}
-	eth->netdev[id]->irq = eth->irq;
+	eth->netdev[id]->irq = eth->irq[0];
 	netif_info(eth, probe, eth->netdev[id],
 		   "mediatek frame engine at 0x%08lx, irq %d\n",
-		   eth->netdev[id]->base_addr, eth->netdev[id]->irq);
+		   eth->netdev[id]->base_addr, eth->irq[0]);
 
 	return 0;
 
@@ -1764,6 +1798,7 @@ static int mtk_probe(struct platform_dev
 	struct mtk_soc_data *soc;
 	struct mtk_eth *eth;
 	int err;
+	int i;
 
 	match = of_match_device(of_mtk_match, &pdev->dev);
 	soc = (struct mtk_soc_data *)match->data;
@@ -1799,10 +1834,12 @@ static int mtk_probe(struct platform_dev
 		return PTR_ERR(eth->rstc);
 	}
 
-	eth->irq = platform_get_irq(pdev, 0);
-	if (eth->irq < 0) {
-		dev_err(&pdev->dev, "no IRQ resource found\n");
-		return -ENXIO;
+	for (i = 0; i < 3; i++) {
+		eth->irq[i] = platform_get_irq(pdev, i);
+		if (eth->irq[i] < 0) {
+			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
+			return -ENXIO;
+		}
 	}
 
 	eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif");
@@ -1843,7 +1880,9 @@ static int mtk_probe(struct platform_dev
 	 * for NAPI to work
 	 */
 	init_dummy_netdev(&eth->dummy_dev);
-	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
+	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
+		       MTK_NAPI_WEIGHT);
+	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
 		       MTK_NAPI_WEIGHT);
 
 	platform_set_drvdata(pdev, eth);
@@ -1864,6 +1903,7 @@ static int mtk_remove(struct platform_de
 	clk_disable_unprepare(eth->clk_gp1);
 	clk_disable_unprepare(eth->clk_gp2);
 
+	netif_napi_del(&eth->tx_napi);
 	netif_napi_del(&eth->rx_napi);
 	mtk_cleanup(eth);
 	platform_set_drvdata(pdev, NULL);
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -68,6 +68,10 @@
 /* Unicast Filter MAC Address Register - High */
 #define MTK_GDMA_MAC_ADRH(x)	(0x50C + (x * 0x1000))
 
+/* PDMA Interrupt grouping registers */
+#define MTK_PDMA_INT_GRP1	0xa50
+#define MTK_PDMA_INT_GRP2	0xa54
+
 /* QDMA TX Queue Configuration Registers */
 #define MTK_QTX_CFG(x)		(0x1800 + (x * 0x10))
 #define QDMA_RES_THRES		4
@@ -125,6 +129,11 @@
 #define MTK_TX_DONE_INT	(MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
 			 MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
 
+/* QDMA Interrupt grouping registers */
+#define MTK_QDMA_INT_GRP1	0x1a20
+#define MTK_QDMA_INT_GRP2	0x1a24
+#define MTK_RLS_DONE_INT	BIT(0)
+
 /* QDMA Interrupt Status Register */
 #define MTK_QDMA_INT_MASK	0x1A1C
 
@@ -356,7 +365,8 @@ struct mtk_rx_ring {
  * @dma_refcnt:		track how many netdevs are using the DMA engine
  * @tx_ring:		Pointer to the memore holding info about the TX ring
  * @rx_ring:		Pointer to the memore holding info about the RX ring
- * @rx_napi:		The NAPI struct
+ * @tx_napi:		The TX NAPI struct
+ * @rx_napi:		The RX NAPI struct
  * @scratch_ring:	Newer SoCs need memory for a second HW managed TX ring
  * @phy_scratch_ring:	physical address of scratch_ring
  * @scratch_head:	The scratch memory that scratch_ring points to.
@@ -377,7 +387,7 @@ struct mtk_eth {
 	struct net_device	dummy_dev;
 	struct net_device	*netdev[MTK_MAX_DEVS];
 	struct mtk_mac		*mac[MTK_MAX_DEVS];
-	int			irq;
+	int			irq[3];
 	u32			msg_enable;
 	unsigned long		sysclk;
 	struct regmap		*ethsys;
@@ -385,6 +395,7 @@ struct mtk_eth {
 	atomic_t		dma_refcnt;
 	struct mtk_tx_ring	tx_ring;
 	struct mtk_rx_ring	rx_ring;
+	struct napi_struct	tx_napi;
 	struct napi_struct	rx_napi;
 	struct mtk_tx_dma	*scratch_ring;
 	dma_addr_t		phy_scratch_ring;
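
A note on the grouping writes in mtk_hw_init() above, read alongside the
new header defines: source bits written to a *_INT_GRP register are
steered to that group's IRQ line, so TX-done lands on the line requested
with mtk_handle_irq_tx (eth->irq[1]) and RX-done on the line requested
with mtk_handle_irq_rx (eth->irq[2]), while eth->irq[0] remains the one
exported as the netdev's irq. This is a reading of the patch, not vendor
documentation; in particular the 0x21021000 value written to
MTK_FE_INT_GRP is an undocumented magic constant carried verbatim.

/* The grouping setup from mtk_hw_init(), annotated; the comments are an
 * interpretation, and 0x21021000 is taken as-is from the patch. */
mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);	/* PDMA TX-done -> IRQ group 1 */
mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);	/* PDMA RX-done -> IRQ group 2 */
mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);	/* QDMA TX-done -> IRQ group 1 */
mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);	/* QDMA RX-done -> IRQ group 2 */
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);		/* route the groups to the IRQ lines */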