vrx518_tc: fix compilation error with kernel 6.1
[openwrt/openwrt.git] / package / kernel / lantiq / vrx518_tc / patches / 202-napi.patch
1 --- a/dcdp/platform/sw_plat.c
2 +++ b/dcdp/platform/sw_plat.c
3 @@ -208,6 +208,8 @@ struct plat_priv {
4 struct tc_req req_work;
5 struct aca_ring_grp soc_rings;
6 struct net_device *netdev;
7 + struct napi_struct *napi_tx;
8 + struct napi_struct *napi_rx;
9 DECLARE_HASHTABLE(mem_map, 8);
10 };
11
12 @@ -472,7 +474,7 @@ err2:
13 return -1;
14 }
15
16 -static void txout_action(struct tc_priv *priv, struct aca_ring *txout)
17 +static int txout_action(struct tc_priv *priv, struct aca_ring *txout, int budget)
18 {
19 struct aca_ring *txin = &g_plat_priv->soc_rings.txin;
20 struct tx_list *txlist = &g_plat_priv->soc_rings.txlist;
21 @@ -490,7 +492,10 @@ static void txout_action(struct tc_priv
22 spin_lock_irqsave(&tx_spinlock, flags);
23 }
24
25 - for (i = 0; i < txout->dnum; i++) {
26 + if (budget == 0 || budget > txout->dnum)
27 + budget = txout->dnum;
28 +
29 + for (i = 0; i < budget; i++) {
30 desc = txout->dbase_mem;
31 desc += txout->idx;
32
33 @@ -540,6 +545,8 @@ static void txout_action(struct tc_priv
34 if (cnt && g_plat_priv->netdev && netif_queue_stopped(g_plat_priv->netdev)) {
35 netif_wake_queue(g_plat_priv->netdev);
36 }
37 +
38 + return cnt;
39 }
40
41 static void rxin_action(struct tc_priv *priv,
42 @@ -549,7 +556,7 @@ static void rxin_action(struct tc_priv *
43 writel(cnt, rxin->umt_dst);
44 }
45
46 -static int rxout_action(struct tc_priv *priv, struct aca_ring *rxout)
47 +static int rxout_action(struct tc_priv *priv, struct aca_ring *rxout, int budget)
48 {
49 struct device *pdev = priv->ep_dev[0].dev;
50 int i, cnt;
51 @@ -559,8 +566,11 @@ static int rxout_action(struct tc_priv *
52 size_t len;
53 struct sk_buff *skb;
54
55 + if (budget == 0 || budget > rxout->dnum)
56 + budget = rxout->dnum;
57 +
58 cnt = 0;
59 - for (i = 0; i < rxout->dnum; i++) {
60 + for (i = 0; i < budget; i++) {
61 desc = rxout->dbase_mem;
62 desc += rxout->idx;
63
64 @@ -593,14 +603,30 @@ static int rxout_action(struct tc_priv *
65 ring_idx_inc(rxout);
66 }
67
68 - if (!cnt)
69 - tc_err(priv, MSG_RX, "RXOUT spurious interrupt\n");
70 - else
71 + if (cnt)
72 writel(cnt, rxout->umt_dst+0x28); // RXOUT_HD_ACCUM_SUB instead of RXOUT_HD_ACCUM_ADD
73
74 return cnt;
75 }
76
77 +static int plat_txout_napi(struct napi_struct *napi, int budget)
78 +{
79 + struct plat_priv *priv = g_plat_priv;
80 + struct tc_priv *tcpriv = plat_to_tcpriv();
81 + struct aca_ring *txout = &priv->soc_rings.txout;
82 + struct dc_ep_dev *ep_dev = &tcpriv->ep_dev[txout->ep_dev_idx];
83 + int cnt;
84 +
85 + cnt = txout_action(tcpriv, txout, budget);
86 +
87 + if (cnt < budget) {
88 + if (napi_complete_done(napi, cnt))
89 + ep_dev->hw_ops->icu_en(ep_dev, ACA_HOSTIF_TX);
90 + }
91 +
92 + return cnt;
93 +}
94 +
95 static void plat_txout_tasklet(unsigned long arg)
96 {
97 struct plat_priv *priv = g_plat_priv;
98 @@ -608,12 +634,33 @@ static void plat_txout_tasklet(unsigned
99 struct aca_ring *txout = &priv->soc_rings.txout;
100 struct dc_ep_dev *ep_dev = &tcpriv->ep_dev[txout->ep_dev_idx];
101
102 - txout_action(tcpriv, txout);
103 + txout_action(tcpriv, txout, 0);
104
105 /* Enable interrupt */
106 ep_dev->hw_ops->icu_en(ep_dev, ACA_HOSTIF_TX);
107 }
108
109 +static int plat_rxout_napi(struct napi_struct *napi, int budget)
110 +{
111 + struct plat_priv *priv = g_plat_priv;
112 + struct tc_priv *tcpriv = plat_to_tcpriv();
113 + struct aca_ring *rxout = &priv->soc_rings.rxout;
114 + struct aca_ring *rxin = &priv->soc_rings.rxin;
115 + struct dc_ep_dev *ep_dev = &tcpriv->ep_dev[rxout->ep_dev_idx];
116 + int cnt;
117 +
118 + cnt = rxout_action(tcpriv, rxout, budget);
119 + if (cnt)
120 + rxin_action(tcpriv, rxin, DMA_PACKET_SZ, cnt);
121 +
122 + if (cnt < budget) {
123 + if (napi_complete_done(napi, cnt))
124 + ep_dev->hw_ops->icu_en(ep_dev, ACA_HOSTIF_RX);
125 + }
126 +
127 + return cnt;
128 +}
129 +
130 static void plat_rxout_tasklet(unsigned long arg)
131 {
132 struct plat_priv *priv = g_plat_priv;
133 @@ -623,7 +670,7 @@ static void plat_rxout_tasklet(unsigned
134 struct dc_ep_dev *ep_dev = &tcpriv->ep_dev[rxout->ep_dev_idx];
135 int cnt;
136
137 - cnt = rxout_action(tcpriv, rxout);
138 + cnt = rxout_action(tcpriv, rxout, 0);
139 if (cnt)
140 rxin_action(tcpriv, rxin, DMA_PACKET_SZ, cnt);
141
142 @@ -783,11 +830,22 @@ static irqreturn_t aca_rx_irq_handler(in
143 {
144 struct dc_ep_dev *ep_dev = dev_id;
145
146 - /* Disable IRQ in IMCU */
147 - ep_dev->hw_ops->icu_mask(ep_dev, ACA_HOSTIF_RX);
148 + if (g_plat_priv->napi_rx) {
149 +
150 + if (napi_schedule_prep(g_plat_priv->napi_rx)) {
151 + ep_dev->hw_ops->icu_mask(ep_dev, ACA_HOSTIF_RX);
152 + __napi_schedule(g_plat_priv->napi_rx);
153 + }
154 +
155 + } else {
156 +
157 + /* Disable IRQ in IMCU */
158 + ep_dev->hw_ops->icu_mask(ep_dev, ACA_HOSTIF_RX);
159
160 - /* Start tasklet */
161 - tasklet_schedule(&rxout_task);
162 + /* Start tasklet */
163 + tasklet_schedule(&rxout_task);
164 +
165 + }
166
167 return IRQ_HANDLED;
168 }
169 @@ -796,15 +854,62 @@ static irqreturn_t aca_tx_irq_handler(in
170 {
171 struct dc_ep_dev *ep_dev = dev_id;
172
173 - /* Disable IRQ in IMCU */
174 - ep_dev->hw_ops->icu_mask(ep_dev, ACA_HOSTIF_TX);
175 + if (g_plat_priv->napi_tx) {
176
177 - /* Start tasklet */
178 - tasklet_schedule(&txout_task);
179 + if (napi_schedule_prep(g_plat_priv->napi_tx)) {
180 + ep_dev->hw_ops->icu_mask(ep_dev, ACA_HOSTIF_TX);
181 + __napi_schedule(g_plat_priv->napi_tx);
182 + }
183 +
184 + } else {
185 +
186 + /* Disable IRQ in IMCU */
187 + ep_dev->hw_ops->icu_mask(ep_dev, ACA_HOSTIF_TX);
188 +
189 + /* Start tasklet */
190 + tasklet_schedule(&txout_task);
191 +
192 + }
193
194 return IRQ_HANDLED;
195 }
196
197 +static void plat_net_open(void)
198 +{
199 + struct plat_priv *priv = g_plat_priv;
200 + struct tc_priv *tcpriv = plat_to_tcpriv();
201 + struct aca_ring *rxout = &priv->soc_rings.rxout;
202 + struct aca_ring *txout = &priv->soc_rings.txout;
203 + struct dc_ep_dev *ep_dev_rx = &tcpriv->ep_dev[rxout->ep_dev_idx];
204 + struct dc_ep_dev *ep_dev_tx = &tcpriv->ep_dev[txout->ep_dev_idx];
205 +
206 + if (priv->napi_rx)
207 + napi_enable(priv->napi_rx);
208 + ep_dev_rx->hw_ops->icu_en(ep_dev_rx, ACA_HOSTIF_RX);
209 +
210 + if (priv->napi_tx)
211 + napi_enable(priv->napi_tx);
212 + ep_dev_tx->hw_ops->icu_en(ep_dev_tx, ACA_HOSTIF_TX);
213 +}
214 +
215 +static void plat_net_stop(void)
216 +{
217 + struct plat_priv *priv = g_plat_priv;
218 + struct tc_priv *tcpriv = plat_to_tcpriv();
219 + struct aca_ring *rxout = &priv->soc_rings.rxout;
220 + struct aca_ring *txout = &priv->soc_rings.txout;
221 + struct dc_ep_dev *ep_dev_rx = &tcpriv->ep_dev[rxout->ep_dev_idx];
222 + struct dc_ep_dev *ep_dev_tx = &tcpriv->ep_dev[txout->ep_dev_idx];
223 +
224 + if (priv->napi_tx)
225 + napi_disable(priv->napi_tx);
226 + ep_dev_tx->hw_ops->icu_mask(ep_dev_tx, ACA_HOSTIF_TX);
227 +
228 + if (priv->napi_rx)
229 + napi_disable(priv->napi_rx);
230 + ep_dev_rx->hw_ops->icu_mask(ep_dev_rx, ACA_HOSTIF_RX);
231 +}
232 +
233 static void plat_irq_init(struct tc_priv *priv, const char *dev_name)
234 {
235 int ret;
236 @@ -988,17 +1093,49 @@ static int plat_soc_cfg_get(struct soc_c
237 }
238
239 static int plat_open(struct net_device *pdev, const char *dev_name,
240 + struct napi_struct *napi_tx, struct napi_struct *napi_rx,
241 int id, int flag)
242 {
243 + struct tc_priv *priv = g_plat_priv->tc_priv;
244 + int i;
245 +
246 + for (i = 0; i < EP_MAX_NUM && i < priv->ep_num; i++) {
247 + disable_irq(priv->ep_dev[i].aca_rx_irq);
248 + disable_irq(priv->ep_dev[i].aca_tx_irq);
249 + }
250 +
251 g_plat_priv->netdev = pdev;
252 + g_plat_priv->napi_tx = napi_tx;
253 + g_plat_priv->napi_rx = napi_rx;
254 +
255 + for (i = 0; i < EP_MAX_NUM && i < priv->ep_num; i++) {
256 + enable_irq(priv->ep_dev[i].aca_rx_irq);
257 + enable_irq(priv->ep_dev[i].aca_tx_irq);
258 + }
259
260 return 0;
261 }
262
263 static void plat_close(struct net_device *pdev, const char *dev_name,
264 + struct napi_struct *napi_tx, struct napi_struct *napi_rx,
265 int flag)
266 {
267 + struct tc_priv *priv = g_plat_priv->tc_priv;
268 + int i;
269 +
270 + for (i = 0; i < EP_MAX_NUM && i < priv->ep_num; i++) {
271 + disable_irq(priv->ep_dev[i].aca_rx_irq);
272 + disable_irq(priv->ep_dev[i].aca_tx_irq);
273 + }
274 +
275 g_plat_priv->netdev = NULL;
276 + g_plat_priv->napi_tx = NULL;
277 + g_plat_priv->napi_rx = NULL;
278 +
279 + for (i = 0; i < EP_MAX_NUM && i < priv->ep_num; i++) {
280 + enable_irq(priv->ep_dev[i].aca_rx_irq);
281 + enable_irq(priv->ep_dev[i].aca_tx_irq);
282 + }
283
284 return;
285 }
286 @@ -1084,6 +1221,10 @@ static void plat_tc_ops_setup(struct tc_
287 priv->tc_ops.free = plat_mem_free;
288 priv->tc_ops.dev_reg = plat_open;
289 priv->tc_ops.dev_unreg = plat_close;
290 + priv->tc_ops.net_open = plat_net_open;
291 + priv->tc_ops.net_stop = plat_net_stop;
292 + priv->tc_ops.napi_tx = plat_txout_napi;
293 + priv->tc_ops.napi_rx = plat_rxout_napi;
294 priv->tc_ops.umt_init = plat_umt_init;
295 priv->tc_ops.umt_exit = plat_umt_exit;
296 priv->tc_ops.umt_start = plat_umt_start;
297 --- a/dcdp/atm_tc.c
298 +++ b/dcdp/atm_tc.c
299 @@ -3650,7 +3650,7 @@ static void atm_aca_ring_config_init(str
300 static int atm_ring_init(struct atm_priv *priv)
301 {
302 atm_aca_ring_config_init(priv);
303 - return priv->tc_priv->tc_ops.dev_reg(NULL, g_atm_dev_name, 0, 0);
304 + return priv->tc_priv->tc_ops.dev_reg(NULL, g_atm_dev_name, NULL, NULL, 0, 0);
305 }
306
307 static int atm_init(struct tc_priv *tcpriv, u32 ep_id)
308 @@ -4020,7 +4020,7 @@ void atm_tc_unload(void)
309 /* unregister device */
310 if (priv->tc_priv->tc_ops.dev_unreg != NULL)
311 priv->tc_priv->tc_ops.dev_unreg(NULL,
312 - g_atm_dev_name, 0);
313 + g_atm_dev_name, NULL, NULL, 0);
314
315 /* atm_dev_deinit(priv); */
316 /* modem module power off */
317 --- a/dcdp/inc/tc_main.h
318 +++ b/dcdp/inc/tc_main.h
319 @@ -209,9 +209,15 @@ struct tc_hw_ops {
320 void (*subif_unreg)(struct net_device *pdev, const char *dev_name,
321 int subif_id, int flag);
322 int (*dev_reg)(struct net_device *pdev, const char *dev_name,
323 + struct napi_struct *napi_tx, struct napi_struct *napi_rx,
324 int id, int flag);
325 void (*dev_unreg)(struct net_device *pdev, const char *dev_name,
326 + struct napi_struct *napi_tx, struct napi_struct *napi_rx,
327 int flag);
328 + void (*net_open)(void);
329 + void (*net_stop)(void);
330 + int (*napi_tx)(struct napi_struct *napi, int budget);
331 + int (*napi_rx)(struct napi_struct *napi, int budget);
332
333 /*umt init/exit including the corresponding DMA init/exit */
334 int (*umt_init)(u32 umt_id, u32 umt_period, u32 umt_dst);
335 --- a/dcdp/ptm_tc.c
336 +++ b/dcdp/ptm_tc.c
337 @@ -146,7 +146,11 @@ static int ptm_open(struct net_device *d
338 struct ptm_priv *ptm_tc = netdev_priv(dev);
339
340 tc_info(ptm_tc->tc_priv, MSG_EVENT, "ptm open\n");
341 +
342 + ptm_tc->tc_priv->tc_ops.net_open();
343 +
344 netif_tx_start_all_queues(dev);
345 +
346 #ifdef CONFIG_SOC_TYPE_XWAY
347 xet_phy_wan_port(7, NULL, 1, 1);
348 if (ppa_hook_ppa_phys_port_add_fn)
349 @@ -163,7 +167,11 @@ static int ptm_stop(struct net_device *d
350 struct ptm_priv *ptm_tc = netdev_priv(dev);
351
352 tc_info(ptm_tc->tc_priv, MSG_EVENT, "ptm stop\n");
353 +
354 netif_tx_stop_all_queues(dev);
355 +
356 + ptm_tc->tc_priv->tc_ops.net_stop();
357 +
358 #ifdef CONFIG_SOC_TYPE_XWAY
359 if (ppa_drv_datapath_mac_entry_setting)
360 ppa_drv_datapath_mac_entry_setting(dev->dev_addr, 0, 6, 10, 1, 2);
361 @@ -564,7 +572,7 @@ static void ptm_rx(struct net_device *de
362 ptm_tc->stats64.rx_packets++;
363 ptm_tc->stats64.rx_bytes += skb->len;
364
365 - if (netif_rx(skb) == NET_RX_DROP)
366 + if (netif_receive_skb(skb) == NET_RX_DROP)
367 ptm_tc->stats64.rx_dropped++;
368
369 return;
370 @@ -664,6 +672,14 @@ static int ptm_dev_init(struct tc_priv *
371 memcpy(ptm_tc->outq_map, def_outq_map, sizeof(def_outq_map));
372 SET_NETDEV_DEV(ptm_tc->dev, tc_priv->ep_dev[id].dev);
373
374 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,1,0))
375 + netif_napi_add(ptm_tc->dev, &ptm_tc->napi_rx, tc_priv->tc_ops.napi_rx);
376 + netif_napi_add_tx(ptm_tc->dev, &ptm_tc->napi_tx, tc_priv->tc_ops.napi_tx);
377 +#else
378 + netif_napi_add(ptm_tc->dev, &ptm_tc->napi_rx, tc_priv->tc_ops.napi_rx, NAPI_POLL_WEIGHT);
379 + netif_tx_napi_add(ptm_tc->dev, &ptm_tc->napi_tx, tc_priv->tc_ops.napi_tx, NAPI_POLL_WEIGHT);
380 +
381 +#endif
382 err = register_netdev(ptm_tc->dev);
383 if (err)
384 goto err1;
385 @@ -2618,7 +2634,9 @@ static int ptm_ring_init(struct ptm_ep_p
386 {
387 ptm_aca_ring_config_init(priv, id, bonding);
388 return priv->tc_priv->tc_ops.dev_reg(priv->ptm_tc->dev,
389 - priv->ptm_tc->dev->name, id, bonding);
390 + priv->ptm_tc->dev->name,
391 + &priv->ptm_tc->napi_tx, &priv->ptm_tc->napi_rx,
392 + id, bonding);
393 }
394
395 /**
396 @@ -2973,7 +2991,9 @@ void ptm_tc_unload(enum dsl_tc_mode tc_m
397 /* unregister device */
398 if (ptm_tc->tc_priv->tc_ops.dev_unreg != NULL)
399 ptm_tc->tc_priv->tc_ops.dev_unreg(ptm_tc->dev,
400 - ptm_tc->dev->name, 0);
401 + ptm_tc->dev->name,
402 + &priv->ptm_tc->napi_tx, &priv->ptm_tc->napi_rx,
403 + 0);
404
405 /* remove PTM callback function */
406 ptm_cb_setup(ptm_tc, 0);
407 @@ -2991,6 +3011,10 @@ void ptm_exit(void)
408
409 if (!priv)
410 return;
411 +
412 + netif_napi_del(&priv->napi_tx);
413 + netif_napi_del(&priv->napi_rx);
414 +
415 unregister_netdev(priv->dev);
416 free_netdev(priv->dev);
417
418 --- a/dcdp/inc/ptm_tc.h
419 +++ b/dcdp/inc/ptm_tc.h
420 @@ -119,6 +119,8 @@ struct ptm_priv {
421 u32 ep_id;
422 struct ppe_fw fw;
423 struct net_device *dev;
424 + struct napi_struct napi_tx;
425 + struct napi_struct napi_rx;
426 spinlock_t ptm_lock;
427 struct rtnl_link_stats64 stats64;
428 int subif_id;