1 --- a/dcdp/platform/sw_plat.c
2 +++ b/dcdp/platform/sw_plat.c
3 @@ -208,6 +208,8 @@ struct plat_priv {
4 struct tc_req req_work;
5 struct aca_ring_grp soc_rings;
6 struct net_device *netdev;
7 + struct napi_struct *napi_tx;
8 + struct napi_struct *napi_rx;
9 DECLARE_HASHTABLE(mem_map, 8);
12 @@ -472,7 +474,7 @@ err2:
16 -static void txout_action(struct tc_priv *priv, struct aca_ring *txout)
17 +static int txout_action(struct tc_priv *priv, struct aca_ring *txout, int budget)
19 struct aca_ring *txin = &g_plat_priv->soc_rings.txin;
20 struct tx_list *txlist = &g_plat_priv->soc_rings.txlist;
21 @@ -490,7 +492,10 @@ static void txout_action(struct tc_priv
22 spin_lock_irqsave(&tx_spinlock, flags);
25 - for (i = 0; i < txout->dnum; i++) {
26 + if (budget == 0 || budget > txout->dnum)
27 + budget = txout->dnum;
29 + for (i = 0; i < budget; i++) {
30 desc = txout->dbase_mem;
33 @@ -540,6 +545,8 @@ static void txout_action(struct tc_priv
34 if (cnt && g_plat_priv->netdev && netif_queue_stopped(g_plat_priv->netdev)) {
35 netif_wake_queue(g_plat_priv->netdev);
41 static void rxin_action(struct tc_priv *priv,
42 @@ -549,7 +556,7 @@ static void rxin_action(struct tc_priv *
43 writel(cnt, rxin->umt_dst);
46 -static int rxout_action(struct tc_priv *priv, struct aca_ring *rxout)
47 +static int rxout_action(struct tc_priv *priv, struct aca_ring *rxout, int budget)
49 struct device *pdev = priv->ep_dev[0].dev;
51 @@ -559,8 +566,11 @@ static int rxout_action(struct tc_priv *
55 + if (budget == 0 || budget > rxout->dnum)
56 + budget = rxout->dnum;
59 - for (i = 0; i < rxout->dnum; i++) {
60 + for (i = 0; i < budget; i++) {
61 desc = rxout->dbase_mem;
64 @@ -593,14 +603,30 @@ static int rxout_action(struct tc_priv *
69 - tc_err(priv, MSG_RX, "RXOUT spurious interrupt\n");
72 writel(cnt, rxout->umt_dst+0x28); // RXOUT_HD_ACCUM_SUB instead of RXOUT_HD_ACCUM_ADD
77 +static int plat_txout_napi(struct napi_struct *napi, int budget)
79 + struct plat_priv *priv = g_plat_priv;
80 + struct tc_priv *tcpriv = plat_to_tcpriv();
81 + struct aca_ring *txout = &priv->soc_rings.txout;
82 + struct dc_ep_dev *ep_dev = &tcpriv->ep_dev[txout->ep_dev_idx];
85 + cnt = txout_action(tcpriv, txout, budget);
88 + if (napi_complete_done(napi, cnt))
89 + ep_dev->hw_ops->icu_en(ep_dev, ACA_HOSTIF_TX);
95 static void plat_txout_tasklet(unsigned long arg)
97 struct plat_priv *priv = g_plat_priv;
98 @@ -608,12 +634,33 @@ static void plat_txout_tasklet(unsigned
99 struct aca_ring *txout = &priv->soc_rings.txout;
100 struct dc_ep_dev *ep_dev = &tcpriv->ep_dev[txout->ep_dev_idx];
102 - txout_action(tcpriv, txout);
103 + txout_action(tcpriv, txout, 0);
105 /* Enable interrupt */
106 ep_dev->hw_ops->icu_en(ep_dev, ACA_HOSTIF_TX);
109 +static int plat_rxout_napi(struct napi_struct *napi, int budget)
111 + struct plat_priv *priv = g_plat_priv;
112 + struct tc_priv *tcpriv = plat_to_tcpriv();
113 + struct aca_ring *rxout = &priv->soc_rings.rxout;
114 + struct aca_ring *rxin = &priv->soc_rings.rxin;
115 + struct dc_ep_dev *ep_dev = &tcpriv->ep_dev[rxout->ep_dev_idx];
118 + cnt = rxout_action(tcpriv, rxout, budget);
120 + rxin_action(tcpriv, rxin, DMA_PACKET_SZ, cnt);
122 + if (cnt < budget) {
123 + if (napi_complete_done(napi, cnt))
124 + ep_dev->hw_ops->icu_en(ep_dev, ACA_HOSTIF_RX);
130 static void plat_rxout_tasklet(unsigned long arg)
132 struct plat_priv *priv = g_plat_priv;
133 @@ -623,7 +670,7 @@ static void plat_rxout_tasklet(unsigned
134 struct dc_ep_dev *ep_dev = &tcpriv->ep_dev[rxout->ep_dev_idx];
137 - cnt = rxout_action(tcpriv, rxout);
138 + cnt = rxout_action(tcpriv, rxout, 0);
140 rxin_action(tcpriv, rxin, DMA_PACKET_SZ, cnt);
142 @@ -783,11 +830,22 @@ static irqreturn_t aca_rx_irq_handler(in
144 struct dc_ep_dev *ep_dev = dev_id;
146 - /* Disable IRQ in IMCU */
147 - ep_dev->hw_ops->icu_mask(ep_dev, ACA_HOSTIF_RX);
148 + if (g_plat_priv->napi_rx) {
150 + if (napi_schedule_prep(g_plat_priv->napi_rx)) {
151 + ep_dev->hw_ops->icu_mask(ep_dev, ACA_HOSTIF_RX);
152 + __napi_schedule(g_plat_priv->napi_rx);
157 + /* Disable IRQ in IMCU */
158 + ep_dev->hw_ops->icu_mask(ep_dev, ACA_HOSTIF_RX);
160 - /* Start tasklet */
161 - tasklet_schedule(&rxout_task);
162 + /* Start tasklet */
163 + tasklet_schedule(&rxout_task);
169 @@ -796,15 +854,62 @@ static irqreturn_t aca_tx_irq_handler(in
171 struct dc_ep_dev *ep_dev = dev_id;
173 - /* Disable IRQ in IMCU */
174 - ep_dev->hw_ops->icu_mask(ep_dev, ACA_HOSTIF_TX);
175 + if (g_plat_priv->napi_tx) {
177 - /* Start tasklet */
178 - tasklet_schedule(&txout_task);
179 + if (napi_schedule_prep(g_plat_priv->napi_tx)) {
180 + ep_dev->hw_ops->icu_mask(ep_dev, ACA_HOSTIF_TX);
181 + __napi_schedule(g_plat_priv->napi_tx);
186 + /* Disable IRQ in IMCU */
187 + ep_dev->hw_ops->icu_mask(ep_dev, ACA_HOSTIF_TX);
189 + /* Start tasklet */
190 + tasklet_schedule(&txout_task);
197 +static void plat_net_open(void)
199 + struct plat_priv *priv = g_plat_priv;
200 + struct tc_priv *tcpriv = plat_to_tcpriv();
201 + struct aca_ring *rxout = &priv->soc_rings.rxout;
202 + struct aca_ring *txout = &priv->soc_rings.txout;
203 + struct dc_ep_dev *ep_dev_rx = &tcpriv->ep_dev[rxout->ep_dev_idx];
204 + struct dc_ep_dev *ep_dev_tx = &tcpriv->ep_dev[txout->ep_dev_idx];
207 + napi_enable(priv->napi_rx);
208 + ep_dev_rx->hw_ops->icu_en(ep_dev_rx, ACA_HOSTIF_RX);
211 + napi_enable(priv->napi_tx);
212 + ep_dev_tx->hw_ops->icu_en(ep_dev_tx, ACA_HOSTIF_TX);
215 +static void plat_net_stop(void)
217 + struct plat_priv *priv = g_plat_priv;
218 + struct tc_priv *tcpriv = plat_to_tcpriv();
219 + struct aca_ring *rxout = &priv->soc_rings.rxout;
220 + struct aca_ring *txout = &priv->soc_rings.txout;
221 + struct dc_ep_dev *ep_dev_rx = &tcpriv->ep_dev[rxout->ep_dev_idx];
222 + struct dc_ep_dev *ep_dev_tx = &tcpriv->ep_dev[txout->ep_dev_idx];
225 + napi_disable(priv->napi_tx);
226 + ep_dev_tx->hw_ops->icu_mask(ep_dev_tx, ACA_HOSTIF_TX);
229 + napi_disable(priv->napi_rx);
230 + ep_dev_rx->hw_ops->icu_mask(ep_dev_rx, ACA_HOSTIF_RX);
233 static void plat_irq_init(struct tc_priv *priv, const char *dev_name)
236 @@ -988,17 +1093,49 @@ static int plat_soc_cfg_get(struct soc_c
239 static int plat_open(struct net_device *pdev, const char *dev_name,
240 + struct napi_struct *napi_tx, struct napi_struct *napi_rx,
243 + struct tc_priv *priv = g_plat_priv->tc_priv;
246 + for (i = 0; i < EP_MAX_NUM && i < priv->ep_num; i++) {
247 + disable_irq(priv->ep_dev[i].aca_rx_irq);
248 + disable_irq(priv->ep_dev[i].aca_tx_irq);
251 g_plat_priv->netdev = pdev;
252 + g_plat_priv->napi_tx = napi_tx;
253 + g_plat_priv->napi_rx = napi_rx;
255 + for (i = 0; i < EP_MAX_NUM && i < priv->ep_num; i++) {
256 + enable_irq(priv->ep_dev[i].aca_rx_irq);
257 + enable_irq(priv->ep_dev[i].aca_tx_irq);
263 static void plat_close(struct net_device *pdev, const char *dev_name,
264 + struct napi_struct *napi_tx, struct napi_struct *napi_rx,
267 + struct tc_priv *priv = g_plat_priv->tc_priv;
270 + for (i = 0; i < EP_MAX_NUM && i < priv->ep_num; i++) {
271 + disable_irq(priv->ep_dev[i].aca_rx_irq);
272 + disable_irq(priv->ep_dev[i].aca_tx_irq);
275 g_plat_priv->netdev = NULL;
276 + g_plat_priv->napi_tx = NULL;
277 + g_plat_priv->napi_rx = NULL;
279 + for (i = 0; i < EP_MAX_NUM && i < priv->ep_num; i++) {
280 + enable_irq(priv->ep_dev[i].aca_rx_irq);
281 + enable_irq(priv->ep_dev[i].aca_tx_irq);
286 @@ -1084,6 +1221,10 @@ static void plat_tc_ops_setup(struct tc_
287 priv->tc_ops.free = plat_mem_free;
288 priv->tc_ops.dev_reg = plat_open;
289 priv->tc_ops.dev_unreg = plat_close;
290 + priv->tc_ops.net_open = plat_net_open;
291 + priv->tc_ops.net_stop = plat_net_stop;
292 + priv->tc_ops.napi_tx = plat_txout_napi;
293 + priv->tc_ops.napi_rx = plat_rxout_napi;
294 priv->tc_ops.umt_init = plat_umt_init;
295 priv->tc_ops.umt_exit = plat_umt_exit;
296 priv->tc_ops.umt_start = plat_umt_start;
299 @@ -3650,7 +3650,7 @@ static void atm_aca_ring_config_init(str
300 static int atm_ring_init(struct atm_priv *priv)
302 atm_aca_ring_config_init(priv);
303 - return priv->tc_priv->tc_ops.dev_reg(NULL, g_atm_dev_name, 0, 0);
304 + return priv->tc_priv->tc_ops.dev_reg(NULL, g_atm_dev_name, NULL, NULL, 0, 0);
307 static int atm_init(struct tc_priv *tcpriv, u32 ep_id)
308 @@ -4020,7 +4020,7 @@ void atm_tc_unload(void)
309 /* unregister device */
310 if (priv->tc_priv->tc_ops.dev_unreg != NULL)
311 priv->tc_priv->tc_ops.dev_unreg(NULL,
312 - g_atm_dev_name, 0);
313 + g_atm_dev_name, NULL, NULL, 0);
315 /* atm_dev_deinit(priv); */
316 /* modem module power off */
317 --- a/dcdp/inc/tc_main.h
318 +++ b/dcdp/inc/tc_main.h
319 @@ -209,9 +209,15 @@ struct tc_hw_ops {
320 void (*subif_unreg)(struct net_device *pdev, const char *dev_name,
321 int subif_id, int flag);
322 int (*dev_reg)(struct net_device *pdev, const char *dev_name,
323 + struct napi_struct *napi_tx, struct napi_struct *napi_rx,
325 void (*dev_unreg)(struct net_device *pdev, const char *dev_name,
326 + struct napi_struct *napi_tx, struct napi_struct *napi_rx,
328 + void (*net_open)(void);
329 + void (*net_stop)(void);
330 + int (*napi_tx)(struct napi_struct *napi, int budget);
331 + int (*napi_rx)(struct napi_struct *napi, int budget);
333 /*umt init/exit including the corresponding DMA init/exit */
334 int (*umt_init)(u32 umt_id, u32 umt_period, u32 umt_dst);
337 @@ -146,7 +146,11 @@ static int ptm_open(struct net_device *d
338 struct ptm_priv *ptm_tc = netdev_priv(dev);
340 tc_info(ptm_tc->tc_priv, MSG_EVENT, "ptm open\n");
342 + ptm_tc->tc_priv->tc_ops.net_open();
344 netif_tx_start_all_queues(dev);
346 #ifdef CONFIG_SOC_TYPE_XWAY
347 xet_phy_wan_port(7, NULL, 1, 1);
348 if (ppa_hook_ppa_phys_port_add_fn)
349 @@ -163,7 +167,11 @@ static int ptm_stop(struct net_device *d
350 struct ptm_priv *ptm_tc = netdev_priv(dev);
352 tc_info(ptm_tc->tc_priv, MSG_EVENT, "ptm stop\n");
354 netif_tx_stop_all_queues(dev);
356 + ptm_tc->tc_priv->tc_ops.net_stop();
358 #ifdef CONFIG_SOC_TYPE_XWAY
359 if (ppa_drv_datapath_mac_entry_setting)
360 ppa_drv_datapath_mac_entry_setting(dev->dev_addr, 0, 6, 10, 1, 2);
361 @@ -564,7 +572,7 @@ static void ptm_rx(struct net_device *de
362 ptm_tc->stats64.rx_packets++;
363 ptm_tc->stats64.rx_bytes += skb->len;
365 - if (netif_rx(skb) == NET_RX_DROP)
366 + if (netif_receive_skb(skb) == NET_RX_DROP)
367 ptm_tc->stats64.rx_dropped++;
370 @@ -664,6 +672,14 @@ static int ptm_dev_init(struct tc_priv *
371 memcpy(ptm_tc->outq_map, def_outq_map, sizeof(def_outq_map));
372 SET_NETDEV_DEV(ptm_tc->dev, tc_priv->ep_dev[id].dev);
374 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,1,0))
375 + netif_napi_add(ptm_tc->dev, &ptm_tc->napi_rx, tc_priv->tc_ops.napi_rx);
376 + netif_napi_add_tx(ptm_tc->dev, &ptm_tc->napi_tx, tc_priv->tc_ops.napi_tx);
378 + netif_napi_add(ptm_tc->dev, &ptm_tc->napi_rx, tc_priv->tc_ops.napi_rx, NAPI_POLL_WEIGHT);
379 + netif_tx_napi_add(ptm_tc->dev, &ptm_tc->napi_tx, tc_priv->tc_ops.napi_tx, NAPI_POLL_WEIGHT);
382 err = register_netdev(ptm_tc->dev);
385 @@ -2618,7 +2634,9 @@ static int ptm_ring_init(struct ptm_ep_p
387 ptm_aca_ring_config_init(priv, id, bonding);
388 return priv->tc_priv->tc_ops.dev_reg(priv->ptm_tc->dev,
389 - priv->ptm_tc->dev->name, id, bonding);
390 + priv->ptm_tc->dev->name,
391 + &priv->ptm_tc->napi_tx, &priv->ptm_tc->napi_rx,
396 @@ -2973,7 +2991,9 @@ void ptm_tc_unload(enum dsl_tc_mode tc_m
397 /* unregister device */
398 if (ptm_tc->tc_priv->tc_ops.dev_unreg != NULL)
399 ptm_tc->tc_priv->tc_ops.dev_unreg(ptm_tc->dev,
400 - ptm_tc->dev->name, 0);
402 + &ptm_tc->napi_tx, &ptm_tc->napi_rx,
405 /* remove PTM callback function */
406 ptm_cb_setup(ptm_tc, 0);
407 @@ -2991,6 +3011,10 @@ void ptm_exit(void)
412 + netif_napi_del(&priv->napi_tx);
413 + netif_napi_del(&priv->napi_rx);
415 unregister_netdev(priv->dev);
416 free_netdev(priv->dev);
418 --- a/dcdp/inc/ptm_tc.h
419 +++ b/dcdp/inc/ptm_tc.h
420 @@ -119,6 +119,8 @@ struct ptm_priv {
423 struct net_device *dev;
424 + struct napi_struct napi_tx;
425 + struct napi_struct napi_rx;
427 struct rtnl_link_stats64 stats64;