kernel: bump 4.14 to 4.14.125 (FS#2305 FS#2297)
[openwrt/openwrt.git] / target / linux / layerscape / patches-4.14 / 702-dpaa2-ethernet-support-layerscape.patch
1 From 90b3f1705785f0e30de6f41abc8764aae1391245 Mon Sep 17 00:00:00 2001
2 From: Biwen Li <biwen.li@nxp.com>
3 Date: Wed, 17 Apr 2019 18:58:28 +0800
4 Subject: [PATCH] dpaa2-ethernet: support layerscape
5 MIME-Version: 1.0
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
8
9 This is an integrated patch of dpaa2-ethernet for layerscape
10
11 Signed-off-by: Biwen Li <biwen.li@nxp.com>
12 Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
13 Signed-off-by: Camelia Groza <camelia.groza@nxp.com>
14 Signed-off-by: David S. Miller <davem@davemloft.net>
15 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
16 Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
17 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
18 Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
19 Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
20 Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
21 Signed-off-by: Valentin Catalin Neacsu <valentin-catalin.neacsu@nxp.com>
22 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
23 ---
24 drivers/staging/fsl-dpaa2/Kconfig | 7 +
25 drivers/staging/fsl-dpaa2/ethernet/Makefile | 3 +
26 .../fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c | 1187 ++++++++
27 .../fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h | 183 ++
28 .../fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 356 +++
29 .../fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h | 60 +
30 .../fsl-dpaa2/ethernet/dpaa2-eth-trace.h | 29 +-
31 .../staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 2509 +++++++++++++----
32 .../staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 394 ++-
33 .../fsl-dpaa2/ethernet/dpaa2-ethtool.c | 716 ++++-
34 drivers/staging/fsl-dpaa2/ethernet/dpkg.h | 380 ++-
35 drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 255 +-
36 drivers/staging/fsl-dpaa2/ethernet/dpni.c | 704 ++++-
37 drivers/staging/fsl-dpaa2/ethernet/dpni.h | 401 ++-
38 drivers/staging/fsl-dpaa2/ethernet/net.h | 30 +-
39 15 files changed, 6315 insertions(+), 899 deletions(-)
40 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c
41 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h
42 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
43 create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
44
45 --- a/drivers/staging/fsl-dpaa2/Kconfig
46 +++ b/drivers/staging/fsl-dpaa2/Kconfig
47 @@ -17,6 +17,13 @@ config FSL_DPAA2_ETH
48 Ethernet driver for Freescale DPAA2 SoCs, using the
49 Freescale MC bus driver
50
51 +config FSL_DPAA2_ETH_CEETM
52 + depends on NET_SCHED
53 + bool "DPAA2 Ethernet CEETM QoS"
54 + default n
55 + ---help---
56 + Enable QoS offloading support through the CEETM hardware block.
57 +
58 if FSL_DPAA2_ETH
59 config FSL_DPAA2_ETH_USE_ERR_QUEUE
60 bool "Enable Rx error queue"
61 --- a/drivers/staging/fsl-dpaa2/ethernet/Makefile
62 +++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile
63 @@ -1,3 +1,4 @@
64 +# SPDX-License-Identifier: GPL-2.0
65 #
66 # Makefile for the Freescale DPAA2 Ethernet controller
67 #
68 @@ -5,6 +6,8 @@
69 obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
70
71 fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o
72 +fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DEBUGFS} += dpaa2-eth-debugfs.o
73 +fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_CEETM} += dpaa2-eth-ceetm.o
74
75 # Needed by the tracing framework
76 CFLAGS_dpaa2-eth.o := -I$(src)
77 --- /dev/null
78 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.c
79 @@ -0,0 +1,1187 @@
80 +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
81 +/*
82 + * Copyright 2017-2019 NXP
83 + *
84 + */
85 +
86 +#include <linux/init.h>
87 +#include <linux/module.h>
88 +
89 +#include "dpaa2-eth-ceetm.h"
90 +#include "dpaa2-eth.h"
91 +
92 +#define DPAA2_CEETM_DESCRIPTION "FSL DPAA2 CEETM qdisc"
93 +/* Convert the Bps rate passed from userspace to the expected Mbit value */
94 +#define dpaa2_eth_bps_to_mbit(rate) (rate >> 17)
95 +
96 +static const struct nla_policy dpaa2_ceetm_policy[DPAA2_CEETM_TCA_MAX] = {
97 + [DPAA2_CEETM_TCA_COPT] = { .len = sizeof(struct dpaa2_ceetm_tc_copt) },
98 + [DPAA2_CEETM_TCA_QOPS] = { .len = sizeof(struct dpaa2_ceetm_tc_qopt) },
99 +};
100 +
101 +struct Qdisc_ops dpaa2_ceetm_qdisc_ops;
102 +
103 +static inline int dpaa2_eth_set_ch_shaping(struct dpaa2_eth_priv *priv,
104 + struct dpni_tx_shaping_cfg *scfg,
105 + struct dpni_tx_shaping_cfg *ecfg,
106 + int coupled, int ch_id)
107 +{
108 + int err = 0;
109 +
110 + netdev_dbg(priv->net_dev, "%s: ch_id %d rate %d mbps\n", __func__,
111 + ch_id, scfg->rate_limit);
112 + err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, scfg,
113 + ecfg, coupled);
114 + if (err)
115 + netdev_err(priv->net_dev, "dpni_set_tx_shaping err\n");
116 +
117 + return err;
118 +}
119 +
120 +static inline int dpaa2_eth_reset_ch_shaping(struct dpaa2_eth_priv *priv,
121 + int ch_id)
122 +{
123 + struct dpni_tx_shaping_cfg cfg = { 0 };
124 +
125 + return dpaa2_eth_set_ch_shaping(priv, &cfg, &cfg, 0, ch_id);
126 +}
127 +
128 +static inline int
129 +dpaa2_eth_update_shaping_cfg(struct net_device *dev,
130 + struct dpaa2_ceetm_shaping_cfg cfg,
131 + struct dpni_tx_shaping_cfg *scfg,
132 + struct dpni_tx_shaping_cfg *ecfg)
133 +{
134 + scfg->rate_limit = dpaa2_eth_bps_to_mbit(cfg.cir);
135 + ecfg->rate_limit = dpaa2_eth_bps_to_mbit(cfg.eir);
136 +
137 + if (cfg.cbs > DPAA2_ETH_MAX_BURST_SIZE) {
138 + netdev_err(dev, "Committed burst size must be under %d\n",
139 + DPAA2_ETH_MAX_BURST_SIZE);
140 + return -EINVAL;
141 + }
142 +
143 + scfg->max_burst_size = cfg.cbs;
144 +
145 + if (cfg.ebs > DPAA2_ETH_MAX_BURST_SIZE) {
146 + netdev_err(dev, "Excess burst size must be under %d\n",
147 + DPAA2_ETH_MAX_BURST_SIZE);
148 + return -EINVAL;
149 + }
150 +
151 + ecfg->max_burst_size = cfg.ebs;
152 +
153 + if ((!cfg.cir || !cfg.eir) && cfg.coupled) {
154 + netdev_err(dev, "Coupling can be set when both CIR and EIR are finite\n");
155 + return -EINVAL;
156 + }
157 +
158 + return 0;
159 +}
160 +
161 +enum update_tx_prio {
162 + DPAA2_ETH_ADD_CQ,
163 + DPAA2_ETH_DEL_CQ,
164 +};
165 +
166 +/* Normalize weights based on max passed value */
167 +static inline int dpaa2_eth_normalize_tx_prio(struct dpaa2_ceetm_qdisc *priv)
168 +{
169 + struct dpni_tx_schedule_cfg *sched_cfg;
170 + struct dpaa2_ceetm_class *cl;
171 + u32 qpri;
172 + u16 weight_max = 0, increment;
173 + int i;
174 +
175 + /* Check the boundaries of the provided values */
176 + for (i = 0; i < priv->clhash.hashsize; i++)
177 + hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
178 + weight_max = (weight_max == 0 ? cl->prio.weight :
179 + (weight_max < cl->prio.weight ?
180 + cl->prio.weight : weight_max));
181 +
182 + /* If there are no elements, there's nothing to do */
183 + if (weight_max == 0)
184 + return 0;
185 +
186 + increment = (DPAA2_CEETM_MAX_WEIGHT - DPAA2_CEETM_MIN_WEIGHT) /
187 + weight_max;
188 +
189 + for (i = 0; i < priv->clhash.hashsize; i++) {
190 + hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
191 + if (cl->prio.mode == STRICT_PRIORITY)
192 + continue;
193 +
194 + qpri = cl->prio.qpri;
195 + sched_cfg = &priv->prio.tx_prio_cfg.tc_sched[qpri];
196 +
197 + sched_cfg->delta_bandwidth =
198 + DPAA2_CEETM_MIN_WEIGHT +
199 + (cl->prio.weight * increment);
200 +
201 + pr_debug("%s: Normalized CQ qpri %d weight to %d\n",
202 + __func__, qpri, sched_cfg->delta_bandwidth);
203 + }
204 + }
205 +
206 + return 0;
207 +}
208 +
209 +static inline int dpaa2_eth_update_tx_prio(struct dpaa2_eth_priv *priv,
210 + struct dpaa2_ceetm_class *cl,
211 + enum update_tx_prio type)
212 +{
213 + struct dpaa2_ceetm_qdisc *sch = qdisc_priv(cl->parent);
214 + struct dpni_tx_schedule_cfg *sched_cfg;
215 + struct dpni_taildrop td = {0};
216 + u8 ch_id = 0, tc_id = 0;
217 + u32 qpri = 0;
218 + int err = 0;
219 +
220 + qpri = cl->prio.qpri;
221 + tc_id = DPNI_BUILD_CH_TC(ch_id, qpri);
222 +
223 + switch (type) {
224 + case DPAA2_ETH_ADD_CQ:
225 + /* Enable taildrop */
226 + td.enable = 1;
227 + td.units = DPNI_CONGESTION_UNIT_FRAMES;
228 + td.threshold = DPAA2_CEETM_TD_THRESHOLD;
229 + err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
230 + DPNI_CP_GROUP, DPNI_QUEUE_TX, tc_id,
231 + 0, &td);
232 + if (err) {
233 + netdev_err(priv->net_dev, "Error enabling Tx taildrop %d\n",
234 + err);
235 + return err;
236 + }
237 + break;
238 + case DPAA2_ETH_DEL_CQ:
239 + /* Disable taildrop */
240 + td.enable = 0;
241 + err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
242 + DPNI_CP_GROUP, DPNI_QUEUE_TX, tc_id,
243 + 0, &td);
244 + if (err) {
245 + netdev_err(priv->net_dev, "Error disabling Tx taildrop %d\n",
246 + err);
247 + return err;
248 + }
249 + break;
250 + }
251 +
252 + /* We can zero out the structure in the tx_prio_cfg array */
253 + if (type == DPAA2_ETH_DEL_CQ) {
254 + sched_cfg = &sch->prio.tx_prio_cfg.tc_sched[qpri];
255 + memset(sched_cfg, 0, sizeof(*sched_cfg));
256 + }
257 +
258 + /* Normalize priorities */
259 + err = dpaa2_eth_normalize_tx_prio(sch);
260 +
261 + /* Debug print goes here */
262 + print_hex_dump_debug("tx_prio: ", DUMP_PREFIX_OFFSET, 16, 1,
263 + &sch->prio.tx_prio_cfg,
264 + sizeof(sch->prio.tx_prio_cfg), 0);
265 +
266 + /* Call dpni_set_tx_priorities for the entire prio qdisc */
267 + err = dpni_set_tx_priorities(priv->mc_io, 0, priv->mc_token,
268 + &sch->prio.tx_prio_cfg);
269 + if (err)
270 + netdev_err(priv->net_dev, "dpni_set_tx_priorities err %d\n",
271 + err);
272 +
273 + return err;
274 +}
275 +
276 +static void dpaa2_eth_ceetm_enable(struct dpaa2_eth_priv *priv)
277 +{
278 + priv->ceetm_en = true;
279 +}
280 +
281 +static void dpaa2_eth_ceetm_disable(struct dpaa2_eth_priv *priv)
282 +{
283 + priv->ceetm_en = false;
284 +}
285 +
286 +/* Find class in qdisc hash table using given handle */
287 +static inline struct dpaa2_ceetm_class *dpaa2_ceetm_find(u32 handle,
288 + struct Qdisc *sch)
289 +{
290 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
291 + struct Qdisc_class_common *clc;
292 +
293 + pr_debug(KBUILD_BASENAME " : %s : find class %X in qdisc %X\n",
294 + __func__, handle, sch->handle);
295 +
296 + clc = qdisc_class_find(&priv->clhash, handle);
297 + return clc ? container_of(clc, struct dpaa2_ceetm_class, common) : NULL;
298 +}
299 +
300 +/* Insert a class in the qdisc's class hash */
301 +static void dpaa2_ceetm_link_class(struct Qdisc *sch,
302 + struct Qdisc_class_hash *clhash,
303 + struct Qdisc_class_common *common)
304 +{
305 + sch_tree_lock(sch);
306 + qdisc_class_hash_insert(clhash, common);
307 + sch_tree_unlock(sch);
308 + qdisc_class_hash_grow(sch, clhash);
309 +}
310 +
311 +/* Destroy a ceetm class */
312 +static void dpaa2_ceetm_cls_destroy(struct Qdisc *sch,
313 + struct dpaa2_ceetm_class *cl)
314 +{
315 + struct net_device *dev = qdisc_dev(sch);
316 + struct dpaa2_eth_priv *priv = netdev_priv(dev);
317 +
318 + if (!cl)
319 + return;
320 +
321 + pr_debug(KBUILD_BASENAME " : %s : destroy class %X from under %X\n",
322 + __func__, cl->common.classid, sch->handle);
323 +
324 + /* Recurse into child first */
325 + if (cl->child) {
326 + qdisc_destroy(cl->child);
327 + cl->child = NULL;
328 + }
329 +
330 + switch (cl->type) {
331 + case CEETM_ROOT:
332 + if (dpaa2_eth_reset_ch_shaping(priv, cl->root.ch_id))
333 + netdev_err(dev, "Error resetting channel shaping\n");
334 +
335 + break;
336 +
337 + case CEETM_PRIO:
338 + if (dpaa2_eth_update_tx_prio(priv, cl, DPAA2_ETH_DEL_CQ))
339 + netdev_err(dev, "Error resetting tx_priorities\n");
340 +
341 + if (cl->prio.cstats)
342 + free_percpu(cl->prio.cstats);
343 +
344 + break;
345 + }
346 +
347 + tcf_block_put(cl->block);
348 + kfree(cl);
349 +}
350 +
351 +/* Destroy a ceetm qdisc */
352 +static void dpaa2_ceetm_destroy(struct Qdisc *sch)
353 +{
354 + unsigned int i;
355 + struct hlist_node *next;
356 + struct dpaa2_ceetm_class *cl;
357 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
358 + struct net_device *dev = qdisc_dev(sch);
359 + struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
360 +
361 + pr_debug(KBUILD_BASENAME " : %s : destroy qdisc %X\n",
362 + __func__, sch->handle);
363 +
364 + /* All filters need to be removed before destroying the classes */
365 + tcf_block_put(priv->block);
366 +
367 + for (i = 0; i < priv->clhash.hashsize; i++) {
368 + hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode)
369 + tcf_block_put(cl->block);
370 + }
371 +
372 + for (i = 0; i < priv->clhash.hashsize; i++) {
373 + hlist_for_each_entry_safe(cl, next, &priv->clhash.hash[i],
374 + common.hnode)
375 + dpaa2_ceetm_cls_destroy(sch, cl);
376 + }
377 +
378 + qdisc_class_hash_destroy(&priv->clhash);
379 +
380 + switch (priv->type) {
381 + case CEETM_ROOT:
382 + dpaa2_eth_ceetm_disable(priv_eth);
383 +
384 + if (priv->root.qstats)
385 + free_percpu(priv->root.qstats);
386 +
387 + if (!priv->root.qdiscs)
388 + break;
389 +
390 + /* Destroy the pfifo qdiscs in case they haven't been attached
391 + * to the netdev queues yet.
392 + */
393 + for (i = 0; i < dev->num_tx_queues; i++)
394 + if (priv->root.qdiscs[i])
395 + qdisc_destroy(priv->root.qdiscs[i]);
396 +
397 + kfree(priv->root.qdiscs);
398 + break;
399 +
400 + case CEETM_PRIO:
401 + if (priv->prio.parent)
402 + priv->prio.parent->child = NULL;
403 + break;
404 + }
405 +}
406 +
407 +static int dpaa2_ceetm_dump(struct Qdisc *sch, struct sk_buff *skb)
408 +{
409 + struct Qdisc *qdisc;
410 + unsigned int ntx, i;
411 + struct nlattr *nest;
412 + struct dpaa2_ceetm_tc_qopt qopt;
413 + struct dpaa2_ceetm_qdisc_stats *qstats;
414 + struct net_device *dev = qdisc_dev(sch);
415 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
416 +
417 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
418 +
419 + sch_tree_lock(sch);
420 + memset(&qopt, 0, sizeof(qopt));
421 + qopt.type = priv->type;
422 + qopt.shaped = priv->shaped;
423 +
424 + switch (priv->type) {
425 + case CEETM_ROOT:
426 + /* Gather statistics from the underlying pfifo qdiscs */
427 + sch->q.qlen = 0;
428 + memset(&sch->bstats, 0, sizeof(sch->bstats));
429 + memset(&sch->qstats, 0, sizeof(sch->qstats));
430 +
431 + for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
432 + qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
433 + sch->q.qlen += qdisc->q.qlen;
434 + sch->bstats.bytes += qdisc->bstats.bytes;
435 + sch->bstats.packets += qdisc->bstats.packets;
436 + sch->qstats.qlen += qdisc->qstats.qlen;
437 + sch->qstats.backlog += qdisc->qstats.backlog;
438 + sch->qstats.drops += qdisc->qstats.drops;
439 + sch->qstats.requeues += qdisc->qstats.requeues;
440 + sch->qstats.overlimits += qdisc->qstats.overlimits;
441 + }
442 +
443 + for_each_online_cpu(i) {
444 + qstats = per_cpu_ptr(priv->root.qstats, i);
445 + sch->qstats.drops += qstats->drops;
446 + }
447 +
448 + break;
449 +
450 + case CEETM_PRIO:
451 + qopt.prio_group_A = priv->prio.tx_prio_cfg.prio_group_A;
452 + qopt.prio_group_B = priv->prio.tx_prio_cfg.prio_group_B;
453 + qopt.separate_groups = priv->prio.tx_prio_cfg.separate_groups;
454 + break;
455 +
456 + default:
457 + pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
458 + sch_tree_unlock(sch);
459 + return -EINVAL;
460 + }
461 +
462 + nest = nla_nest_start(skb, TCA_OPTIONS);
463 + if (!nest)
464 + goto nla_put_failure;
465 + if (nla_put(skb, DPAA2_CEETM_TCA_QOPS, sizeof(qopt), &qopt))
466 + goto nla_put_failure;
467 + nla_nest_end(skb, nest);
468 +
469 + sch_tree_unlock(sch);
470 + return skb->len;
471 +
472 +nla_put_failure:
473 + sch_tree_unlock(sch);
474 + nla_nest_cancel(skb, nest);
475 + return -EMSGSIZE;
476 +}
477 +
478 +static int dpaa2_ceetm_change_prio(struct Qdisc *sch,
479 + struct dpaa2_ceetm_qdisc *priv,
480 + struct dpaa2_ceetm_tc_qopt *qopt)
481 +{
482 + /* TODO: Once LX2 support is added */
483 + /* priv->shaped = parent_cl->shaped; */
484 + priv->prio.tx_prio_cfg.prio_group_A = qopt->prio_group_A;
485 + priv->prio.tx_prio_cfg.prio_group_B = qopt->prio_group_B;
486 + priv->prio.tx_prio_cfg.separate_groups = qopt->separate_groups;
487 +
488 + return 0;
489 +}
490 +
491 +/* Edit a ceetm qdisc */
492 +static int dpaa2_ceetm_change(struct Qdisc *sch, struct nlattr *opt)
493 +{
494 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
495 + struct nlattr *tb[DPAA2_CEETM_TCA_QOPS + 1];
496 + struct dpaa2_ceetm_tc_qopt *qopt;
497 + int err;
498 +
499 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
500 +
501 + err = nla_parse_nested(tb, DPAA2_CEETM_TCA_QOPS, opt,
502 + dpaa2_ceetm_policy, NULL);
503 + if (err < 0) {
504 + pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
505 + "nla_parse_nested");
506 + return err;
507 + }
508 +
509 + if (!tb[DPAA2_CEETM_TCA_QOPS]) {
510 + pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
511 + "tb");
512 + return -EINVAL;
513 + }
514 +
515 + if (TC_H_MIN(sch->handle)) {
516 + pr_err("CEETM: a qdisc should not have a minor\n");
517 + return -EINVAL;
518 + }
519 +
520 + qopt = nla_data(tb[DPAA2_CEETM_TCA_QOPS]);
521 +
522 + if (priv->type != qopt->type) {
523 + pr_err("CEETM: qdisc %X is not of the provided type\n",
524 + sch->handle);
525 + return -EINVAL;
526 + }
527 +
528 + switch (priv->type) {
529 + case CEETM_PRIO:
530 + err = dpaa2_ceetm_change_prio(sch, priv, qopt);
531 + break;
532 + default:
533 + pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
534 + err = -EINVAL;
535 + }
536 +
537 + return err;
538 +}
539 +
540 +/* Configure a root ceetm qdisc */
541 +static int dpaa2_ceetm_init_root(struct Qdisc *sch,
542 + struct dpaa2_ceetm_qdisc *priv,
543 + struct dpaa2_ceetm_tc_qopt *qopt)
544 +{
545 + struct net_device *dev = qdisc_dev(sch);
546 + struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
547 + struct netdev_queue *dev_queue;
548 + unsigned int i, parent_id;
549 + struct Qdisc *qdisc;
550 +
551 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
552 +
553 + /* Validate inputs */
554 + if (sch->parent != TC_H_ROOT) {
555 + pr_err("CEETM: a root ceetm qdisc must be root\n");
556 + return -EINVAL;
557 + }
558 +
559 + /* Pre-allocate underlying pfifo qdiscs.
560 + *
561 + * We want to offload shaping and scheduling decisions to the hardware.
562 + * The pfifo qdiscs will be attached to the netdev queues and will
563 + * guide the traffic from the IP stack down to the driver with minimum
564 + * interference.
565 + *
566 + * The CEETM qdiscs and classes will be traversed when the traffic
567 + * reaches the driver.
568 + */
569 + priv->root.qdiscs = kcalloc(dev->num_tx_queues,
570 + sizeof(priv->root.qdiscs[0]),
571 + GFP_KERNEL);
572 + if (!priv->root.qdiscs)
573 + return -ENOMEM;
574 +
575 + for (i = 0; i < dev->num_tx_queues; i++) {
576 + dev_queue = netdev_get_tx_queue(dev, i);
577 + parent_id = TC_H_MAKE(TC_H_MAJ(sch->handle),
578 + TC_H_MIN(i + PFIFO_MIN_OFFSET));
579 +
580 + qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
581 + parent_id);
582 + if (!qdisc)
583 + return -ENOMEM;
584 +
585 + priv->root.qdiscs[i] = qdisc;
586 + qdisc->flags |= TCQ_F_ONETXQUEUE;
587 + }
588 +
589 + sch->flags |= TCQ_F_MQROOT;
590 +
591 + priv->root.qstats = alloc_percpu(struct dpaa2_ceetm_qdisc_stats);
592 + if (!priv->root.qstats) {
593 + pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
594 + __func__);
595 + return -ENOMEM;
596 + }
597 +
598 + dpaa2_eth_ceetm_enable(priv_eth);
599 + return 0;
600 +}
601 +
602 +/* Configure a prio ceetm qdisc */
603 +static int dpaa2_ceetm_init_prio(struct Qdisc *sch,
604 + struct dpaa2_ceetm_qdisc *priv,
605 + struct dpaa2_ceetm_tc_qopt *qopt)
606 +{
607 + struct net_device *dev = qdisc_dev(sch);
608 + struct dpaa2_ceetm_class *parent_cl;
609 + struct Qdisc *parent_qdisc;
610 +
611 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
612 +
613 + if (sch->parent == TC_H_ROOT) {
614 + pr_err("CEETM: a prio ceetm qdisc can not be root\n");
615 + return -EINVAL;
616 + }
617 +
618 + parent_qdisc = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
619 + if (strcmp(parent_qdisc->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
620 + pr_err("CEETM: a ceetm qdisc can not be attached to other qdisc/class types\n");
621 + return -EINVAL;
622 + }
623 +
624 + /* Obtain the parent root ceetm_class */
625 + parent_cl = dpaa2_ceetm_find(sch->parent, parent_qdisc);
626 +
627 + if (!parent_cl || parent_cl->type != CEETM_ROOT) {
628 + pr_err("CEETM: a prio ceetm qdiscs can be added only under a root ceetm class\n");
629 + return -EINVAL;
630 + }
631 +
632 + priv->prio.parent = parent_cl;
633 + parent_cl->child = sch;
634 +
635 + return dpaa2_ceetm_change_prio(sch, priv, qopt);
636 +}
637 +
638 +/* Configure a generic ceetm qdisc */
639 +static int dpaa2_ceetm_init(struct Qdisc *sch, struct nlattr *opt)
640 +{
641 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
642 + struct net_device *dev = qdisc_dev(sch);
643 + struct nlattr *tb[DPAA2_CEETM_TCA_QOPS + 1];
644 + struct dpaa2_ceetm_tc_qopt *qopt;
645 + int err;
646 +
647 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
648 +
649 + if (!netif_is_multiqueue(dev))
650 + return -EOPNOTSUPP;
651 +
652 + err = tcf_block_get(&priv->block, &priv->filter_list);
653 + if (err) {
654 + pr_err("CEETM: unable to get tcf_block\n");
655 + return err;
656 + }
657 +
658 + if (!opt) {
659 + pr_err(KBUILD_BASENAME " : %s : tc error - opt = NULL\n",
660 + __func__);
661 + return -EINVAL;
662 + }
663 +
664 + err = nla_parse_nested(tb, DPAA2_CEETM_TCA_QOPS, opt,
665 + dpaa2_ceetm_policy, NULL);
666 + if (err < 0) {
667 + pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
668 + "nla_parse_nested");
669 + return err;
670 + }
671 +
672 + if (!tb[DPAA2_CEETM_TCA_QOPS]) {
673 + pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
674 + "tb");
675 + return -EINVAL;
676 + }
677 +
678 + if (TC_H_MIN(sch->handle)) {
679 + pr_err("CEETM: a qdisc should not have a minor\n");
680 + return -EINVAL;
681 + }
682 +
683 + qopt = nla_data(tb[DPAA2_CEETM_TCA_QOPS]);
684 +
685 + /* Initialize the class hash list. Each qdisc has its own class hash */
686 + err = qdisc_class_hash_init(&priv->clhash);
687 + if (err < 0) {
688 + pr_err(KBUILD_BASENAME " : %s : qdisc_class_hash_init failed\n",
689 + __func__);
690 + return err;
691 + }
692 +
693 + priv->type = qopt->type;
694 + priv->shaped = qopt->shaped;
695 +
696 + switch (priv->type) {
697 + case CEETM_ROOT:
698 + err = dpaa2_ceetm_init_root(sch, priv, qopt);
699 + break;
700 + case CEETM_PRIO:
701 + err = dpaa2_ceetm_init_prio(sch, priv, qopt);
702 + break;
703 + default:
704 + pr_err(KBUILD_BASENAME " : %s : invalid qdisc\n", __func__);
705 + /* Note: dpaa2_ceetm_destroy() will be called by our caller */
706 + err = -EINVAL;
707 + }
708 +
709 + return err;
710 +}
711 +
712 +/* Attach the underlying pfifo qdiscs */
713 +static void dpaa2_ceetm_attach(struct Qdisc *sch)
714 +{
715 + struct net_device *dev = qdisc_dev(sch);
716 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
717 + struct Qdisc *qdisc, *old_qdisc;
718 + unsigned int i;
719 +
720 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
721 +
722 + for (i = 0; i < dev->num_tx_queues; i++) {
723 + qdisc = priv->root.qdiscs[i];
724 + old_qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
725 + if (old_qdisc)
726 + qdisc_destroy(old_qdisc);
727 + }
728 +
729 + /* Remove the references to the pfifo qdiscs since the kernel will
730 + * destroy them when needed. No cleanup on our part is required from
731 + * this point on.
732 + */
733 + kfree(priv->root.qdiscs);
734 + priv->root.qdiscs = NULL;
735 +}
736 +
737 +static unsigned long dpaa2_ceetm_cls_find(struct Qdisc *sch, u32 classid)
738 +{
739 + struct dpaa2_ceetm_class *cl;
740 +
741 + pr_debug(KBUILD_BASENAME " : %s : classid %X from qdisc %X\n",
742 + __func__, classid, sch->handle);
743 + cl = dpaa2_ceetm_find(classid, sch);
744 +
745 + return (unsigned long)cl;
746 +}
747 +
748 +static int dpaa2_ceetm_cls_change_root(struct dpaa2_ceetm_class *cl,
749 + struct dpaa2_ceetm_tc_copt *copt,
750 + struct net_device *dev)
751 +{
752 + struct dpaa2_eth_priv *priv = netdev_priv(dev);
753 + struct dpni_tx_shaping_cfg scfg = { 0 }, ecfg = { 0 };
754 + int err = 0;
755 +
756 + pr_debug(KBUILD_BASENAME " : %s : class %X\n", __func__,
757 + cl->common.classid);
758 +
759 + if (!cl->shaped)
760 + return 0;
761 +
762 + if (dpaa2_eth_update_shaping_cfg(dev, copt->shaping_cfg,
763 + &scfg, &ecfg))
764 + return -EINVAL;
765 +
766 + err = dpaa2_eth_set_ch_shaping(priv, &scfg, &ecfg,
767 + copt->shaping_cfg.coupled,
768 + cl->root.ch_id);
769 + if (err)
770 + return err;
771 +
772 + memcpy(&cl->root.shaping_cfg, &copt->shaping_cfg,
773 + sizeof(struct dpaa2_ceetm_shaping_cfg));
774 +
775 + return err;
776 +}
777 +
778 +static int dpaa2_ceetm_cls_change_prio(struct dpaa2_ceetm_class *cl,
779 + struct dpaa2_ceetm_tc_copt *copt,
780 + struct net_device *dev)
781 +{
782 + struct dpaa2_ceetm_qdisc *sch = qdisc_priv(cl->parent);
783 + struct dpni_tx_schedule_cfg *sched_cfg;
784 + struct dpaa2_eth_priv *priv = netdev_priv(dev);
785 + int err;
786 +
787 + pr_debug(KBUILD_BASENAME " : %s : class %X mode %d weight %d\n",
788 + __func__, cl->common.classid, copt->mode, copt->weight);
789 +
790 + if (!cl->prio.cstats) {
791 + cl->prio.cstats = alloc_percpu(struct dpaa2_ceetm_class_stats);
792 + if (!cl->prio.cstats) {
793 + pr_err(KBUILD_BASENAME " : %s : alloc_percpu() failed\n",
794 + __func__);
795 + return -ENOMEM;
796 + }
797 + }
798 +
799 + cl->prio.mode = copt->mode;
800 + cl->prio.weight = copt->weight;
801 +
802 + sched_cfg = &sch->prio.tx_prio_cfg.tc_sched[cl->prio.qpri];
803 +
804 + switch (copt->mode) {
805 + case STRICT_PRIORITY:
806 + sched_cfg->mode = DPNI_TX_SCHED_STRICT_PRIORITY;
807 + break;
808 + case WEIGHTED_A:
809 + sched_cfg->mode = DPNI_TX_SCHED_WEIGHTED_A;
810 + break;
811 + case WEIGHTED_B:
812 + sched_cfg->mode = DPNI_TX_SCHED_WEIGHTED_B;
813 + break;
814 + }
815 +
816 + err = dpaa2_eth_update_tx_prio(priv, cl, DPAA2_ETH_ADD_CQ);
817 +
818 + return err;
819 +}
820 +
821 +/* Add a new ceetm class */
822 +static int dpaa2_ceetm_cls_add(struct Qdisc *sch, u32 classid,
823 + struct dpaa2_ceetm_tc_copt *copt,
824 + unsigned long *arg)
825 +{
826 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
827 + struct net_device *dev = qdisc_dev(sch);
828 + struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
829 + struct dpaa2_ceetm_class *cl;
830 + int err;
831 +
832 + if (copt->type == CEETM_ROOT &&
833 + priv->clhash.hashelems == dpaa2_eth_ch_count(priv_eth)) {
834 + pr_err("CEETM: only %d channel%s per DPNI allowed, sorry\n",
835 + dpaa2_eth_ch_count(priv_eth),
836 + dpaa2_eth_ch_count(priv_eth) == 1 ? "" : "s");
837 + return -EINVAL;
838 + }
839 +
840 + if (copt->type == CEETM_PRIO &&
841 + priv->clhash.hashelems == dpaa2_eth_tc_count(priv_eth)) {
842 + pr_err("CEETM: only %d queue%s per channel allowed, sorry\n",
843 + dpaa2_eth_tc_count(priv_eth),
844 + dpaa2_eth_tc_count(priv_eth) == 1 ? "" : "s");
845 + return -EINVAL;
846 + }
847 +
848 + cl = kzalloc(sizeof(*cl), GFP_KERNEL);
849 + if (!cl)
850 + return -ENOMEM;
851 +
852 + err = tcf_block_get(&cl->block, &cl->filter_list);
853 + if (err) {
854 + pr_err("%s: Unable to set new root class\n", __func__);
855 + goto out_free;
856 + }
857 +
858 + cl->common.classid = classid;
859 + cl->parent = sch;
860 + cl->child = NULL;
861 +
862 + /* Add class handle in Qdisc */
863 + dpaa2_ceetm_link_class(sch, &priv->clhash, &cl->common);
864 +
865 + cl->shaped = copt->shaped;
866 + cl->type = copt->type;
867 +
868 + /* Claim a CEETM channel / tc - DPAA2 will assume the transition from
869 + * classid to qdid/qpri, starting from qdid / qpri 0
870 + */
871 + switch (copt->type) {
872 + case CEETM_ROOT:
873 + cl->root.ch_id = classid - sch->handle - 1;
874 + err = dpaa2_ceetm_cls_change_root(cl, copt, dev);
875 + break;
876 + case CEETM_PRIO:
877 + cl->prio.qpri = classid - sch->handle - 1;
878 + err = dpaa2_ceetm_cls_change_prio(cl, copt, dev);
879 + break;
880 + }
881 +
882 + if (err) {
883 + pr_err("%s: Unable to set new %s class\n", __func__,
884 + (copt->type == CEETM_ROOT ? "root" : "prio"));
885 + goto out_free;
886 + }
887 +
888 + switch (copt->type) {
889 + case CEETM_ROOT:
890 + pr_debug(KBUILD_BASENAME " : %s : configured root class %X associated with channel qdid %d\n",
891 + __func__, classid, cl->root.ch_id);
892 + break;
893 + case CEETM_PRIO:
894 + pr_debug(KBUILD_BASENAME " : %s : configured prio class %X associated with queue qpri %d\n",
895 + __func__, classid, cl->prio.qpri);
896 + break;
897 + }
898 +
899 + *arg = (unsigned long)cl;
900 + return 0;
901 +
902 +out_free:
903 + kfree(cl);
904 + return err;
905 +}
906 +
907 +/* Add or configure a ceetm class */
908 +static int dpaa2_ceetm_cls_change(struct Qdisc *sch, u32 classid, u32 parentid,
909 + struct nlattr **tca, unsigned long *arg)
910 +{
911 + struct dpaa2_ceetm_qdisc *priv;
912 + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)*arg;
913 + struct nlattr *opt = tca[TCA_OPTIONS];
914 + struct nlattr *tb[DPAA2_CEETM_TCA_MAX];
915 + struct dpaa2_ceetm_tc_copt *copt;
916 + struct net_device *dev = qdisc_dev(sch);
917 + int err;
918 +
919 + pr_debug(KBUILD_BASENAME " : %s : classid %X under qdisc %X\n",
920 + __func__, classid, sch->handle);
921 +
922 + if (strcmp(sch->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
923 + pr_err("CEETM: a ceetm class can not be attached to other qdisc/class types\n");
924 + return -EINVAL;
925 + }
926 +
927 + priv = qdisc_priv(sch);
928 +
929 + if (!opt) {
930 + pr_err(KBUILD_BASENAME " : %s : tc error NULL opt\n", __func__);
931 + return -EINVAL;
932 + }
933 +
934 + err = nla_parse_nested(tb, DPAA2_CEETM_TCA_COPT, opt,
935 + dpaa2_ceetm_policy, NULL);
936 + if (err < 0) {
937 + pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
938 + "nla_parse_nested");
939 + return -EINVAL;
940 + }
941 +
942 + if (!tb[DPAA2_CEETM_TCA_COPT]) {
943 + pr_err(KBUILD_BASENAME " : %s : tc error in %s\n", __func__,
944 + "tb");
945 + return -EINVAL;
946 + }
947 +
948 + copt = nla_data(tb[DPAA2_CEETM_TCA_COPT]);
949 +
950 + /* Configure an existing ceetm class */
951 + if (cl) {
952 + if (copt->type != cl->type) {
953 + pr_err("CEETM: class %X is not of the provided type\n",
954 + cl->common.classid);
955 + return -EINVAL;
956 + }
957 +
958 + switch (copt->type) {
959 + case CEETM_ROOT:
960 + return dpaa2_ceetm_cls_change_root(cl, copt, dev);
961 + case CEETM_PRIO:
962 + return dpaa2_ceetm_cls_change_prio(cl, copt, dev);
963 +
964 + default:
965 + pr_err(KBUILD_BASENAME " : %s : invalid class\n",
966 + __func__);
967 + return -EINVAL;
968 + }
969 + }
970 +
971 + return dpaa2_ceetm_cls_add(sch, classid, copt, arg);
972 +}
973 +
974 +static void dpaa2_ceetm_cls_walk(struct Qdisc *sch, struct qdisc_walker *arg)
975 +{
976 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
977 + struct dpaa2_ceetm_class *cl;
978 + unsigned int i;
979 +
980 + pr_debug(KBUILD_BASENAME " : %s : qdisc %X\n", __func__, sch->handle);
981 +
982 + if (arg->stop)
983 + return;
984 +
985 + for (i = 0; i < priv->clhash.hashsize; i++) {
986 + hlist_for_each_entry(cl, &priv->clhash.hash[i], common.hnode) {
987 + if (arg->count < arg->skip) {
988 + arg->count++;
989 + continue;
990 + }
991 + if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
992 + arg->stop = 1;
993 + return;
994 + }
995 + arg->count++;
996 + }
997 + }
998 +}
999 +
1000 +static int dpaa2_ceetm_cls_dump(struct Qdisc *sch, unsigned long arg,
1001 + struct sk_buff *skb, struct tcmsg *tcm)
1002 +{
1003 + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
1004 + struct nlattr *nest;
1005 + struct dpaa2_ceetm_tc_copt copt;
1006 +
1007 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
1008 + __func__, cl->common.classid, sch->handle);
1009 +
1010 + sch_tree_lock(sch);
1011 +
1012 + tcm->tcm_parent = ((struct Qdisc *)cl->parent)->handle;
1013 + tcm->tcm_handle = cl->common.classid;
1014 +
1015 + memset(&copt, 0, sizeof(copt));
1016 +
1017 + copt.shaped = cl->shaped;
1018 + copt.type = cl->type;
1019 +
1020 + switch (cl->type) {
1021 + case CEETM_ROOT:
1022 + if (cl->child)
1023 + tcm->tcm_info = cl->child->handle;
1024 +
1025 + memcpy(&copt.shaping_cfg, &cl->root.shaping_cfg,
1026 + sizeof(struct dpaa2_ceetm_shaping_cfg));
1027 +
1028 + break;
1029 +
1030 + case CEETM_PRIO:
1031 + if (cl->child)
1032 + tcm->tcm_info = cl->child->handle;
1033 +
1034 + copt.mode = cl->prio.mode;
1035 + copt.weight = cl->prio.weight;
1036 +
1037 + break;
1038 + }
1039 +
1040 + nest = nla_nest_start(skb, TCA_OPTIONS);
1041 + if (!nest)
1042 + goto nla_put_failure;
1043 + if (nla_put(skb, DPAA2_CEETM_TCA_COPT, sizeof(copt), &copt))
1044 + goto nla_put_failure;
1045 + nla_nest_end(skb, nest);
1046 + sch_tree_unlock(sch);
1047 + return skb->len;
1048 +
1049 +nla_put_failure:
1050 + sch_tree_unlock(sch);
1051 + nla_nest_cancel(skb, nest);
1052 + return -EMSGSIZE;
1053 +}
1054 +
1055 +static int dpaa2_ceetm_cls_delete(struct Qdisc *sch, unsigned long arg)
1056 +{
1057 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
1058 + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
1059 +
1060 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
1061 + __func__, cl->common.classid, sch->handle);
1062 +
1063 + sch_tree_lock(sch);
1064 + qdisc_class_hash_remove(&priv->clhash, &cl->common);
1065 + sch_tree_unlock(sch);
1066 + return 0;
1067 +}
1068 +
1069 +/* Get the class' child qdisc, if any */
1070 +static struct Qdisc *dpaa2_ceetm_cls_leaf(struct Qdisc *sch, unsigned long arg)
1071 +{
1072 + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
1073 +
1074 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n",
1075 + __func__, cl->common.classid, sch->handle);
1076 +
1077 + switch (cl->type) {
1078 + case CEETM_ROOT:
1079 + case CEETM_PRIO:
1080 + return cl->child;
1081 + }
1082 +
1083 + return NULL;
1084 +}
1085 +
1086 +static int dpaa2_ceetm_cls_graft(struct Qdisc *sch, unsigned long arg,
1087 + struct Qdisc *new, struct Qdisc **old)
1088 +{
1089 + if (new && strcmp(new->ops->id, dpaa2_ceetm_qdisc_ops.id)) {
1090 + pr_err("CEETM: only ceetm qdiscs can be attached to ceetm classes\n");
1091 + return -EOPNOTSUPP;
1092 + }
1093 +
1094 + return 0;
1095 +}
1096 +
1097 +static int dpaa2_ceetm_cls_dump_stats(struct Qdisc *sch, unsigned long arg,
1098 + struct gnet_dump *d)
1099 +{
1100 + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
1101 + struct gnet_stats_basic_packed tmp_bstats;
1102 + struct dpaa2_ceetm_tc_xstats xstats;
1103 + union dpni_statistics dpni_stats;
1104 + struct net_device *dev = qdisc_dev(sch);
1105 + struct dpaa2_eth_priv *priv_eth = netdev_priv(dev);
1106 + u8 ch_id = 0;
1107 + int err;
1108 +
1109 + memset(&xstats, 0, sizeof(xstats));
1110 + memset(&tmp_bstats, 0, sizeof(tmp_bstats));
1111 +
1112 + if (cl->type == CEETM_ROOT)
1113 + return 0;
1114 +
1115 + err = dpni_get_statistics(priv_eth->mc_io, 0, priv_eth->mc_token, 3,
1116 + DPNI_BUILD_CH_TC(ch_id, cl->prio.qpri),
1117 + &dpni_stats);
1118 + if (err)
1119 + netdev_warn(dev, "dpni_get_statistics(%d) failed - %d\n", 3, err);
1120 +
1121 + xstats.ceetm_dequeue_bytes = dpni_stats.page_3.ceetm_dequeue_bytes;
1122 + xstats.ceetm_dequeue_frames = dpni_stats.page_3.ceetm_dequeue_frames;
1123 + xstats.ceetm_reject_bytes = dpni_stats.page_3.ceetm_reject_bytes;
1124 + xstats.ceetm_reject_frames = dpni_stats.page_3.ceetm_reject_frames;
1125 +
1126 + return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
1127 +}
1128 +
1129 +static struct tcf_block *dpaa2_ceetm_tcf_block(struct Qdisc *sch,
1130 + unsigned long arg)
1131 +{
1132 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
1133 + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
1134 +
1135 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
1136 + cl ? cl->common.classid : 0, sch->handle);
1137 + return cl ? cl->block : priv->block;
1138 +}
1139 +
1140 +static unsigned long dpaa2_ceetm_tcf_bind(struct Qdisc *sch,
1141 + unsigned long parent,
1142 + u32 classid)
1143 +{
1144 + struct dpaa2_ceetm_class *cl = dpaa2_ceetm_find(classid, sch);
1145 +
1146 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
1147 + cl ? cl->common.classid : 0, sch->handle);
1148 + return (unsigned long)cl;
1149 +}
1150 +
1151 +static void dpaa2_ceetm_tcf_unbind(struct Qdisc *sch, unsigned long arg)
1152 +{
1153 + struct dpaa2_ceetm_class *cl = (struct dpaa2_ceetm_class *)arg;
1154 +
1155 + pr_debug(KBUILD_BASENAME " : %s : class %X under qdisc %X\n", __func__,
1156 + cl ? cl->common.classid : 0, sch->handle);
1157 +}
1158 +
1159 +const struct Qdisc_class_ops dpaa2_ceetm_cls_ops = {
1160 + .graft = dpaa2_ceetm_cls_graft,
1161 + .leaf = dpaa2_ceetm_cls_leaf,
1162 + .find = dpaa2_ceetm_cls_find,
1163 + .change = dpaa2_ceetm_cls_change,
1164 + .delete = dpaa2_ceetm_cls_delete,
1165 + .walk = dpaa2_ceetm_cls_walk,
1166 + .tcf_block = dpaa2_ceetm_tcf_block,
1167 + .bind_tcf = dpaa2_ceetm_tcf_bind,
1168 + .unbind_tcf = dpaa2_ceetm_tcf_unbind,
1169 + .dump = dpaa2_ceetm_cls_dump,
1170 + .dump_stats = dpaa2_ceetm_cls_dump_stats,
1171 +};
1172 +
1173 +struct Qdisc_ops dpaa2_ceetm_qdisc_ops __read_mostly = {
1174 + .id = "ceetm",
1175 + .priv_size = sizeof(struct dpaa2_ceetm_qdisc),
1176 + .cl_ops = &dpaa2_ceetm_cls_ops,
1177 + .init = dpaa2_ceetm_init,
1178 + .destroy = dpaa2_ceetm_destroy,
1179 + .change = dpaa2_ceetm_change,
1180 + .dump = dpaa2_ceetm_dump,
1181 + .attach = dpaa2_ceetm_attach,
1182 + .owner = THIS_MODULE,
1183 +};
1184 +
1185 +/* Run the filters and classifiers attached to the qdisc on the provided skb */
1186 +int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
1187 + int *qdid, u8 *qpri)
1188 +{
1189 + struct dpaa2_ceetm_qdisc *priv = qdisc_priv(sch);
1190 + struct dpaa2_ceetm_class *cl = NULL;
1191 + struct tcf_result res;
1192 + struct tcf_proto *tcf;
1193 + int result;
1194 +
1195 + tcf = rcu_dereference_bh(priv->filter_list);
1196 + while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
1197 +#ifdef CONFIG_NET_CLS_ACT
1198 + switch (result) {
1199 + case TC_ACT_QUEUED:
1200 + case TC_ACT_STOLEN:
1201 + case TC_ACT_SHOT:
1202 + /* No valid class found due to action */
1203 + return -1;
1204 + }
1205 +#endif
1206 + cl = (void *)res.class;
1207 + if (!cl) {
1208 + /* The filter leads to the qdisc */
1209 + if (res.classid == sch->handle)
1210 + return 0;
1211 +
1212 + cl = dpaa2_ceetm_find(res.classid, sch);
1213 + /* The filter leads to an invalid class */
1214 + if (!cl)
1215 + break;
1216 + }
1217 +
1218 + /* The class might have its own filters attached */
1219 + tcf = rcu_dereference_bh(cl->filter_list);
1220 + }
1221 +
1222 + /* No valid class found */
1223 + if (!cl)
1224 + return 0;
1225 +
1226 + switch (cl->type) {
1227 + case CEETM_ROOT:
1228 + *qdid = cl->root.ch_id;
1229 +
1230 + /* The root class does not have a child prio qdisc */
1231 + if (!cl->child)
1232 + return 0;
1233 +
1234 + /* Run the prio qdisc classifiers */
1235 + return dpaa2_ceetm_classify(skb, cl->child, qdid, qpri);
1236 +
1237 + case CEETM_PRIO:
1238 + *qpri = cl->prio.qpri;
1239 + break;
1240 + }
1241 +
1242 + return 0;
1243 +}
1244 +
1245 +int __init dpaa2_ceetm_register(void)
1246 +{
1247 + int err = 0;
1248 +
1249 + pr_debug(KBUILD_MODNAME ": " DPAA2_CEETM_DESCRIPTION "\n");
1250 +
1251 + err = register_qdisc(&dpaa2_ceetm_qdisc_ops);
1252 + if (unlikely(err))
1253 + pr_err(KBUILD_MODNAME
1254 + ": %s:%hu:%s(): register_qdisc() = %d\n",
1255 + KBUILD_BASENAME ".c", __LINE__, __func__, err);
1256 +
1257 + return err;
1258 +}
1259 +
1260 +void __exit dpaa2_ceetm_unregister(void)
1261 +{
1262 + pr_debug(KBUILD_MODNAME ": %s:%s() ->\n",
1263 + KBUILD_BASENAME ".c", __func__);
1264 +
1265 + unregister_qdisc(&dpaa2_ceetm_qdisc_ops);
1266 +}
1267 --- /dev/null
1268 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-ceetm.h
1269 @@ -0,0 +1,183 @@
1270 +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
1271 +/*
1272 + * Copyright 2017 NXP
1273 + *
1274 + */
1275 +
1276 +#ifndef __DPAA2_ETH_CEETM_H
1277 +#define __DPAA2_ETH_CEETM_H
1278 +
1279 +#include <net/pkt_sched.h>
1280 +#include <net/pkt_cls.h>
1281 +#include <net/netlink.h>
1282 +
1283 +#include "dpaa2-eth.h"
1284 +
1285 +/* For functional purposes, there are num_tx_queues pfifo qdiscs through which
1286 + * frames reach the driver. Their handles start from 1:21. Handles 1:1 to 1:20
1287 + * are reserved for the maximum 32 CEETM channels (majors and minors are in
1288 + * hex).
1289 + */
1290 +#define PFIFO_MIN_OFFSET 0x21
1291 +
1292 +#define DPAA2_CEETM_MIN_WEIGHT 100
1293 +#define DPAA2_CEETM_MAX_WEIGHT 24800
1294 +
1295 +#define DPAA2_CEETM_TD_THRESHOLD 1000
1296 +
1297 +enum wbfs_group_type {
1298 + WBFS_GRP_A,
1299 + WBFS_GRP_B,
1300 + WBFS_GRP_LARGE
1301 +};
1302 +
1303 +enum {
1304 + DPAA2_CEETM_TCA_UNSPEC,
1305 + DPAA2_CEETM_TCA_COPT,
1306 + DPAA2_CEETM_TCA_QOPS,
1307 + DPAA2_CEETM_TCA_MAX,
1308 +};
1309 +
1310 +/* CEETM configuration types */
1311 +enum dpaa2_ceetm_type {
1312 + CEETM_ROOT = 1,
1313 + CEETM_PRIO,
1314 +};
1315 +
1316 +enum {
1317 + STRICT_PRIORITY = 0,
1318 + WEIGHTED_A,
1319 + WEIGHTED_B,
1320 +};
1321 +
1322 +struct dpaa2_ceetm_shaping_cfg {
1323 + __u64 cir; /* committed information rate */
1324 + __u64 eir; /* excess information rate */
1325 + __u16 cbs; /* committed burst size */
1326 + __u16 ebs; /* excess burst size */
1327 + __u8 coupled; /* shaper coupling */
1328 +};
1329 +
1330 +extern const struct nla_policy ceetm_policy[DPAA2_CEETM_TCA_MAX];
1331 +
1332 +struct dpaa2_ceetm_class;
1333 +struct dpaa2_ceetm_qdisc_stats;
1334 +struct dpaa2_ceetm_class_stats;
1335 +
1336 +/* corresponds to CEETM shaping at LNI level */
1337 +struct dpaa2_root_q {
1338 + struct Qdisc **qdiscs;
1339 + struct dpaa2_ceetm_qdisc_stats __percpu *qstats;
1340 +};
1341 +
1342 +/* corresponds to the number of priorities a channel serves */
1343 +struct dpaa2_prio_q {
1344 + struct dpaa2_ceetm_class *parent;
1345 + struct dpni_tx_priorities_cfg tx_prio_cfg;
1346 +};
1347 +
1348 +struct dpaa2_ceetm_qdisc {
1349 + struct Qdisc_class_hash clhash;
1350 + struct tcf_proto *filter_list; /* qdisc attached filters */
1351 + struct tcf_block *block;
1352 +
1353 + enum dpaa2_ceetm_type type; /* ROOT/PRIO */
1354 + bool shaped;
1355 + union {
1356 + struct dpaa2_root_q root;
1357 + struct dpaa2_prio_q prio;
1358 + };
1359 +};
1360 +
1361 +/* CEETM Qdisc configuration parameters */
1362 +struct dpaa2_ceetm_tc_qopt {
1363 + enum dpaa2_ceetm_type type;
1364 + __u16 shaped;
1365 + __u8 prio_group_A;
1366 + __u8 prio_group_B;
1367 + __u8 separate_groups;
1368 +};
1369 +
1370 +/* root class - corresponds to a channel */
1371 +struct dpaa2_root_c {
1372 + struct dpaa2_ceetm_shaping_cfg shaping_cfg;
1373 + u32 ch_id;
1374 +};
1375 +
1376 +/* prio class - corresponds to a strict priority queue (group) */
1377 +struct dpaa2_prio_c {
1378 + struct dpaa2_ceetm_class_stats __percpu *cstats;
1379 + u32 qpri;
1380 + u8 mode;
1381 + u16 weight;
1382 +};
1383 +
1384 +struct dpaa2_ceetm_class {
1385 + struct Qdisc_class_common common;
1386 + struct tcf_proto *filter_list; /* class attached filters */
1387 + struct tcf_block *block;
1388 + struct Qdisc *parent;
1389 + struct Qdisc *child;
1390 +
1391 + enum dpaa2_ceetm_type type; /* ROOT/PRIO */
1392 + bool shaped;
1393 + union {
1394 + struct dpaa2_root_c root;
1395 + struct dpaa2_prio_c prio;
1396 + };
1397 +};
1398 +
1399 +/* CEETM Class configuration parameters */
1400 +struct dpaa2_ceetm_tc_copt {
1401 + enum dpaa2_ceetm_type type;
1402 + struct dpaa2_ceetm_shaping_cfg shaping_cfg;
1403 + __u16 shaped;
1404 + __u8 mode;
1405 + __u16 weight;
1406 +};
1407 +
1408 +/* CEETM stats */
1409 +struct dpaa2_ceetm_qdisc_stats {
1410 + __u32 drops;
1411 +};
1412 +
1413 +struct dpaa2_ceetm_class_stats {
1414 + /* Software counters */
1415 + struct gnet_stats_basic_packed bstats;
1416 + __u32 ern_drop_count;
1417 + __u32 congested_count;
1418 +};
1419 +
1420 +struct dpaa2_ceetm_tc_xstats {
1421 + __u64 ceetm_dequeue_bytes;
1422 + __u64 ceetm_dequeue_frames;
1423 + __u64 ceetm_reject_bytes;
1424 + __u64 ceetm_reject_frames;
1425 +};
1426 +
1427 +#ifdef CONFIG_FSL_DPAA2_ETH_CEETM
1428 +int __init dpaa2_ceetm_register(void);
1429 +void __exit dpaa2_ceetm_unregister(void);
1430 +int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
1431 + int *qdid, u8 *qpri);
1432 +#else
1433 +static inline int dpaa2_ceetm_register(void)
1434 +{
1435 + return 0;
1436 +}
1437 +
1438 +static inline void dpaa2_ceetm_unregister(void) {}
1439 +
1440 +static inline int dpaa2_ceetm_classify(struct sk_buff *skb, struct Qdisc *sch,
1441 + int *qdid, u8 *qpri)
1442 +{
1443 + return 0;
1444 +}
1445 +#endif
1446 +
1447 +static inline bool dpaa2_eth_ceetm_is_enabled(struct dpaa2_eth_priv *priv)
1448 +{
1449 + return priv->ceetm_en;
1450 +}
1451 +
1452 +#endif
1453 --- /dev/null
1454 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c
1455 @@ -0,0 +1,356 @@
1456 +
1457 +/* Copyright 2015 Freescale Semiconductor Inc.
1458 + *
1459 + * Redistribution and use in source and binary forms, with or without
1460 + * modification, are permitted provided that the following conditions are met:
1461 + * * Redistributions of source code must retain the above copyright
1462 + * notice, this list of conditions and the following disclaimer.
1463 + * * Redistributions in binary form must reproduce the above copyright
1464 + * notice, this list of conditions and the following disclaimer in the
1465 + * documentation and/or other materials provided with the distribution.
1466 + * * Neither the name of Freescale Semiconductor nor the
1467 + * names of its contributors may be used to endorse or promote products
1468 + * derived from this software without specific prior written permission.
1469 + *
1470 + *
1471 + * ALTERNATIVELY, this software may be distributed under the terms of the
1472 + * GNU General Public License ("GPL") as published by the Free Software
1473 + * Foundation, either version 2 of that License or (at your option) any
1474 + * later version.
1475 + *
1476 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1477 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1478 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1479 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1480 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1481 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1482 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1483 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1484 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1485 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1486 + */
1487 +
1488 +#include <linux/module.h>
1489 +#include <linux/debugfs.h>
1490 +#include "dpaa2-eth.h"
1491 +#include "dpaa2-eth-debugfs.h"
1492 +
1493 +#define DPAA2_ETH_DBG_ROOT "dpaa2-eth"
1494 +
1495 +static struct dentry *dpaa2_dbg_root;
1496 +
1497 +static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
1498 +{
1499 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
1500 + struct rtnl_link_stats64 *stats;
1501 + struct dpaa2_eth_drv_stats *extras;
1502 + int i;
1503 +
1504 + seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
1505 + seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s%16s\n",
1506 + "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf",
1507 + "Tx SG", "Tx realloc", "Enq busy");
1508 +
1509 + for_each_online_cpu(i) {
1510 + stats = per_cpu_ptr(priv->percpu_stats, i);
1511 + extras = per_cpu_ptr(priv->percpu_extras, i);
1512 + seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n",
1513 + i,
1514 + stats->rx_packets,
1515 + stats->rx_errors,
1516 + extras->rx_sg_frames,
1517 + stats->tx_packets,
1518 + stats->tx_errors,
1519 + extras->tx_conf_frames,
1520 + extras->tx_sg_frames,
1521 + extras->tx_reallocs,
1522 + extras->tx_portal_busy);
1523 + }
1524 +
1525 + return 0;
1526 +}
1527 +
1528 +static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file)
1529 +{
1530 + int err;
1531 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
1532 +
1533 + err = single_open(file, dpaa2_dbg_cpu_show, priv);
1534 + if (err < 0)
1535 + netdev_err(priv->net_dev, "single_open() failed\n");
1536 +
1537 + return err;
1538 +}
1539 +
1540 +static const struct file_operations dpaa2_dbg_cpu_ops = {
1541 + .open = dpaa2_dbg_cpu_open,
1542 + .read = seq_read,
1543 + .llseek = seq_lseek,
1544 + .release = single_release,
1545 +};
1546 +
1547 +static char *fq_type_to_str(struct dpaa2_eth_fq *fq)
1548 +{
1549 + switch (fq->type) {
1550 + case DPAA2_RX_FQ:
1551 + return "Rx";
1552 + case DPAA2_TX_CONF_FQ:
1553 + return "Tx conf";
1554 + case DPAA2_RX_ERR_FQ:
1555 + return "Rx err";
1556 + default:
1557 + return "N/A";
1558 + }
1559 +}
1560 +
1561 +static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
1562 +{
1563 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
1564 + struct dpaa2_eth_fq *fq;
1565 + u32 fcnt, bcnt;
1566 + int i, err;
1567 +
1568 + seq_printf(file, "non-zero FQ stats for %s:\n", priv->net_dev->name);
1569 + seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
1570 + "VFQID", "CPU", "Traffic Class", "Type", "Frames",
1571 + "Pending frames");
1572 +
1573 + for (i = 0; i < priv->num_fqs; i++) {
1574 + fq = &priv->fq[i];
1575 + err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
1576 + if (err)
1577 + fcnt = 0;
1578 +
1579 + /* A lot of queues, no use displaying zero traffic ones */
1580 + if (!fq->stats.frames && !fcnt)
1581 + continue;
1582 +
1583 + seq_printf(file, "%5d%16d%16d%16s%16llu%16u\n",
1584 + fq->fqid,
1585 + fq->target_cpu,
1586 + fq->tc,
1587 + fq_type_to_str(fq),
1588 + fq->stats.frames,
1589 + fcnt);
1590 + }
1591 +
1592 + return 0;
1593 +}
1594 +
1595 +static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file)
1596 +{
1597 + int err;
1598 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
1599 +
1600 + err = single_open(file, dpaa2_dbg_fqs_show, priv);
1601 + if (err < 0)
1602 + netdev_err(priv->net_dev, "single_open() failed\n");
1603 +
1604 + return err;
1605 +}
1606 +
1607 +static const struct file_operations dpaa2_dbg_fq_ops = {
1608 + .open = dpaa2_dbg_fqs_open,
1609 + .read = seq_read,
1610 + .llseek = seq_lseek,
1611 + .release = single_release,
1612 +};
1613 +
1614 +static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
1615 +{
1616 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
1617 + struct dpaa2_eth_channel *ch;
1618 + int i;
1619 +
1620 + seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
1621 + seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
1622 + "CHID", "CPU", "Deq busy", "Frames", "CDANs",
1623 + "Avg frm/CDAN", "Buf count");
1624 +
1625 + for (i = 0; i < priv->num_channels; i++) {
1626 + ch = priv->channel[i];
1627 + seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n",
1628 + ch->ch_id,
1629 + ch->nctx.desired_cpu,
1630 + ch->stats.dequeue_portal_busy,
1631 + ch->stats.frames,
1632 + ch->stats.cdan,
1633 + ch->stats.frames / ch->stats.cdan,
1634 + ch->buf_count);
1635 + }
1636 +
1637 + return 0;
1638 +}
1639 +
1640 +static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file)
1641 +{
1642 + int err;
1643 + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
1644 +
1645 + err = single_open(file, dpaa2_dbg_ch_show, priv);
1646 + if (err < 0)
1647 + netdev_err(priv->net_dev, "single_open() failed\n");
1648 +
1649 + return err;
1650 +}
1651 +
1652 +static const struct file_operations dpaa2_dbg_ch_ops = {
1653 + .open = dpaa2_dbg_ch_open,
1654 + .read = seq_read,
1655 + .llseek = seq_lseek,
1656 + .release = single_release,
1657 +};
1658 +
1659 +static ssize_t dpaa2_dbg_reset_write(struct file *file, const char __user *buf,
1660 + size_t count, loff_t *offset)
1661 +{
1662 + struct dpaa2_eth_priv *priv = file->private_data;
1663 + struct rtnl_link_stats64 *percpu_stats;
1664 + struct dpaa2_eth_drv_stats *percpu_extras;
1665 + struct dpaa2_eth_fq *fq;
1666 + struct dpaa2_eth_channel *ch;
1667 + int i;
1668 +
1669 + for_each_online_cpu(i) {
1670 + percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
1671 + memset(percpu_stats, 0, sizeof(*percpu_stats));
1672 +
1673 + percpu_extras = per_cpu_ptr(priv->percpu_extras, i);
1674 + memset(percpu_extras, 0, sizeof(*percpu_extras));
1675 + }
1676 +
1677 + for (i = 0; i < priv->num_fqs; i++) {
1678 + fq = &priv->fq[i];
1679 + memset(&fq->stats, 0, sizeof(fq->stats));
1680 + }
1681 +
1682 + for (i = 0; i < priv->num_channels; i++) {
1683 + ch = priv->channel[i];
1684 + memset(&ch->stats, 0, sizeof(ch->stats));
1685 + }
1686 +
1687 + return count;
1688 +}
1689 +
1690 +static const struct file_operations dpaa2_dbg_reset_ops = {
1691 + .open = simple_open,
1692 + .write = dpaa2_dbg_reset_write,
1693 +};
1694 +
1695 +static ssize_t dpaa2_dbg_reset_mc_write(struct file *file,
1696 + const char __user *buf,
1697 + size_t count, loff_t *offset)
1698 +{
1699 + struct dpaa2_eth_priv *priv = file->private_data;
1700 + int err;
1701 +
1702 + err = dpni_reset_statistics(priv->mc_io, 0, priv->mc_token);
1703 + if (err)
1704 + netdev_err(priv->net_dev,
1705 + "dpni_reset_statistics() failed %d\n", err);
1706 +
1707 + return count;
1708 +}
1709 +
1710 +static const struct file_operations dpaa2_dbg_reset_mc_ops = {
1711 + .open = simple_open,
1712 + .write = dpaa2_dbg_reset_mc_write,
1713 +};
1714 +
1715 +void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
1716 +{
1717 + if (!dpaa2_dbg_root)
1718 + return;
1719 +
1720 + /* Create a directory for the interface */
1721 + priv->dbg.dir = debugfs_create_dir(priv->net_dev->name,
1722 + dpaa2_dbg_root);
1723 + if (!priv->dbg.dir) {
1724 + netdev_err(priv->net_dev, "debugfs_create_dir() failed\n");
1725 + return;
1726 + }
1727 +
1728 + /* per-cpu stats file */
1729 + priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444,
1730 + priv->dbg.dir, priv,
1731 + &dpaa2_dbg_cpu_ops);
1732 + if (!priv->dbg.cpu_stats) {
1733 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
1734 + goto err_cpu_stats;
1735 + }
1736 +
1737 + /* per-fq stats file */
1738 + priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444,
1739 + priv->dbg.dir, priv,
1740 + &dpaa2_dbg_fq_ops);
1741 + if (!priv->dbg.fq_stats) {
1742 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
1743 + goto err_fq_stats;
1744 + }
1745 +
1746 + /* per-channel stats file */
1747 + priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444,
1748 + priv->dbg.dir, priv,
1749 + &dpaa2_dbg_ch_ops);
1750 + if (!priv->dbg.ch_stats) {
1751 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
1752 + goto err_ch_stats;
1753 + }
1754 +
1755 + /* reset stats */
1756 + priv->dbg.reset_stats = debugfs_create_file("reset_stats", 0200,
1757 + priv->dbg.dir, priv,
1758 + &dpaa2_dbg_reset_ops);
1759 + if (!priv->dbg.reset_stats) {
1760 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
1761 + goto err_reset_stats;
1762 + }
1763 +
1764 + /* reset MC stats */
1765 + priv->dbg.reset_mc_stats = debugfs_create_file("reset_mc_stats",
1766 + 0222, priv->dbg.dir, priv,
1767 + &dpaa2_dbg_reset_mc_ops);
1768 + if (!priv->dbg.reset_mc_stats) {
1769 + netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
1770 + goto err_reset_mc_stats;
1771 + }
1772 +
1773 + return;
1774 +
1775 +err_reset_mc_stats:
1776 + debugfs_remove(priv->dbg.reset_stats);
1777 +err_reset_stats:
1778 + debugfs_remove(priv->dbg.ch_stats);
1779 +err_ch_stats:
1780 + debugfs_remove(priv->dbg.fq_stats);
1781 +err_fq_stats:
1782 + debugfs_remove(priv->dbg.cpu_stats);
1783 +err_cpu_stats:
1784 + debugfs_remove(priv->dbg.dir);
1785 +}
1786 +
1787 +void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
1788 +{
1789 + debugfs_remove(priv->dbg.reset_mc_stats);
1790 + debugfs_remove(priv->dbg.reset_stats);
1791 + debugfs_remove(priv->dbg.fq_stats);
1792 + debugfs_remove(priv->dbg.ch_stats);
1793 + debugfs_remove(priv->dbg.cpu_stats);
1794 + debugfs_remove(priv->dbg.dir);
1795 +}
1796 +
1797 +void dpaa2_eth_dbg_init(void)
1798 +{
1799 + dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL);
1800 + if (!dpaa2_dbg_root) {
1801 + pr_err("DPAA2-ETH: debugfs create failed\n");
1802 + return;
1803 + }
1804 +
1805 + pr_info("DPAA2-ETH: debugfs created\n");
1806 +}
1807 +
1808 +void __exit dpaa2_eth_dbg_exit(void)
1809 +{
1810 + debugfs_remove(dpaa2_dbg_root);
1811 +}
1812 --- /dev/null
1813 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h
1814 @@ -0,0 +1,60 @@
1815 +/* Copyright 2015 Freescale Semiconductor Inc.
1816 + *
1817 + * Redistribution and use in source and binary forms, with or without
1818 + * modification, are permitted provided that the following conditions are met:
1819 + * * Redistributions of source code must retain the above copyright
1820 + * notice, this list of conditions and the following disclaimer.
1821 + * * Redistributions in binary form must reproduce the above copyright
1822 + * notice, this list of conditions and the following disclaimer in the
1823 + * documentation and/or other materials provided with the distribution.
1824 + * * Neither the name of Freescale Semiconductor nor the
1825 + * names of its contributors may be used to endorse or promote products
1826 + * derived from this software without specific prior written permission.
1827 + *
1828 + *
1829 + * ALTERNATIVELY, this software may be distributed under the terms of the
1830 + * GNU General Public License ("GPL") as published by the Free Software
1831 + * Foundation, either version 2 of that License or (at your option) any
1832 + * later version.
1833 + *
1834 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1835 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1836 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1837 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1838 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1839 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1840 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1841 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1842 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1843 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1844 + */
1845 +
1846 +#ifndef DPAA2_ETH_DEBUGFS_H
1847 +#define DPAA2_ETH_DEBUGFS_H
1848 +
1849 +#include <linux/dcache.h>
1850 +
1851 +struct dpaa2_eth_priv;
1852 +
1853 +struct dpaa2_debugfs {
1854 + struct dentry *dir;
1855 + struct dentry *fq_stats;
1856 + struct dentry *ch_stats;
1857 + struct dentry *cpu_stats;
1858 + struct dentry *reset_stats;
1859 + struct dentry *reset_mc_stats;
1860 +};
1861 +
1862 +#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS
1863 +void dpaa2_eth_dbg_init(void);
1864 +void dpaa2_eth_dbg_exit(void);
1865 +void dpaa2_dbg_add(struct dpaa2_eth_priv *priv);
1866 +void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv);
1867 +#else
1868 +static inline void dpaa2_eth_dbg_init(void) {}
1869 +static inline void dpaa2_eth_dbg_exit(void) {}
1870 +static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {}
1871 +static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {}
1872 +#endif /* CONFIG_FSL_DPAA2_ETH_DEBUGFS */
1873 +
1874 +#endif /* DPAA2_ETH_DEBUGFS_H */
1875 --- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
1876 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h
1877 @@ -1,32 +1,5 @@
1878 +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
1879 /* Copyright 2014-2015 Freescale Semiconductor Inc.
1880 - *
1881 - * Redistribution and use in source and binary forms, with or without
1882 - * modification, are permitted provided that the following conditions are met:
1883 - * * Redistributions of source code must retain the above copyright
1884 - * notice, this list of conditions and the following disclaimer.
1885 - * * Redistributions in binary form must reproduce the above copyright
1886 - * notice, this list of conditions and the following disclaimer in the
1887 - * documentation and/or other materials provided with the distribution.
1888 - * * Neither the name of Freescale Semiconductor nor the
1889 - * names of its contributors may be used to endorse or promote products
1890 - * derived from this software without specific prior written permission.
1891 - *
1892 - *
1893 - * ALTERNATIVELY, this software may be distributed under the terms of the
1894 - * GNU General Public License ("GPL") as published by the Free Software
1895 - * Foundation, either version 2 of that License or (at your option) any
1896 - * later version.
1897 - *
1898 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1899 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1900 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1901 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1902 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1903 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1904 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1905 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1906 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1907 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1908 */
1909
1910 #undef TRACE_SYSTEM
1911 --- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
1912 +++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
1913 @@ -1,33 +1,6 @@
1914 +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
1915 /* Copyright 2014-2016 Freescale Semiconductor Inc.
1916 * Copyright 2016-2017 NXP
1917 - *
1918 - * Redistribution and use in source and binary forms, with or without
1919 - * modification, are permitted provided that the following conditions are met:
1920 - * * Redistributions of source code must retain the above copyright
1921 - * notice, this list of conditions and the following disclaimer.
1922 - * * Redistributions in binary form must reproduce the above copyright
1923 - * notice, this list of conditions and the following disclaimer in the
1924 - * documentation and/or other materials provided with the distribution.
1925 - * * Neither the name of Freescale Semiconductor nor the
1926 - * names of its contributors may be used to endorse or promote products
1927 - * derived from this software without specific prior written permission.
1928 - *
1929 - *
1930 - * ALTERNATIVELY, this software may be distributed under the terms of the
1931 - * GNU General Public License ("GPL") as published by the Free Software
1932 - * Foundation, either version 2 of that License or (at your option) any
1933 - * later version.
1934 - *
1935 - * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
1936 - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1937 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
1938 - * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
1939 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1940 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
1941 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
1942 - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1943 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1944 - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1945 */
1946 #include <linux/init.h>
1947 #include <linux/module.h>
1948 @@ -38,9 +11,14 @@
1949 #include <linux/msi.h>
1950 #include <linux/kthread.h>
1951 #include <linux/iommu.h>
1952 -
1953 +#include <linux/net_tstamp.h>
1954 +#include <linux/bpf.h>
1955 +#include <linux/filter.h>
1956 +#include <linux/atomic.h>
1957 +#include <net/sock.h>
1958 #include "../../fsl-mc/include/mc.h"
1959 #include "dpaa2-eth.h"
1960 +#include "dpaa2-eth-ceetm.h"
1961
1962 /* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
1963 * using trace events only need to #include <trace/events/sched.h>
1964 @@ -52,8 +30,6 @@ MODULE_LICENSE("Dual BSD/GPL");
1965 MODULE_AUTHOR("Freescale Semiconductor, Inc");
1966 MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
1967
1968 -const char dpaa2_eth_drv_version[] = "0.1";
1969 -
1970 static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
1971 dma_addr_t iova_addr)
1972 {
1973 @@ -104,26 +80,27 @@ static void free_rx_fd(struct dpaa2_eth_
1974 /* We don't support any other format */
1975 return;
1976
1977 - /* For S/G frames, we first need to free all SG entries */
1978 + /* For S/G frames, we first need to free all SG entries
1979 + * except the first one, which was taken care of already
1980 + */
1981 sgt = vaddr + dpaa2_fd_get_offset(fd);
1982 - for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
1983 + for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
1984 addr = dpaa2_sg_get_addr(&sgt[i]);
1985 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
1986 - dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
1987 - DMA_FROM_DEVICE);
1988 + dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
1989 + DMA_BIDIRECTIONAL);
1990
1991 - skb_free_frag(sg_vaddr);
1992 + free_pages((unsigned long)sg_vaddr, 0);
1993 if (dpaa2_sg_is_final(&sgt[i]))
1994 break;
1995 }
1996
1997 free_buf:
1998 - skb_free_frag(vaddr);
1999 + free_pages((unsigned long)vaddr, 0);
2000 }
2001
2002 /* Build a linear skb based on a single-buffer frame descriptor */
2003 -static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
2004 - struct dpaa2_eth_channel *ch,
2005 +static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
2006 const struct dpaa2_fd *fd,
2007 void *fd_vaddr)
2008 {
2009 @@ -133,8 +110,7 @@ static struct sk_buff *build_linear_skb(
2010
2011 ch->buf_count--;
2012
2013 - skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE +
2014 - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
2015 + skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
2016 if (unlikely(!skb))
2017 return NULL;
2018
2019 @@ -169,16 +145,20 @@ static struct sk_buff *build_frag_skb(st
2020 /* Get the address and length from the S/G entry */
2021 sg_addr = dpaa2_sg_get_addr(sge);
2022 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
2023 - dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
2024 - DMA_FROM_DEVICE);
2025 + dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
2026 + DMA_BIDIRECTIONAL);
2027
2028 sg_length = dpaa2_sg_get_len(sge);
2029
2030 if (i == 0) {
2031 /* We build the skb around the first data buffer */
2032 - skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE +
2033 - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
2034 + skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
2035 if (unlikely(!skb)) {
2036 + /* Free the first SG entry now, since we already
2037 + * unmapped it and obtained the virtual address
2038 + */
2039 + free_pages((unsigned long)sg_vaddr, 0);
2040 +
2041 /* We still need to subtract the buffers used
2042 * by this FD from our software counter
2043 */
2044 @@ -213,17 +193,172 @@ static struct sk_buff *build_frag_skb(st
2045 break;
2046 }
2047
2048 + WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
2049 +
2050 /* Count all data buffers + SG table buffer */
2051 ch->buf_count -= i + 2;
2052
2053 return skb;
2054 }
2055
2056 +static int dpaa2_eth_xdp_tx(struct dpaa2_eth_priv *priv,
2057 + struct dpaa2_fd *fd,
2058 + void *buf_start,
2059 + u16 queue_id)
2060 +{
2061 + struct dpaa2_eth_fq *fq;
2062 + struct rtnl_link_stats64 *percpu_stats;
2063 + struct dpaa2_eth_drv_stats *percpu_extras;
2064 + struct dpaa2_faead *faead;
2065 + u32 ctrl, frc;
2066 + int i, err;
2067 +
2068 + /* Mark the egress frame annotation area as valid */
2069 + frc = dpaa2_fd_get_frc(fd);
2070 + dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
2071 + dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
2072 +
2073 + ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
2074 + faead = dpaa2_get_faead(buf_start, false);
2075 + faead->ctrl = cpu_to_le32(ctrl);
2076 + faead->conf_fqid = 0;
2077 +
2078 + percpu_stats = this_cpu_ptr(priv->percpu_stats);
2079 + percpu_extras = this_cpu_ptr(priv->percpu_extras);
2080 +
2081 + fq = &priv->fq[queue_id];
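+	/* Retry the enqueue a bounded number of times while the QBMan
+	 * portal is busy
+	 */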
2082 + for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
2083 + err = priv->enqueue(priv, fq, fd, 0);
2084 + if (err != -EBUSY)
2085 + break;
2086 + }
2087 +
2088 + percpu_extras->tx_portal_busy += i;
2089 + if (unlikely(err)) {
2090 + percpu_stats->tx_errors++;
2091 + } else {
2092 + percpu_stats->tx_packets++;
2093 + percpu_stats->tx_bytes += dpaa2_fd_get_len(fd);
2094 + }
2095 +
2096 + return err;
2097 +}
2098 +
2099 +/* Free buffers acquired from the buffer pool or which were meant to
2100 + * be released back into the pool
2101 + */
2102 +static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
2103 +{
2104 + struct device *dev = priv->net_dev->dev.parent;
2105 + void *vaddr;
2106 + int i;
2107 +
2108 + for (i = 0; i < count; i++) {
2109 + vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
2110 + dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
2111 + DMA_BIDIRECTIONAL);
2112 + free_pages((unsigned long)vaddr, 0);
2113 + }
2114 +}
2115 +
2116 +static void release_fd_buf(struct dpaa2_eth_priv *priv,
2117 + struct dpaa2_eth_channel *ch,
2118 + dma_addr_t addr)
2119 +{
2120 + int err;
2121 +
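+	/* Buffers are batched and only released back to the pool once
+	 * DPAA2_ETH_BUFS_PER_CMD of them have accumulated
+	 */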
2122 + ch->rel_buf_array[ch->rel_buf_cnt++] = addr;
2123 + if (likely(ch->rel_buf_cnt < DPAA2_ETH_BUFS_PER_CMD))
2124 + return;
2125 +
2126 + while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
2127 + ch->rel_buf_array,
2128 + ch->rel_buf_cnt)) == -EBUSY)
2129 + cpu_relax();
2130 +
2131 + if (err)
2132 + free_bufs(priv, ch->rel_buf_array, ch->rel_buf_cnt);
2133 +
2134 + ch->rel_buf_cnt = 0;
2135 +}
2136 +
2137 +static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
2138 + struct dpaa2_eth_channel *ch,
2139 + struct dpaa2_fd *fd,
2140 + u16 queue_id,
2141 + void *vaddr)
2142 +{
2143 + struct device *dev = priv->net_dev->dev.parent;
2144 + dma_addr_t addr = dpaa2_fd_get_addr(fd);
2145 + struct rtnl_link_stats64 *percpu_stats;
2146 + struct bpf_prog *xdp_prog;
2147 + struct xdp_buff xdp;
2148 + u32 xdp_act = XDP_PASS;
2149 +
2150 + xdp_prog = READ_ONCE(ch->xdp_prog);
2151 + if (!xdp_prog)
2152 + return xdp_act;
2153 +
2154 + percpu_stats = this_cpu_ptr(priv->percpu_stats);
2155 +
2156 + xdp.data = vaddr + dpaa2_fd_get_offset(fd);
2157 + xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
2158 + /* Allow the XDP program to use the specially reserved headroom */
2159 + xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
2160 +
2161 + rcu_read_lock();
2162 + xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
2163 +
2164 + /* xdp.data pointer may have changed */
2165 + dpaa2_fd_set_offset(fd, xdp.data - vaddr);
2166 + dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);
2167 +
2168 + switch (xdp_act) {
2169 + case XDP_PASS:
2170 + break;
2171 + default:
2172 + bpf_warn_invalid_xdp_action(xdp_act);
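+		/* fall through */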
2173 + case XDP_ABORTED:
2174 + case XDP_DROP:
2175 + /* This is our buffer, so we can release it back to hardware */
2176 + release_fd_buf(priv, ch, addr);
2177 + percpu_stats->rx_dropped++;
2178 + break;
2179 + case XDP_TX:
2180 + if (dpaa2_eth_xdp_tx(priv, fd, vaddr, queue_id)) {
2181 + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
2182 + DMA_BIDIRECTIONAL);
2183 + free_rx_fd(priv, fd, vaddr);
2184 + ch->buf_count--;
2185 + }
2186 + break;
2187 + case XDP_REDIRECT:
2188 + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
2189 + DMA_BIDIRECTIONAL);
2190 + ch->buf_count--;
2191 + ch->flush = true;
2192 + /* Mark the actual start of the data buffer */
2193 + xdp.data_hard_start = vaddr;
2194 + if (xdp_do_redirect(priv->net_dev, &xdp, xdp_prog))
2195 + free_rx_fd(priv, fd, vaddr);
2196 + break;
2197 + }
2198 +
2199 + if (xdp_act == XDP_TX || xdp_act == XDP_REDIRECT) {
2200 + percpu_stats->rx_packets++;
2201 + percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
2202 + }
2203 +
2204 + rcu_read_unlock();
2205 +
2206 + return xdp_act;
2207 +}
2208 +
2209 /* Main Rx frame processing routine */
2210 static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
2211 struct dpaa2_eth_channel *ch,
2212 const struct dpaa2_fd *fd,
2213 - struct napi_struct *napi)
2214 + struct dpaa2_eth_fq *fq)
2215 {
2216 dma_addr_t addr = dpaa2_fd_get_addr(fd);
2217 u8 fd_format = dpaa2_fd_get_format(fd);
2218 @@ -235,14 +370,16 @@ static void dpaa2_eth_rx(struct dpaa2_et
2219 struct dpaa2_fas *fas;
2220 void *buf_data;
2221 u32 status = 0;
2222 + u32 xdp_act;
2223
2224 /* Tracing point */
2225 trace_dpaa2_rx_fd(priv->net_dev, fd);
2226
2227 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
2228 - dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
2229 + dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
2230 + DMA_BIDIRECTIONAL);
2231
2232 - fas = dpaa2_get_fas(vaddr);
2233 + fas = dpaa2_get_fas(vaddr, false);
2234 prefetch(fas);
2235 buf_data = vaddr + dpaa2_fd_get_offset(fd);
2236 prefetch(buf_data);
2237 @@ -251,22 +388,43 @@ static void dpaa2_eth_rx(struct dpaa2_et
2238 percpu_extras = this_cpu_ptr(priv->percpu_extras);
2239
2240 if (fd_format == dpaa2_fd_single) {
2241 - skb = build_linear_skb(priv, ch, fd, vaddr);
2242 + xdp_act = dpaa2_eth_run_xdp(priv, ch, (struct dpaa2_fd *)fd,
2243 + fq->flowid, vaddr);
2244 + if (xdp_act != XDP_PASS)
2245 + return;
2246 +
2247 + dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
2248 + DMA_BIDIRECTIONAL);
2249 + skb = build_linear_skb(ch, fd, vaddr);
2250 } else if (fd_format == dpaa2_fd_sg) {
2251 + dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
2252 + DMA_BIDIRECTIONAL);
2253 skb = build_frag_skb(priv, ch, buf_data);
2254 - skb_free_frag(vaddr);
2255 + free_pages((unsigned long)vaddr, 0);
2256 percpu_extras->rx_sg_frames++;
2257 percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
2258 } else {
2259 /* We don't support any other format */
2260 - goto err_frame_format;
2261 + goto drop_cnt;
2262 }
2263
2264 if (unlikely(!skb))
2265 - goto err_build_skb;
2266 + goto drop_fd;
2267
2268 prefetch(skb->data);
2269
2270 + /* Get the timestamp value */
2271 + if (priv->ts_rx_en) {
2272 + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2273 + __le64 *ts = dpaa2_get_ts(vaddr, false);
2274 + u64 ns;
2275 +
2276 + memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2277 +
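+		/* The raw timestamp counts nominal PTP clock periods;
+		 * convert it to nanoseconds
+		 */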
2278 + ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ts);
2279 + shhwtstamps->hwtstamp = ns_to_ktime(ns);
2280 + }
2281 +
2282 /* Check if we need to validate the L4 csum */
2283 if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
2284 status = le32_to_cpu(fas->status);
2285 @@ -274,30 +432,80 @@ static void dpaa2_eth_rx(struct dpaa2_et
2286 }
2287
2288 skb->protocol = eth_type_trans(skb, priv->net_dev);
2289 + skb_record_rx_queue(skb, fq->flowid);
2290
2291 percpu_stats->rx_packets++;
2292 percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
2293
2294 - napi_gro_receive(napi, skb);
2295 + napi_gro_receive(&ch->napi, skb);
2296
2297 return;
2298
2299 -err_build_skb:
2300 +drop_fd:
2301 free_rx_fd(priv, fd, vaddr);
2302 -err_frame_format:
2303 +drop_cnt:
2304 percpu_stats->rx_dropped++;
2305 }
2306
2307 +#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE
2308 +/* Processing of Rx frames received on the error FQ
2309 + * We check and print the error bits and then free the frame
2310 + */
2311 +static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
2312 + struct dpaa2_eth_channel *ch,
2313 + const struct dpaa2_fd *fd,
2314 + struct napi_struct *napi __always_unused,
2315 + u16 queue_id __always_unused)
2316 +{
2317 + struct device *dev = priv->net_dev->dev.parent;
2318 + dma_addr_t addr = dpaa2_fd_get_addr(fd);
2319 + void *vaddr;
2320 + struct rtnl_link_stats64 *percpu_stats;
2321 + struct dpaa2_fas *fas;
2322 + u32 status = 0;
2323 + u32 fd_errors;
2324 + bool has_fas_errors = false;
2325 +
2326 + vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
2327 + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
2328 +
2329 + /* check frame errors in the FD field */
2330 + fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_RX_ERR_MASK;
2331 + if (likely(fd_errors)) {
2332 + has_fas_errors = (fd_errors & FD_CTRL_FAERR) &&
2333 + !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
2334 + if (net_ratelimit())
2335 +			netdev_dbg(priv->net_dev, "Rx frame FD err: 0x%08x\n",
2336 + fd_errors);
2337 + }
2338 +
2339 + /* check frame errors in the FAS field */
2340 + if (has_fas_errors) {
2341 + fas = dpaa2_get_fas(vaddr, false);
2342 + status = le32_to_cpu(fas->status);
2343 + if (net_ratelimit())
2344 + netdev_dbg(priv->net_dev, "Rx frame FAS err: 0x%08x\n",
2345 + status & DPAA2_FAS_RX_ERR_MASK);
2346 + }
2347 + free_rx_fd(priv, fd, vaddr);
2348 +
2349 + percpu_stats = this_cpu_ptr(priv->percpu_stats);
2350 + percpu_stats->rx_errors++;
2351 + ch->buf_count--;
2352 +}
2353 +#endif
2354 +
2355 /* Consume all frames pull-dequeued into the store. This is the simplest way to
2356 * make sure we don't accidentally issue another volatile dequeue which would
2357 * overwrite (leak) frames already in the store.
2358 *
2359 * Observance of NAPI budget is not our concern, leaving that to the caller.
2360 */
2361 -static int consume_frames(struct dpaa2_eth_channel *ch)
2362 +static int consume_frames(struct dpaa2_eth_channel *ch,
2363 + struct dpaa2_eth_fq **src)
2364 {
2365 struct dpaa2_eth_priv *priv = ch->priv;
2366 - struct dpaa2_eth_fq *fq;
2367 + struct dpaa2_eth_fq *fq = NULL;
2368 struct dpaa2_dq *dq;
2369 const struct dpaa2_fd *fd;
2370 int cleaned = 0;
2371 @@ -315,16 +523,51 @@ static int consume_frames(struct dpaa2_e
2372 }
2373
2374 fd = dpaa2_dq_fd(dq);
2375 + prefetch(fd);
2376 +
2377 fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
2378 - fq->stats.frames++;
2379
2380 - fq->consume(priv, ch, fd, &ch->napi);
2381 + fq->consume(priv, ch, fd, fq);
2382 cleaned++;
2383 } while (!is_last);
2384
2385 + if (!cleaned)
2386 + return 0;
2387 +
2388 + fq->stats.frames += cleaned;
2389 + ch->stats.frames += cleaned;
2390 +
2391 + /* A dequeue operation only pulls frames from a single queue
2392 + * into the store. Return the frame queue as an out param.
2393 + */
2394 + if (src)
2395 + *src = fq;
2396 +
2397 return cleaned;
2398 }
2399
2400 +/* Configure the egress frame annotation for timestamp update */
2401 +static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
2402 +{
2403 + struct dpaa2_faead *faead;
2404 + u32 ctrl, frc;
2405 +
2406 + /* Mark the egress frame annotation area as valid */
2407 + frc = dpaa2_fd_get_frc(fd);
2408 + dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
2409 +
2410 + /* Set hardware annotation size */
2411 + ctrl = dpaa2_fd_get_ctrl(fd);
2412 + dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
2413 +
2414 +	/* enable UPD (update prepended data) bit in FAEAD field of
2415 + * hardware frame annotation area
2416 + */
2417 + ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
2418 + faead = dpaa2_get_faead(buf_start, true);
2419 + faead->ctrl = cpu_to_le32(ctrl);
2420 +}
2421 +
2422 /* Create a frame descriptor based on a fragmented skb */
2423 static int build_sg_fd(struct dpaa2_eth_priv *priv,
2424 struct sk_buff *skb,
2425 @@ -341,7 +584,6 @@ static int build_sg_fd(struct dpaa2_eth_
2426 int num_sg;
2427 int num_dma_bufs;
2428 struct dpaa2_eth_swa *swa;
2429 - struct dpaa2_fas *fas;
2430
2431 /* Create and map scatterlist.
2432 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
2433 @@ -365,21 +607,14 @@ static int build_sg_fd(struct dpaa2_eth_
2434
2435 /* Prepare the HW SGT structure */
2436 sgt_buf_size = priv->tx_data_offset +
2437 - sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
2438 - sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
2439 + sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
2440 + sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
2441 if (unlikely(!sgt_buf)) {
2442 err = -ENOMEM;
2443 goto sgt_buf_alloc_failed;
2444 }
2445 sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
2446 -
2447 - /* PTA from egress side is passed as is to the confirmation side so
2448 - * we need to clear some fields here in order to find consistent values
2449 - * on TX confirmation. We are clearing FAS (Frame Annotation Status)
2450 - * field from the hardware annotation area
2451 - */
2452 - fas = dpaa2_get_fas(sgt_buf);
2453 - memset(fas, 0, DPAA2_FAS_SIZE);
2454 + memset(sgt_buf, 0, sgt_buf_size);
2455
2456 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
2457
2458 @@ -402,10 +637,11 @@ static int build_sg_fd(struct dpaa2_eth_
2459 * all of them on Tx Conf.
2460 */
2461 swa = (struct dpaa2_eth_swa *)sgt_buf;
2462 - swa->skb = skb;
2463 - swa->scl = scl;
2464 - swa->num_sg = num_sg;
2465 - swa->num_dma_bufs = num_dma_bufs;
2466 + swa->type = DPAA2_ETH_SWA_SG;
2467 + swa->sg.skb = skb;
2468 + swa->sg.scl = scl;
2469 + swa->sg.num_sg = num_sg;
2470 + swa->sg.sgt_size = sgt_buf_size;
2471
2472 /* Separately map the SGT buffer */
2473 addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
2474 @@ -417,13 +653,15 @@ static int build_sg_fd(struct dpaa2_eth_
2475 dpaa2_fd_set_format(fd, dpaa2_fd_sg);
2476 dpaa2_fd_set_addr(fd, addr);
2477 dpaa2_fd_set_len(fd, skb->len);
2478 - dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
2479 - DPAA2_FD_CTRL_PTV1);
2480 + dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
2481 +
2482 + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
2483 + enable_tx_tstamp(fd, sgt_buf);
2484
2485 return 0;
2486
2487 dma_map_single_failed:
2488 - kfree(sgt_buf);
2489 + skb_free_frag(sgt_buf);
2490 sgt_buf_alloc_failed:
2491 dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
2492 dma_map_sg_failed:
2493 @@ -437,29 +675,27 @@ static int build_single_fd(struct dpaa2_
2494 struct dpaa2_fd *fd)
2495 {
2496 struct device *dev = priv->net_dev->dev.parent;
2497 - u8 *buffer_start;
2498 - struct dpaa2_fas *fas;
2499 - struct sk_buff **skbh;
2500 + u8 *buffer_start, *aligned_start;
2501 + struct dpaa2_eth_swa *swa;
2502 dma_addr_t addr;
2503
2504 - buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset -
2505 - DPAA2_ETH_TX_BUF_ALIGN,
2506 - DPAA2_ETH_TX_BUF_ALIGN);
2507 -
2508 - /* PTA from egress side is passed as is to the confirmation side so
2509 - * we need to clear some fields here in order to find consistent values
2510 - * on TX confirmation. We are clearing FAS (Frame Annotation Status)
2511 - * field from the hardware annotation area
2512 + buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
2513 +
2514 + /* If there's enough room to align the FD address, do it.
2515 + * It will help hardware optimize accesses.
2516 */
2517 - fas = dpaa2_get_fas(buffer_start);
2518 - memset(fas, 0, DPAA2_FAS_SIZE);
2519 + aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
2520 + DPAA2_ETH_TX_BUF_ALIGN);
2521 + if (aligned_start >= skb->head)
2522 + buffer_start = aligned_start;
2523
2524 /* Store a backpointer to the skb at the beginning of the buffer
2525 * (in the private data area) such that we can release it
2526 * on Tx confirm
2527 */
2528 - skbh = (struct sk_buff **)buffer_start;
2529 - *skbh = skb;
2530 + swa = (struct dpaa2_eth_swa *)buffer_start;
2531 + swa->type = DPAA2_ETH_SWA_SINGLE;
2532 + swa->single.skb = skb;
2533
2534 addr = dma_map_single(dev, buffer_start,
2535 skb_tail_pointer(skb) - buffer_start,
2536 @@ -471,8 +707,10 @@ static int build_single_fd(struct dpaa2_
2537 dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
2538 dpaa2_fd_set_len(fd, skb->len);
2539 dpaa2_fd_set_format(fd, dpaa2_fd_single);
2540 - dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
2541 - DPAA2_FD_CTRL_PTV1);
2542 + dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
2543 +
2544 + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
2545 + enable_tx_tstamp(fd, buffer_start);
2546
2547 return 0;
2548 }
2549 @@ -483,72 +721,75 @@ static int build_single_fd(struct dpaa2_
2550 * back-pointed to is also freed.
2551 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
2552 * dpaa2_eth_tx().
2553 - * Optionally, return the frame annotation status word (FAS), which needs
2554 - * to be checked if we're on the confirmation path.
2555 */
2556 static void free_tx_fd(const struct dpaa2_eth_priv *priv,
2557 - const struct dpaa2_fd *fd,
2558 - u32 *status)
2559 + const struct dpaa2_fd *fd, bool in_napi)
2560 {
2561 struct device *dev = priv->net_dev->dev.parent;
2562 dma_addr_t fd_addr;
2563 - struct sk_buff **skbh, *skb;
2564 + struct sk_buff *skb = NULL;
2565 unsigned char *buffer_start;
2566 - int unmap_size;
2567 - struct scatterlist *scl;
2568 - int num_sg, num_dma_bufs;
2569 struct dpaa2_eth_swa *swa;
2570 u8 fd_format = dpaa2_fd_get_format(fd);
2571 - struct dpaa2_fas *fas;
2572
2573 fd_addr = dpaa2_fd_get_addr(fd);
2574 - skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
2575 - fas = dpaa2_get_fas(skbh);
2576 + buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
2577 + swa = (struct dpaa2_eth_swa *)buffer_start;
2578
2579 if (fd_format == dpaa2_fd_single) {
2580 - skb = *skbh;
2581 - buffer_start = (unsigned char *)skbh;
2582 - /* Accessing the skb buffer is safe before dma unmap, because
2583 - * we didn't map the actual skb shell.
2584 - */
2585 - dma_unmap_single(dev, fd_addr,
2586 - skb_tail_pointer(skb) - buffer_start,
2587 - DMA_BIDIRECTIONAL);
2588 + if (swa->type == DPAA2_ETH_SWA_SINGLE) {
2589 + skb = swa->single.skb;
2590 + /* Accessing the skb buffer is safe before dma unmap,
2591 + * because we didn't map the actual skb shell.
2592 + */
2593 + dma_unmap_single(dev, fd_addr,
2594 + skb_tail_pointer(skb) - buffer_start,
2595 + DMA_BIDIRECTIONAL);
2596 + } else {
2597 + WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP,
2598 + "Wrong SWA type");
2599 + dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
2600 + DMA_BIDIRECTIONAL);
2601 + }
2602 } else if (fd_format == dpaa2_fd_sg) {
2603 - swa = (struct dpaa2_eth_swa *)skbh;
2604 - skb = swa->skb;
2605 - scl = swa->scl;
2606 - num_sg = swa->num_sg;
2607 - num_dma_bufs = swa->num_dma_bufs;
2608 + skb = swa->sg.skb;
2609
2610 /* Unmap the scatterlist */
2611 - dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
2612 - kfree(scl);
2613 + dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg, DMA_BIDIRECTIONAL);
2614 + kfree(swa->sg.scl);
2615
2616 /* Unmap the SGT buffer */
2617 - unmap_size = priv->tx_data_offset +
2618 - sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
2619 - dma_unmap_single(dev, fd_addr, unmap_size, DMA_BIDIRECTIONAL);
2620 + dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
2621 + DMA_BIDIRECTIONAL);
2622 } else {
2623 - /* Unsupported format, mark it as errored and give up */
2624 - if (status)
2625 - *status = ~0;
2626 + netdev_dbg(priv->net_dev, "Invalid FD format\n");
2627 return;
2628 }
2629
2630 - /* Read the status from the Frame Annotation after we unmap the first
2631 - * buffer but before we free it. The caller function is responsible
2632 - * for checking the status value.
2633 - */
2634 - if (status)
2635 - *status = le32_to_cpu(fas->status);
2636 + if (swa->type == DPAA2_ETH_SWA_XDP) {
2637 + page_frag_free(buffer_start);
2638 + return;
2639 + }
2640 +
2641 + /* Get the timestamp value */
2642 + if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
2643 + struct skb_shared_hwtstamps shhwtstamps;
2644 + __le64 *ts = dpaa2_get_ts(buffer_start, true);
2645 + u64 ns;
2646 +
2647 + memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2648 +
2649 + ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ts);
2650 + shhwtstamps.hwtstamp = ns_to_ktime(ns);
2651 + skb_tstamp_tx(skb, &shhwtstamps);
2652 + }
2653
2654 - /* Free SGT buffer kmalloc'ed on tx */
2655 + /* Free SGT buffer allocated on tx */
2656 if (fd_format != dpaa2_fd_single)
2657 - kfree(skbh);
2658 + skb_free_frag(buffer_start);
2659
2660 /* Move on with skb release */
2661 - dev_kfree_skb(skb);
2662 + napi_consume_skb(skb, in_napi);
2663 }
2664
2665 static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
2666 @@ -558,20 +799,41 @@ static netdev_tx_t dpaa2_eth_tx(struct s
2667 struct rtnl_link_stats64 *percpu_stats;
2668 struct dpaa2_eth_drv_stats *percpu_extras;
2669 struct dpaa2_eth_fq *fq;
2670 + struct netdev_queue *nq;
2671 u16 queue_mapping;
2672 - int err, i;
2673 + unsigned int needed_headroom;
2674 + u32 fd_len;
2675 + u8 prio;
2676 + int err, i, ch_id = 0;
2677 +
2678 + queue_mapping = skb_get_queue_mapping(skb);
2679 + prio = netdev_txq_to_tc(net_dev, queue_mapping);
2680 + /* Hardware interprets priority level 0 as being the highest,
2681 + * so we need to do a reverse mapping to the netdev tc index
2682 + */
2683 + if (net_dev->num_tc)
2684 + prio = net_dev->num_tc - prio - 1;
2685 +
2686 + queue_mapping %= dpaa2_eth_queue_count(priv);
2687 + fq = &priv->fq[queue_mapping];
2688
2689 percpu_stats = this_cpu_ptr(priv->percpu_stats);
2690 percpu_extras = this_cpu_ptr(priv->percpu_extras);
2691
2692 - if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) {
2693 + needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
2694 + if (skb_headroom(skb) < needed_headroom) {
2695 struct sk_buff *ns;
2696
2697 - ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv));
2698 + ns = skb_realloc_headroom(skb, needed_headroom);
2699 if (unlikely(!ns)) {
2700 percpu_stats->tx_dropped++;
2701 goto err_alloc_headroom;
2702 }
2703 + percpu_extras->tx_reallocs++;
2704 +
2705 + if (skb->sk)
2706 + skb_set_owner_w(ns, skb->sk);
2707 +
2708 dev_kfree_skb(skb);
2709 skb = ns;
2710 }
2711 @@ -602,17 +864,24 @@ static netdev_tx_t dpaa2_eth_tx(struct s
2712 goto err_build_fd;
2713 }
2714
2715 + if (dpaa2_eth_ceetm_is_enabled(priv)) {
2716 + err = dpaa2_ceetm_classify(skb, net_dev->qdisc, &ch_id, &prio);
2717 + if (err)
2718 + goto err_ceetm_classify;
2719 + }
2720 +
2721 /* Tracing point */
2722 trace_dpaa2_tx_fd(net_dev, &fd);
2723
2724 - /* TxConf FQ selection primarily based on cpu affinity; this is
2725 - * non-migratable context, so it's safe to call smp_processor_id().
2726 + fd_len = dpaa2_fd_get_len(&fd);
2727 + nq = netdev_get_tx_queue(net_dev, queue_mapping);
2728 + netdev_tx_sent_queue(nq, fd_len);
2729 +
2730 + /* Everything that happens after this enqueues might race with
2731 +	/* Everything that happens after this enqueue might race with
2732 */
2733 - queue_mapping = smp_processor_id() % dpaa2_eth_queue_count(priv);
2734 - fq = &priv->fq[queue_mapping];
2735 for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
2736 - err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
2737 - fq->tx_qdbin, &fd);
2738 + err = priv->enqueue(priv, fq, &fd, 0);
2739 if (err != -EBUSY)
2740 break;
2741 }
2742 @@ -620,14 +889,17 @@ static netdev_tx_t dpaa2_eth_tx(struct s
2743 if (unlikely(err < 0)) {
2744 percpu_stats->tx_errors++;
2745 /* Clean up everything, including freeing the skb */
2746 - free_tx_fd(priv, &fd, NULL);
2747 + free_tx_fd(priv, &fd, false);
2748 + netdev_tx_completed_queue(nq, 1, fd_len);
2749 } else {
2750 percpu_stats->tx_packets++;
2751 - percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
2752 + percpu_stats->tx_bytes += fd_len;
2753 }
2754
2755 return NETDEV_TX_OK;
2756
2757 +err_ceetm_classify:
2758 + free_tx_fd(priv, &fd, false);
2759 err_build_fd:
2760 err_alloc_headroom:
2761 dev_kfree_skb(skb);
2762 @@ -637,48 +909,39 @@ err_alloc_headroom:
2763
2764 /* Tx confirmation frame processing routine */
2765 static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
2766 - struct dpaa2_eth_channel *ch,
2767 + struct dpaa2_eth_channel *ch __always_unused,
2768 const struct dpaa2_fd *fd,
2769 - struct napi_struct *napi __always_unused)
2770 + struct dpaa2_eth_fq *fq)
2771 {
2772 struct rtnl_link_stats64 *percpu_stats;
2773 struct dpaa2_eth_drv_stats *percpu_extras;
2774 - u32 status = 0;
2775 + u32 fd_len = dpaa2_fd_get_len(fd);
2776 u32 fd_errors;
2777 - bool has_fas_errors = false;
2778
2779 /* Tracing point */
2780 trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
2781
2782 percpu_extras = this_cpu_ptr(priv->percpu_extras);
2783 percpu_extras->tx_conf_frames++;
2784 - percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
2785 + percpu_extras->tx_conf_bytes += fd_len;
2786 +
2787 + fq->dq_frames++;
2788 + fq->dq_bytes += fd_len;
2789
2790 /* Check frame errors in the FD field */
2791 fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
2792 - if (unlikely(fd_errors)) {
2793 - /* We only check error bits in the FAS field if corresponding
2794 - * FAERR bit is set in FD and the FAS field is marked as valid
2795 - */
2796 - has_fas_errors = (fd_errors & DPAA2_FD_CTRL_FAERR) &&
2797 - !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
2798 - if (net_ratelimit())
2799 - netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
2800 - fd_errors);
2801 - }
2802 -
2803 - free_tx_fd(priv, fd, has_fas_errors ? &status : NULL);
2804 + free_tx_fd(priv, fd, true);
2805
2806 if (likely(!fd_errors))
2807 return;
2808
2809 + if (net_ratelimit())
2810 + netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
2811 + fd_errors);
2812 +
2813 percpu_stats = this_cpu_ptr(priv->percpu_stats);
2814 /* Tx-conf logically pertains to the egress path. */
2815 percpu_stats->tx_errors++;
2816 -
2817 - if (has_fas_errors && net_ratelimit())
2818 - netdev_dbg(priv->net_dev, "TX frame FAS error: 0x%08x\n",
2819 - status & DPAA2_FAS_TX_ERR_MASK);
2820 }
2821
2822 static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
2823 @@ -728,26 +991,29 @@ static int set_tx_csum(struct dpaa2_eth_
2824 /* Perform a single release command to add buffers
2825 * to the specified buffer pool
2826 */
2827 -static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
2828 +static int add_bufs(struct dpaa2_eth_priv *priv,
2829 + struct dpaa2_eth_channel *ch, u16 bpid)
2830 {
2831 struct device *dev = priv->net_dev->dev.parent;
2832 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
2833 - void *buf;
2834 + struct page *page;
2835 dma_addr_t addr;
2836 - int i;
2837 + int i, err;
2838
2839 for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
2840 /* Allocate buffer visible to WRIOP + skb shared info +
2841 * alignment padding
2842 */
2843 - buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE);
2844 - if (unlikely(!buf))
2845 + /* allocate one page for each Rx buffer. WRIOP sees
2846 + * the entire page except for a tailroom reserved for
2847 + * skb shared info
2848 + */
2849 + page = dev_alloc_pages(0);
2850 + if (!page)
2851 goto err_alloc;
2852
2853 - buf = PTR_ALIGN(buf, DPAA2_ETH_RX_BUF_ALIGN);
2854 -
2855 - addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
2856 - DMA_FROM_DEVICE);
2857 + addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
2858 + DMA_BIDIRECTIONAL);
2859 if (unlikely(dma_mapping_error(dev, addr)))
2860 goto err_map;
2861
2862 @@ -755,28 +1021,33 @@ static int add_bufs(struct dpaa2_eth_pri
2863
2864 /* tracing point */
2865 trace_dpaa2_eth_buf_seed(priv->net_dev,
2866 - buf, DPAA2_ETH_BUF_RAW_SIZE,
2867 + page, DPAA2_ETH_RX_BUF_RAW_SIZE,
2868 addr, DPAA2_ETH_RX_BUF_SIZE,
2869 bpid);
2870 }
2871
2872 release_bufs:
2873 - /* In case the portal is busy, retry until successful.
2874 - * The buffer release function would only fail if the QBMan portal
2875 - * was busy, which implies portal contention (i.e. more CPUs than
2876 - * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes,
2877 - * there is little we can realistically do, short of giving up -
2878 - * in which case we'd risk depleting the buffer pool and never again
2879 - * receiving the Rx interrupt which would kick-start the refill logic.
2880 - * So just keep retrying, at the risk of being moved to ksoftirqd.
2881 - */
2882 - while (dpaa2_io_service_release(NULL, bpid, buf_array, i))
2883 + /* In case the portal is busy, retry until successful */
2884 + while ((err = dpaa2_io_service_release(ch->dpio, bpid,
2885 + buf_array, i)) == -EBUSY)
2886 cpu_relax();
2887 +
2888 + /* If release command failed, clean up and bail out;
2889 + * not much else we can do about it
2890 + */
2891 + if (err) {
2892 + free_bufs(priv, buf_array, i);
2893 + return 0;
2894 + }
2895 +
2896 return i;
2897
2898 err_map:
2899 - skb_free_frag(buf);
2900 + __free_pages(page, 0);
2901 err_alloc:
2902 + /* If we managed to allocate at least some buffers,
2903 + * release them to hardware
2904 + */
2905 if (i)
2906 goto release_bufs;
2907
2908 @@ -796,9 +1067,10 @@ static int seed_pool(struct dpaa2_eth_pr
2909 */
2910 preempt_disable();
2911 for (j = 0; j < priv->num_channels; j++) {
2912 - for (i = 0; i < DPAA2_ETH_NUM_BUFS;
2913 + priv->channel[j]->buf_count = 0;
2914 + for (i = 0; i < priv->max_bufs_per_ch;
2915 i += DPAA2_ETH_BUFS_PER_CMD) {
2916 - new_count = add_bufs(priv, bpid);
2917 + new_count = add_bufs(priv, priv->channel[j], bpid);
2918 priv->channel[j]->buf_count += new_count;
2919
2920 if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
2921 @@ -818,10 +1090,8 @@ static int seed_pool(struct dpaa2_eth_pr
2922 */
2923 static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
2924 {
2925 - struct device *dev = priv->net_dev->dev.parent;
2926 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
2927 - void *vaddr;
2928 - int ret, i;
2929 + int ret;
2930
2931 do {
2932 ret = dpaa2_io_service_acquire(NULL, priv->bpid,
2933 @@ -830,27 +1100,16 @@ static void drain_bufs(struct dpaa2_eth_
2934 netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
2935 return;
2936 }
2937 - for (i = 0; i < ret; i++) {
2938 - /* Same logic as on regular Rx path */
2939 - vaddr = dpaa2_iova_to_virt(priv->iommu_domain,
2940 - buf_array[i]);
2941 - dma_unmap_single(dev, buf_array[i],
2942 - DPAA2_ETH_RX_BUF_SIZE,
2943 - DMA_FROM_DEVICE);
2944 - skb_free_frag(vaddr);
2945 - }
2946 + free_bufs(priv, buf_array, ret);
2947 } while (ret);
2948 }
2949
2950 static void drain_pool(struct dpaa2_eth_priv *priv)
2951 {
2952 - int i;
2953 -
2954 + preempt_disable();
2955 drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
2956 drain_bufs(priv, 1);
2957 -
2958 - for (i = 0; i < priv->num_channels; i++)
2959 - priv->channel[i]->buf_count = 0;
2960 + preempt_enable();
2961 }
2962
2963 /* Function is called from softirq context only, so we don't need to guard
2964 @@ -862,19 +1121,19 @@ static int refill_pool(struct dpaa2_eth_
2965 {
2966 int new_count;
2967
2968 - if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
2969 + if (likely(ch->buf_count >= priv->refill_thresh))
2970 return 0;
2971
2972 do {
2973 - new_count = add_bufs(priv, bpid);
2974 + new_count = add_bufs(priv, ch, bpid);
2975 if (unlikely(!new_count)) {
2976 /* Out of memory; abort for now, we'll try later on */
2977 break;
2978 }
2979 ch->buf_count += new_count;
2980 - } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
2981 + } while (ch->buf_count < priv->max_bufs_per_ch);
2982
2983 - if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
2984 + if (unlikely(ch->buf_count < priv->max_bufs_per_ch))
2985 return -ENOMEM;
2986
2987 return 0;
2988 @@ -887,7 +1146,8 @@ static int pull_channel(struct dpaa2_eth
2989
2990 /* Retry while portal is busy */
2991 do {
2992 - err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store);
2993 + err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
2994 + ch->store);
2995 dequeues++;
2996 cpu_relax();
2997 } while (err == -EBUSY);
2998 @@ -908,14 +1168,17 @@ static int pull_channel(struct dpaa2_eth
2999 static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
3000 {
3001 struct dpaa2_eth_channel *ch;
3002 - int cleaned = 0, store_cleaned;
3003 struct dpaa2_eth_priv *priv;
3004 + int rx_cleaned = 0, txconf_cleaned = 0;
3005 + struct dpaa2_eth_fq *fq, *txc_fq = NULL;
3006 + struct netdev_queue *nq;
3007 + int store_cleaned, work_done;
3008 int err;
3009
3010 ch = container_of(napi, struct dpaa2_eth_channel, napi);
3011 priv = ch->priv;
3012
3013 - while (cleaned < budget) {
3014 + do {
3015 err = pull_channel(ch);
3016 if (unlikely(err))
3017 break;
3018 @@ -923,29 +1186,56 @@ static int dpaa2_eth_poll(struct napi_st
3019 /* Refill pool if appropriate */
3020 refill_pool(priv, ch, priv->bpid);
3021
3022 - store_cleaned = consume_frames(ch);
3023 - cleaned += store_cleaned;
3024 + store_cleaned = consume_frames(ch, &fq);
3025 + if (!store_cleaned)
3026 + break;
3027 + if (fq->type == DPAA2_RX_FQ) {
3028 + rx_cleaned += store_cleaned;
3029 + /* If these are XDP_REDIRECT frames, flush them now */
3030 + /* TODO: Do we need this? */
3031 + if (ch->flush) {
3032 + xdp_do_flush_map();
3033 + ch->flush = false;
3034 + }
3035 + } else {
3036 + txconf_cleaned += store_cleaned;
3037 + /* We have a single Tx conf FQ on this channel */
3038 + txc_fq = fq;
3039 + }
3040
3041 - /* If we have enough budget left for a full store,
3042 - * try a new pull dequeue, otherwise we're done here
3043 + /* If we either consumed the whole NAPI budget with Rx frames
3044 + * or we reached the Tx confirmations threshold, we're done.
3045 */
3046 - if (store_cleaned == 0 ||
3047 - cleaned > budget - DPAA2_ETH_STORE_SIZE)
3048 - break;
3049 - }
3050 + if (rx_cleaned >= budget ||
3051 + txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
3052 + work_done = budget;
3053 + goto out;
3054 + }
3055 + } while (store_cleaned);
3056
3057 - if (cleaned < budget) {
3058 - napi_complete_done(napi, cleaned);
3059 - /* Re-enable data available notifications */
3060 - do {
3061 - err = dpaa2_io_service_rearm(NULL, &ch->nctx);
3062 - cpu_relax();
3063 - } while (err == -EBUSY);
3064 - }
3065 + /* We didn't consume the entire budget, so finish napi and
3066 + * re-enable data availability notifications
3067 + */
3068 + napi_complete_done(napi, rx_cleaned);
3069 + do {
3070 + err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
3071 + cpu_relax();
3072 + } while (err == -EBUSY);
3073 + WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
3074 + ch->nctx.desired_cpu);
3075
3076 - ch->stats.frames += cleaned;
3077 + work_done = max(rx_cleaned, 1);
3078
3079 - return cleaned;
3080 +out:
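+	/* Report the processed Tx confirmations to BQL for this queue */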
3081 + if (txc_fq) {
3082 + nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
3083 + netdev_tx_completed_queue(nq, txc_fq->dq_frames,
3084 + txc_fq->dq_bytes);
3085 + txc_fq->dq_frames = 0;
3086 + txc_fq->dq_bytes = 0;
3087 + }
3088 +
3089 + return work_done;
3090 }
3091
3092 static void enable_ch_napi(struct dpaa2_eth_priv *priv)
3093 @@ -970,9 +1260,23 @@ static void disable_ch_napi(struct dpaa2
3094 }
3095 }
3096
3097 +static void update_tx_fqids(struct dpaa2_eth_priv *priv);
3098 +
3099 +static void update_pf(struct dpaa2_eth_priv *priv,
3100 + struct dpni_link_state *state)
3101 +{
3102 + bool pause_frames;
3103 +
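+	/* Re-evaluate the Rx taildrop configuration whenever the
+	 * pause frame setting changes
+	 */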
3104 + pause_frames = !!(state->options & DPNI_LINK_OPT_PAUSE);
3105 + if (priv->tx_pause_frames != pause_frames) {
3106 + priv->tx_pause_frames = pause_frames;
3107 + set_rx_taildrop(priv);
3108 + }
3109 +}
3110 +
3111 static int link_state_update(struct dpaa2_eth_priv *priv)
3112 {
3113 - struct dpni_link_state state;
3114 + struct dpni_link_state state = {0};
3115 int err;
3116
3117 err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
3118 @@ -988,6 +1292,8 @@ static int link_state_update(struct dpaa
3119
3120 priv->link_state = state;
3121 if (state.up) {
3122 + update_tx_fqids(priv);
3123 + update_pf(priv, &state);
3124 netif_carrier_on(priv->net_dev);
3125 netif_tx_start_all_queues(priv->net_dev);
3126 } else {
3127 @@ -1006,28 +1312,30 @@ static int dpaa2_eth_open(struct net_dev
3128 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3129 int err;
3130
3131 - err = seed_pool(priv, priv->bpid);
3132 - if (err) {
3133 - /* Not much to do; the buffer pool, though not filled up,
3134 - * may still contain some buffers which would enable us
3135 - * to limp on.
3136 - */
3137 - netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
3138 - priv->dpbp_dev->obj_desc.id, priv->bpid);
3139 - }
3140 -
3141 /* We'll only start the txqs when the link is actually ready; make sure
3142 * we don't race against the link up notification, which may come
3143 * immediately after dpni_enable();
3144 */
3145 netif_tx_stop_all_queues(net_dev);
3146 - enable_ch_napi(priv);
3147 +
3148 /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
3149 * return true and cause 'ip link show' to report the LOWER_UP flag,
3150 * even though the link notification wasn't even received.
3151 */
3152 netif_carrier_off(net_dev);
3153
3154 + err = seed_pool(priv, priv->bpid);
3155 + if (err) {
3156 + /* Not much to do; the buffer pool, though not filled up,
3157 + * may still contain some buffers which would enable us
3158 + * to limp on.
3159 + */
3160 + netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
3161 + priv->dpbp_dev->obj_desc.id, priv->bpid);
3162 + }
3163 +
3164 + priv->refill_thresh = DPAA2_ETH_REFILL_THRESH(priv);
3165 +
3166 err = dpni_enable(priv->mc_io, 0, priv->mc_token);
3167 if (err < 0) {
3168 netdev_err(net_dev, "dpni_enable() failed\n");
3169 @@ -1047,48 +1355,17 @@ static int dpaa2_eth_open(struct net_dev
3170
3171 link_state_err:
3172 enable_err:
3173 - disable_ch_napi(priv);
3174 + priv->refill_thresh = 0;
3175 drain_pool(priv);
3176 return err;
3177 }
3178
3179 -/* The DPIO store must be empty when we call this,
3180 - * at the end of every NAPI cycle.
3181 - */
3182 -static u32 drain_channel(struct dpaa2_eth_priv *priv,
3183 - struct dpaa2_eth_channel *ch)
3184 -{
3185 - u32 drained = 0, total = 0;
3186 -
3187 - do {
3188 - pull_channel(ch);
3189 - drained = consume_frames(ch);
3190 - total += drained;
3191 - } while (drained);
3192 -
3193 - return total;
3194 -}
3195 -
3196 -static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
3197 -{
3198 - struct dpaa2_eth_channel *ch;
3199 - int i;
3200 - u32 drained = 0;
3201 -
3202 - for (i = 0; i < priv->num_channels; i++) {
3203 - ch = priv->channel[i];
3204 - drained += drain_channel(priv, ch);
3205 - }
3206 -
3207 - return drained;
3208 -}
3209 -
3210 static int dpaa2_eth_stop(struct net_device *net_dev)
3211 {
3212 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3213 - int dpni_enabled;
3214 - int retries = 10;
3215 - u32 drained;
3216 + int dpni_enabled = 0;
3217 + int retries = 10, i;
3218 + int err = 0;
3219
3220 netif_tx_stop_all_queues(net_dev);
3221 netif_carrier_off(net_dev);
3222 @@ -1105,56 +1382,24 @@ static int dpaa2_eth_stop(struct net_dev
3223 } while (dpni_enabled && --retries);
3224 if (!retries) {
3225 netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
3226 - /* Must go on and disable NAPI nonetheless, so we don't crash at
3227 - * the next "ifconfig up"
3228 + /* Must go on and finish processing pending frames, so we don't
3229 + * crash at the next "ifconfig up"
3230 */
3231 + err = -ETIMEDOUT;
3232 }
3233
3234 - /* Wait for NAPI to complete on every core and disable it.
3235 - * In particular, this will also prevent NAPI from being rescheduled if
3236 - * a new CDAN is serviced, effectively discarding the CDAN. We therefore
3237 - * don't even need to disarm the channels, except perhaps for the case
3238 - * of a huge coalescing value.
3239 - */
3240 - disable_ch_napi(priv);
3241 + priv->refill_thresh = 0;
3242
3243 - /* Manually drain the Rx and TxConf queues */
3244 - drained = drain_ingress_frames(priv);
3245 - if (drained)
3246 - netdev_dbg(net_dev, "Drained %d frames.\n", drained);
3247 + /* Wait for all running napi poll routines to finish, so that no
3248 + * new refill operations are started
3249 + */
3250 + for (i = 0; i < priv->num_channels; i++)
3251 + napi_synchronize(&priv->channel[i]->napi);
3252
3253 /* Empty the buffer pool */
3254 drain_pool(priv);
3255
3256 - return 0;
3257 -}
3258 -
3259 -static int dpaa2_eth_init(struct net_device *net_dev)
3260 -{
3261 - u64 supported = 0;
3262 - u64 not_supported = 0;
3263 - struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3264 - u32 options = priv->dpni_attrs.options;
3265 -
3266 - /* Capabilities listing */
3267 - supported |= IFF_LIVE_ADDR_CHANGE;
3268 -
3269 - if (options & DPNI_OPT_NO_MAC_FILTER)
3270 - not_supported |= IFF_UNICAST_FLT;
3271 - else
3272 - supported |= IFF_UNICAST_FLT;
3273 -
3274 - net_dev->priv_flags |= supported;
3275 - net_dev->priv_flags &= ~not_supported;
3276 -
3277 - /* Features */
3278 - net_dev->features = NETIF_F_RXCSUM |
3279 - NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3280 - NETIF_F_SG | NETIF_F_HIGHDMA |
3281 - NETIF_F_LLTX;
3282 - net_dev->hw_features = net_dev->features;
3283 -
3284 - return 0;
3285 + return err;
3286 }
3287
3288 static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
3289 @@ -1200,25 +1445,6 @@ static void dpaa2_eth_get_stats(struct n
3290 }
3291 }
3292
3293 -static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu)
3294 -{
3295 - struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3296 - int err;
3297 -
3298 - /* Set the maximum Rx frame length to match the transmit side;
3299 - * account for L2 headers when computing the MFL
3300 - */
3301 - err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
3302 - (u16)DPAA2_ETH_L2_MAX_FRM(mtu));
3303 - if (err) {
3304 - netdev_err(net_dev, "dpni_set_max_frame_length() failed\n");
3305 - return err;
3306 - }
3307 -
3308 - net_dev->mtu = mtu;
3309 - return 0;
3310 -}
3311 -
3312 /* Copy mac unicast addresses from @net_dev to @priv.
3313 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
3314 */
3315 @@ -1380,16 +1606,430 @@ static int dpaa2_eth_set_features(struct
3316 return 0;
3317 }
3318
3319 +static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3320 +{
3321 + struct dpaa2_eth_priv *priv = netdev_priv(dev);
3322 + struct hwtstamp_config config;
3323 +
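+	/* Parse the hardware timestamping request passed from user space */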
3324 + if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
3325 + return -EFAULT;
3326 +
3327 + switch (config.tx_type) {
3328 + case HWTSTAMP_TX_OFF:
3329 + priv->ts_tx_en = false;
3330 + break;
3331 + case HWTSTAMP_TX_ON:
3332 + priv->ts_tx_en = true;
3333 + break;
3334 + default:
3335 + return -ERANGE;
3336 + }
3337 +
3338 + if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
3339 + priv->ts_rx_en = false;
3340 + } else {
3341 + priv->ts_rx_en = true;
3342 + /* TS is set for all frame types, not only those requested */
3343 + config.rx_filter = HWTSTAMP_FILTER_ALL;
3344 + }
3345 +
3346 + return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
3347 + -EFAULT : 0;
3348 +}
3349 +